1 use alloc::{self, Layout};
6 use super::waitqueue::{
7 try_lock_or_false, NotifiedTcs, SpinMutex, SpinMutexGuard, WaitQueue, WaitVariable,
12 readers: SpinMutex<WaitVariable<Option<NonZeroUsize>>>,
13 writer: SpinMutex<WaitVariable<bool>>,
16 // Below is to check at compile time, that RWLock has size of 128 bytes.
18 unsafe fn rw_lock_size_assert(r: RWLock) {
19 mem::transmute::<RWLock, [u8; 128]>(r);
22 //unsafe impl Send for RWLock {}
23 //unsafe impl Sync for RWLock {} // FIXME
26 pub const fn new() -> RWLock {
28 readers: SpinMutex::new(WaitVariable::new(None)),
29 writer: SpinMutex::new(WaitVariable::new(false)),
34 pub unsafe fn read(&self) {
35 let mut rguard = self.readers.lock();
36 let wguard = self.writer.lock();
37 if *wguard.lock_var() || !wguard.queue_empty() {
38 // Another thread has or is waiting for the write lock, wait
40 WaitQueue::wait(rguard);
41 // Another thread has passed the lock to us
43 // No waiting writers, acquire the read lock
44 *rguard.lock_var_mut() =
45 NonZeroUsize::new(rguard.lock_var().map_or(0, |n| n.get()) + 1);
50 pub unsafe fn try_read(&self) -> bool {
51 let mut rguard = try_lock_or_false!(self.readers);
52 let wguard = try_lock_or_false!(self.writer);
53 if *wguard.lock_var() || !wguard.queue_empty() {
54 // Another thread has or is waiting for the write lock
57 // No waiting writers, acquire the read lock
58 *rguard.lock_var_mut() =
59 NonZeroUsize::new(rguard.lock_var().map_or(0, |n| n.get()) + 1);
65 pub unsafe fn write(&self) {
66 let rguard = self.readers.lock();
67 let mut wguard = self.writer.lock();
68 if *wguard.lock_var() || rguard.lock_var().is_some() {
69 // Another thread has the lock, wait
71 WaitQueue::wait(wguard);
72 // Another thread has passed the lock to us
74 // We are just now obtaining the lock
75 *wguard.lock_var_mut() = true;
80 pub unsafe fn try_write(&self) -> bool {
81 let rguard = try_lock_or_false!(self.readers);
82 let mut wguard = try_lock_or_false!(self.writer);
83 if *wguard.lock_var() || rguard.lock_var().is_some() {
84 // Another thread has the lock
87 // We are just now obtaining the lock
88 *wguard.lock_var_mut() = true;
94 unsafe fn __read_unlock(
96 mut rguard: SpinMutexGuard<WaitVariable<Option<NonZeroUsize>>>,
97 wguard: SpinMutexGuard<WaitVariable<bool>>,
99 *rguard.lock_var_mut() = NonZeroUsize::new(rguard.lock_var().unwrap().get() - 1);
100 if rguard.lock_var().is_some() {
101 // There are other active readers
103 if let Ok(mut wguard) = WaitQueue::notify_one(wguard) {
104 // A writer was waiting, pass the lock
105 *wguard.lock_var_mut() = true;
107 // No writers were waiting, the lock is released
108 assert!(rguard.queue_empty());
114 pub unsafe fn read_unlock(&self) {
115 let rguard = self.readers.lock();
116 let wguard = self.writer.lock();
117 self.__read_unlock(rguard, wguard);
121 unsafe fn __write_unlock(
123 rguard: SpinMutexGuard<WaitVariable<Option<NonZeroUsize>>>,
124 wguard: SpinMutexGuard<WaitVariable<bool>>,
126 if let Err(mut wguard) = WaitQueue::notify_one(wguard) {
127 // No writers waiting, release the write lock
128 *wguard.lock_var_mut() = false;
129 if let Ok(mut rguard) = WaitQueue::notify_all(rguard) {
130 // One or more readers were waiting, pass the lock to them
131 if let NotifiedTcs::All { count } = rguard.notified_tcs() {
132 *rguard.lock_var_mut() = Some(count)
134 unreachable!() // called notify_all
137 // No readers waiting, the lock is released
140 // There was a thread waiting for write, just pass the lock
145 pub unsafe fn write_unlock(&self) {
146 let rguard = self.readers.lock();
147 let wguard = self.writer.lock();
148 self.__write_unlock(rguard, wguard);
151 // only used by __rust_rwlock_unlock below
153 unsafe fn unlock(&self) {
154 let rguard = self.readers.lock();
155 let wguard = self.writer.lock();
156 if *wguard.lock_var() == true {
157 self.__write_unlock(rguard, wguard);
159 self.__read_unlock(rguard, wguard);
// Nothing to tear down: the lock owns no external resources.
pub unsafe fn destroy(&self) {}
// POSIX errno value 22 (EINVAL); presumably returned by the C ABI
// `__rust_rwlock_*` shims below on invalid input — verify against libunwind.
const EINVAL: i32 = 22;
169 // used by libunwind port
171 pub unsafe extern "C" fn __rust_rwlock_rdlock(p: *mut RWLock) -> i32 {
180 pub unsafe extern "C" fn __rust_rwlock_wrlock(p: *mut RWLock) -> i32 {
188 pub unsafe extern "C" fn __rust_rwlock_unlock(p: *mut RWLock) -> i32 {
// the following functions are also used by the libunwind port. They're
// included here to make sure parallel codegen and LTO don't mess things up.
/// C ABI shim: print up to `s` bytes at `m` to stderr, stopping at the
/// first NUL byte; silently ignores negative lengths and non-UTF-8 data.
#[no_mangle]
pub unsafe extern "C" fn __rust_print_err(m: *mut u8, s: i32) {
    if s < 0 {
        return;
    }
    let buf = slice::from_raw_parts(m as *const u8, s as _);
    // Treat the buffer as NUL-terminated if a NUL is present, otherwise
    // use its full length; print only if the result is valid UTF-8.
    if let Ok(s) = str::from_utf8(&buf[..buf.iter().position(|&b| b == 0).unwrap_or(buf.len())]) {
        eprint!("{}", s);
    }
}
210 pub unsafe extern "C" fn __rust_abort() {
211 ::sys::abort_internal();
/// C ABI shim over the Rust allocator: allocate `size` bytes with the
/// given alignment. Caller must ensure the (size, align) pair is valid,
/// since the layout is constructed unchecked.
#[no_mangle]
pub unsafe extern "C" fn __rust_c_alloc(size: usize, align: usize) -> *mut u8 {
    alloc::alloc(Layout::from_size_align_unchecked(size, align))
}
/// C ABI shim over the Rust allocator: free `ptr` previously returned by
/// `__rust_c_alloc` with the SAME size and alignment (required by the
/// allocator contract).
#[no_mangle]
pub unsafe extern "C" fn __rust_c_dealloc(ptr: *mut u8, size: usize, align: usize) {
    alloc::dealloc(ptr, Layout::from_size_align_unchecked(size, align))
}
228 use core::array::FixedSizeArray;
229 use mem::MaybeUninit;
// The below test verifies that the bytes of initialized RWLock are the ones
// we use in libunwind.
// If they change we need to update src/UnwindRustSgx.h in libunwind.
#[test]
fn test_c_rwlock_initializer() {
    // Expected 128-byte memory image of a freshly-initialized RWLock,
    // mirrored in libunwind's UnwindRustSgx.h. Do not edit the bytes
    // without updating that header.
    const RWLOCK_INIT: &[u8] = &[
        0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
        0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
        0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
        0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
        0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
        0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
        0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
        0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
        0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
        0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
        0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
        0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
        0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
        0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
        0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
        0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
    ];

    unsafe {
        // Zero the padding first so the byte-for-byte comparison below is
        // deterministic, then write a real RWLock into the buffer.
        let mut init = MaybeUninit::<RWLock>::zeroed();
        init.set(RWLock::new());
        assert_eq!(
            mem::transmute::<_, [u8; 128]>(init.into_inner()).as_slice(),
            RWLOCK_INIT
        );
    }
}