1 use alloc::{self, Layout};
6 use super::waitqueue::{
7 try_lock_or_false, NotifiedTcs, SpinMutex, SpinMutexGuard, WaitQueue, WaitVariable,
// Fields of RWLock (struct header is elided in this view).
// `readers`: None when no reader holds the lock, Some(n) for n active readers.
// `writer`: true while a writer holds the lock.
// Each field pairs a spin lock with a wait queue of blocked TCSes.
12 readers: SpinMutex<WaitVariable<Option<NonZeroUsize>>>,
13 writer: SpinMutex<WaitVariable<bool>>,
16 // Below is to check at compile time, that RWLock has size of 128 bytes.
// transmute only compiles between types of equal size, so this function fails
// to build if size_of::<RWLock>() != 128. It is never meant to be called;
// the layout is relied upon by libunwind (see the test at the bottom).
18 unsafe fn rw_lock_size_assert(r: RWLock) {
19 mem::transmute::<RWLock, [u8; 128]>(r);
// Creates an unlocked RWLock: no readers (None) and no writer (false).
// `const` so it can initialize statics.
23 pub const fn new() -> RWLock {
25 readers: SpinMutex::new(WaitVariable::new(None)),
26 writer: SpinMutex::new(WaitVariable::new(false)),
// Acquires a read lock. Blocks (via the readers wait queue) while a writer
// holds the lock OR is queued for it — i.e. writers are given preference so
// a stream of readers cannot starve them.
// NOTE(review): some lines (else/closing braces) are elided in this view.
31 pub unsafe fn read(&self) {
32 let mut rguard = self.readers.lock();
33 let wguard = self.writer.lock();
34 if *wguard.lock_var() || !wguard.queue_empty() {
35 // Another thread has or is waiting for the write lock, wait
37 WaitQueue::wait(rguard);
38 // Another thread has passed the lock to us
40 // No waiting writers, acquire the read lock
// Increment the reader count: None is treated as 0, then re-wrapped by
// NonZeroUsize::new (count + 1 is always nonzero here).
41 *rguard.lock_var_mut() =
42 NonZeroUsize::new(rguard.lock_var().map_or(0, |n| n.get()) + 1);
// Non-blocking read acquisition. Returns false if either spin lock is
// contended (try_lock_or_false! early-returns) or if a writer holds or is
// waiting for the lock; otherwise increments the reader count.
// NOTE(review): the `return false` / `true` result lines are elided here.
47 pub unsafe fn try_read(&self) -> bool {
48 let mut rguard = try_lock_or_false!(self.readers);
49 let wguard = try_lock_or_false!(self.writer);
50 if *wguard.lock_var() || !wguard.queue_empty() {
51 // Another thread has or is waiting for the write lock
54 // No waiting writers, acquire the read lock
// Same reader-count increment as in read(): map None to 0, add 1, re-wrap.
55 *rguard.lock_var_mut() =
56 NonZeroUsize::new(rguard.lock_var().map_or(0, |n| n.get()) + 1);
// Acquires the write lock. Blocks (via the writer wait queue) while any
// reader (readers count is Some) or another writer holds the lock.
// NOTE(review): some lines (else/closing braces) are elided in this view.
62 pub unsafe fn write(&self) {
63 let rguard = self.readers.lock();
64 let mut wguard = self.writer.lock();
65 if *wguard.lock_var() || rguard.lock_var().is_some() {
66 // Another thread has the lock, wait
68 WaitQueue::wait(wguard);
69 // Another thread has passed the lock to us
71 // We are just now obtaining the lock
72 *wguard.lock_var_mut() = true;
// Non-blocking write acquisition. Returns false if either spin lock is
// contended or the lock is held by any reader or writer; otherwise marks
// the write lock as taken.
// NOTE(review): the `return false` / `true` result lines are elided here.
77 pub unsafe fn try_write(&self) -> bool {
78 let rguard = try_lock_or_false!(self.readers);
79 let mut wguard = try_lock_or_false!(self.writer);
80 if *wguard.lock_var() || rguard.lock_var().is_some() {
81 // Another thread has the lock
84 // We are just now obtaining the lock
85 *wguard.lock_var_mut() = true;
// Releases one read lock while both spin-lock guards are already held.
// When the last reader leaves, hands the lock directly to a queued writer
// (if any); otherwise the lock becomes free.
// Panics (unwrap) if called with no read lock held — caller's invariant.
91 unsafe fn __read_unlock(
93 mut rguard: SpinMutexGuard<WaitVariable<Option<NonZeroUsize>>>,
94 wguard: SpinMutexGuard<WaitVariable<bool>>,
// Decrement the reader count; NonZeroUsize::new yields None when it hits 0,
// which doubles as the "last reader left" signal tested just below.
96 *rguard.lock_var_mut() = NonZeroUsize::new(rguard.lock_var().unwrap().get() - 1);
97 if rguard.lock_var().is_some() {
98 // There are other active readers
100 if let Ok(mut wguard) = WaitQueue::notify_one(wguard) {
101 // A writer was waiting, pass the lock
102 *wguard.lock_var_mut() = true;
104 // No writers were waiting, the lock is released
// Writer-preference invariant: readers only queue while a writer holds or
// awaits the lock, so with no writers there can be no queued readers.
105 assert!(rguard.queue_empty());
// Public read-unlock entry point: takes both spin locks in the canonical
// order (readers, then writer) and delegates to __read_unlock.
111 pub unsafe fn read_unlock(&self) {
112 let rguard = self.readers.lock();
113 let wguard = self.writer.lock();
114 self.__read_unlock(rguard, wguard);
// Releases the write lock while both spin-lock guards are already held.
// Preference order on release: a queued writer gets the lock first
// (notify_one returns Ok); otherwise all queued readers are woken at once
// and the reader count is set to the number of woken TCSes.
// NOTE(review): several else/brace lines are elided in this view.
118 unsafe fn __write_unlock(
120 rguard: SpinMutexGuard<WaitVariable<Option<NonZeroUsize>>>,
121 wguard: SpinMutexGuard<WaitVariable<bool>>,
// Err from notify_one means the writer queue was empty (guard handed back).
123 if let Err(mut wguard) = WaitQueue::notify_one(wguard) {
124 // No writers waiting, release the write lock
125 *wguard.lock_var_mut() = false;
126 if let Ok(mut rguard) = WaitQueue::notify_all(rguard) {
127 // One or more readers were waiting, pass the lock to them
128 if let NotifiedTcs::All { count } = rguard.notified_tcs() {
129 *rguard.lock_var_mut() = Some(count)
131 unreachable!() // called notify_all
134 // No readers waiting, the lock is released
137 // There was a thread waiting for write, just pass the lock
// Public write-unlock entry point: takes both spin locks in the canonical
// order (readers, then writer) and delegates to __write_unlock.
142 pub unsafe fn write_unlock(&self) {
143 let rguard = self.readers.lock();
144 let wguard = self.writer.lock();
145 self.__write_unlock(rguard, wguard);
148 // only used by __rust_rwlock_unlock below
// Generic unlock for the C ABI: inspects the writer flag to decide whether
// this is a write- or read-unlock. Style note (review): `== true` on the
// bool would normally be dropped, but this fragment is left byte-identical.
150 unsafe fn unlock(&self) {
151 let rguard = self.readers.lock();
152 let wguard = self.writer.lock();
153 if *wguard.lock_var() == true {
154 self.__write_unlock(rguard, wguard);
// (else branch — line elided in this view)
156 self.__read_unlock(rguard, wguard);
// No teardown needed: RWLock owns no external resources.
161 pub unsafe fn destroy(&self) {}
// POSIX EINVAL (22), returned by the __rust_rwlock_* C entry points below.
164 const EINVAL: i32 = 22;
166 // used by libunwind port
// C ABI wrapper around RWLock::read. Body elided in this view — presumably
// returns EINVAL for a null `p` and 0 on success; TODO confirm against the
// full source.
168 pub unsafe extern "C" fn __rust_rwlock_rdlock(p: *mut RWLock) -> i32 {
// C ABI wrapper around RWLock::write. Body elided in this view — presumably
// returns EINVAL for a null `p` and 0 on success; TODO confirm against the
// full source.
177 pub unsafe extern "C" fn __rust_rwlock_wrlock(p: *mut RWLock) -> i32 {
// C ABI wrapper around RWLock::unlock (the read/write-agnostic unlock
// above). Body elided in this view — presumably returns EINVAL for a null
// `p` and 0 on success; TODO confirm against the full source.
185 pub unsafe extern "C" fn __rust_rwlock_unlock(p: *mut RWLock) -> i32 {
193 // the following functions are also used by the libunwind port. They're
194 // included here to make sure parallel codegen and LTO don't mess things up.
// C ABI stderr print: reads up to `s` bytes at `m`, truncates at the first
// NUL byte, and only proceeds if the bytes are valid UTF-8 (invalid input
// is silently dropped). The actual write call is elided in this view.
196 pub unsafe extern "C" fn __rust_print_err(m: *mut u8, s: i32) {
200 let buf = slice::from_raw_parts(m as *const u8, s as _);
201 if let Ok(s) = str::from_utf8(&buf[..buf.iter().position(|&b| b == 0).unwrap_or(buf.len())]) {
// C ABI abort hook for libunwind: forwards to the SGX sys abort.
207 pub unsafe extern "C" fn __rust_abort() {
208 ::sys::abort_internal();
// C ABI allocator shim: forwards to the Rust allocator with an unchecked
// layout — caller must supply a valid (nonzero, power-of-two) align.
212 pub unsafe extern "C" fn __rust_c_alloc(size: usize, align: usize) -> *mut u8 {
213 alloc::alloc(Layout::from_size_align_unchecked(size, align))
// C ABI deallocator shim: size and align must match the original
// __rust_c_alloc call, per the Rust allocator contract.
217 pub unsafe extern "C" fn __rust_c_dealloc(ptr: *mut u8, size: usize, align: usize) {
218 alloc::dealloc(ptr, Layout::from_size_align_unchecked(size, align))
225 use core::array::FixedSizeArray;
226 use mem::MaybeUninit;
229 // The below test verifies that the bytes of initialized RWLock are the ones
230 // we use in libunwind.
231 // If they change we need to update src/UnwindRustSgx.h in libunwind.
233 fn test_c_rwlock_initializer() {
// Expected byte image of RWLock::new() — two 64-byte SpinMutex<WaitVariable>
// halves (readers, then writer). NOTE(review): the nonzero bytes presumably
// encode SpinMutex/WaitQueue internals; confirm against the full layout.
234 const RWLOCK_INIT: &[u8] = &[
235 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
236 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
237 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
238 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
239 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
240 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
241 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
242 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
243 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
244 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
245 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
246 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
247 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
248 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
249 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
250 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
// Zero the full 128 bytes first so padding is deterministic, then write a
// freshly-initialized RWLock over it before comparing raw bytes.
253 let mut init = MaybeUninit::<RWLock>::zeroed();
254 init.set(RWLock::new());
256 mem::transmute::<_, [u8; 128]>(init.into_inner()).as_slice(),