2 use crate::mem::MaybeUninit;
3 use crate::sync::atomic::{AtomicU32, Ordering};
4 use crate::sys::cloudabi::abi;
// Thread identifier of the calling thread, maintained by the CloudABI C
// runtime. Its value is ORed with LOCK_WRLOCKED into the lock word to
// record which thread owns a write lock (see try_write/write_unlock below).
// NOTE(review): shown here as a bare static declaration — presumably the
// surrounding `extern` block / attributes are on lines elided from this view.
8 static __pthread_thread_id: abi::tid;
// Count of read locks currently held by this thread; incremented in
// try_read/read and decremented in read_unlock. Used to decide whether
// grabbing another read lock on a contended (kernel-managed) lock is
// deadlock-safe. NOTE(review): presumably #[thread_local] in the full
// source (a plain `static mut` shared across threads would race) — the
// attribute is not visible in this excerpt; confirm against the full file.
12 static mut RDLOCKS_ACQUIRED: u32 = 0;
// Expose the raw AtomicU32 lock word backing an RWLock so that sibling
// sys code (presumably condvar, which must hand the lock to the kernel)
// can operate on it directly. Body elided in this view.
18 pub unsafe fn raw(r: &RWLock) -> &AtomicU32 {
// SAFETY: the lock state is a single AtomicU32 mutated only through
// atomic operations and kernel calls (see the methods below), so sharing
// or sending an RWLock across threads cannot introduce data races.
22 unsafe impl Send for RWLock {}
23 unsafe impl Sync for RWLock {}
// Construct an unlocked RWLock. `const` so it can initialize statics;
// the lock word starts at the kernel's LOCK_UNLOCKED sentinel value.
26 pub const fn new() -> RWLock {
27 RWLock { lock: AtomicU32::new(abi::LOCK_UNLOCKED.0) }
// Try to take a read lock in userspace without blocking; returns whether
// the lock was acquired. NOTE(review): the CAS retry-loop scaffolding and
// the return statements are on lines elided from this view.
30 pub unsafe fn try_read(&self) -> bool {
// Optimistically assume the lock is free: attempt to CAS the lock word
// from UNLOCKED to a reader count of one, retrying with the observed
// value (`old`) on spurious failure.
31 let mut old = abi::LOCK_UNLOCKED.0;
33 self.lock.compare_exchange_weak(old, old + 1, Ordering::Acquire, Ordering::Relaxed)
// `cur` is presumably the value observed by the failing CAS (binding
// elided in this view).
35 if (cur & abi::LOCK_WRLOCKED.0) != 0 {
36 // Another thread already has a write lock.
// Crash on recursive read-after-write by this thread: compare the
// owner recorded in the lock word (kernel-managed bit masked off)
// against this thread's own write-lock signature. Presumably an
// assert_ne!-style macro whose name is on an elided line.
38 old & !abi::LOCK_KERNEL_MANAGED.0,
39 __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
40 "Attempted to acquire a read lock while holding a write lock"
43 } else if (old & abi::LOCK_KERNEL_MANAGED.0) != 0 && RDLOCKS_ACQUIRED == 0 {
44 // Lock has threads waiting for the lock. Only acquire
45 // the lock if we have already acquired read locks. In
46 // that case, it is justified to acquire this lock to
47 // prevent a deadlock.
// Success path: record that this thread holds one more read lock.
53 RDLOCKS_ACQUIRED += 1;
// Acquire a read lock, blocking in the kernel if the userspace fast path
// fails. NOTE(review): the fast-path call (presumably `try_read()`) and
// the struct-literal closers are on lines elided from this view.
57 pub unsafe fn read(&self) {
59 // Call into the kernel to acquire a read lock.
// Build a poll() subscription asking the kernel to grant us a
// read lock on this lock word; PRIVATE scope means the lock is not
// shared between processes.
60 let subscription = abi::subscription {
61 type_: abi::eventtype::LOCK_RDLOCK,
62 union: abi::subscription_union {
63 lock: abi::subscription_lock {
64 lock: &self.lock as *const AtomicU32 as *mut abi::lock,
65 lock_scope: abi::scope::PRIVATE,
// poll() blocks until the kernel grants the lock, writing one event.
// Both the syscall return code and the per-event error are checked.
70 let mut event = MaybeUninit::<abi::event>::uninit();
71 let mut nevents = MaybeUninit::<usize>::uninit();
72 let ret = abi::poll(&subscription, event.as_mut_ptr(), 1, nevents.as_mut_ptr());
73 assert_eq!(ret, abi::errno::SUCCESS, "Failed to acquire read lock");
// SAFETY: poll() succeeded, so it initialized the event we read here.
74 let event = event.assume_init();
75 assert_eq!(event.error, abi::errno::SUCCESS, "Failed to acquire read lock");
// Lock granted: bump this thread's read-lock count.
77 RDLOCKS_ACQUIRED += 1;
81 pub unsafe fn read_unlock(&self) {
82 // Perform a read unlock. We can do this in userspace, except when
83 // other threads are blocked and we are performing the last unlock.
84 // In that case, call into the kernel.
86 // Other threads may attempt to increment the read lock count,
87 // meaning that the call into the kernel could be spurious. To
88 // prevent this from happening, upgrade to a write lock first. This
89 // allows us to call into the kernel, having the guarantee that the
90 // lock value will not change in the meantime.
// Sanity check: this thread must actually hold at least one read lock.
91 assert!(RDLOCKS_ACQUIRED > 0, "Bad lock count");
// `old` is presumably the current lock-word value loaded on an elided
// line; the retry loop around the CASes below is also elided.
94 if old == 1 | abi::LOCK_KERNEL_MANAGED.0 {
95 // Last read lock while threads are waiting. Attempt to upgrade
96 // to a write lock before calling into the kernel to unlock.
97 if let Err(cur) = self.lock.compare_exchange_weak(
// Expected new value: this thread's write-lock signature with the
// kernel-managed bit still set, so waiters stay parked until the
// kernel unlock below. (The CAS's `old`/ordering args are elided.)
99 __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0 | abi::LOCK_KERNEL_MANAGED.0,
105 // Call into the kernel to unlock.
106 let ret = abi::lock_unlock(
107 &self.lock as *const AtomicU32 as *mut abi::lock,
110 assert_eq!(ret, abi::errno::SUCCESS, "Failed to write unlock a rwlock");
114 // No threads waiting or not the last read lock. Just decrement
115 // the read lock count.
// Guard against unlocking an already-unlocked lock...
116 assert_ne!(old & !abi::LOCK_KERNEL_MANAGED.0, 0, "This rwlock is not locked");
// ...and against read-unlocking a lock this thread write-holds.
// Presumably an assert_eq!/assert_ne!-style macro; its name and the
// second comparand are on elided lines.
118 old & abi::LOCK_WRLOCKED.0,
120 "Attempted to read-unlock a write-locked rwlock"
// Decrement the reader count in the lock word via CAS (the new-value
// expression and retry handling are elided).
122 if let Err(cur) = self.lock.compare_exchange_weak(
// Bookkeeping: one fewer read lock held by this thread.
135 RDLOCKS_ACQUIRED -= 1;
// Try to take the write lock in userspace without blocking; returns
// whether it was acquired. NOTE(review): the CAS's trailing arguments and
// the success/failure return paths are on lines elided from this view.
138 pub unsafe fn try_write(&self) -> bool {
139 // Attempt to acquire the lock.
// Single (strong) CAS from UNLOCKED to this thread's write-lock
// signature: thread id ORed with the WRLOCKED bit, which is how
// write-lock ownership is recorded for the asserts elsewhere.
140 if let Err(old) = self.lock.compare_exchange(
141 abi::LOCK_UNLOCKED.0,
142 __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
146 // Failure. Crash upon recursive acquisition.
// If the observed owner (kernel-managed bit masked off) is this
// thread itself, that is a recursive write lock — a hard error.
// Presumably an assert_ne!-style macro whose name is elided.
148 old & !abi::LOCK_KERNEL_MANAGED.0,
149 __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
150 "Attempted to recursive write-lock a rwlock",
// Acquire the write lock, blocking in the kernel when the userspace fast
// path (try_write) fails. NOTE(review): struct-literal closers are on
// lines elided from this view.
159 pub unsafe fn write(&self) {
160 if !self.try_write() {
161 // Call into the kernel to acquire a write lock.
// Mirror of read(): subscribe for a LOCK_WRLOCK grant on this
// process-private lock word and block in poll() until granted.
162 let subscription = abi::subscription {
163 type_: abi::eventtype::LOCK_WRLOCK,
164 union: abi::subscription_union {
165 lock: abi::subscription_lock {
166 lock: &self.lock as *const AtomicU32 as *mut abi::lock,
167 lock_scope: abi::scope::PRIVATE,
// Check both the syscall result and the event's own error field.
172 let mut event = MaybeUninit::<abi::event>::uninit();
173 let mut nevents = MaybeUninit::<usize>::uninit();
174 let ret = abi::poll(&subscription, event.as_mut_ptr(), 1, nevents.as_mut_ptr());
175 assert_eq!(ret, abi::errno::SUCCESS, "Failed to acquire write lock");
// SAFETY: poll() succeeded, so the event has been initialized.
176 let event = event.assume_init();
177 assert_eq!(event.error, abi::errno::SUCCESS, "Failed to acquire write lock");
// Release the write lock. Userspace CAS when uncontended; kernel call
// when waiters are parked. NOTE(review): the assert/CAS macro names and
// their scaffolding are on lines elided from this view.
181 pub unsafe fn write_unlock(&self) {
// Ownership check: the lock word (kernel-managed bit masked off) must
// equal this thread's write-lock signature, i.e. we hold the write lock.
183 self.lock.load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
184 __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
185 "This rwlock is not write-locked by this thread"
// Fast path: CAS our exact signature back to UNLOCKED. This can only
// succeed when no waiters are recorded (kernel-managed bit clear).
191 __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
192 abi::LOCK_UNLOCKED.0,
198 // Lock is managed by kernelspace. Call into the kernel
199 // to unblock waiting threads.
200 let ret = abi::lock_unlock(
201 &self.lock as *const AtomicU32 as *mut abi::lock,
204 assert_eq!(ret, abi::errno::SUCCESS, "Failed to write unlock a rwlock");
// Tear down the lock. There is no kernel-side state to free; just assert
// that the lock word is back to UNLOCKED so that destroying a held lock
// is caught loudly. Presumably an assert_eq!-style macro whose name is on
// an elided line.
208 pub unsafe fn destroy(&self) {
210 self.lock.load(Ordering::Relaxed),
211 abi::LOCK_UNLOCKED.0,
212 "Attempted to destroy locked rwlock"