3 use crate::mem::MaybeUninit;
4 use crate::sync::atomic::{AtomicU32, Ordering};
5 use crate::sys::cloudabi::abi;
6 use crate::sys::rwlock::{self, RWLock};
// Identifier of the current thread, provided by the C runtime. Its bits are
// OR'ed with `abi::LOCK_WRLOCKED` below to stamp the lock word with its owner
// (see `try_lock`/`unlock`).
// NOTE(review): as shown this is a bare `static` with no initializer — in the
// full file it is presumably declared inside an `extern` block (likely with
// `#[thread_local]`); confirm against the unelided source.
10 static __pthread_thread_id: abi::tid;
13 // Implement Mutex using an RWLock. This doesn't introduce any
14 // performance overhead in this environment, as the operations would be
15 // implemented identically.
// Newtype over `RWLock`: a mutex is simply an RWLock on which only the
// write-lock operations are ever used (see the comment above).
16 pub struct Mutex(RWLock);
// A `Mutex` needs no extra indirection to be movable on this platform —
// `init` below documents that no fixup is required after a move.
18 pub type MovableMutex = Mutex;
// Exposes the mutex's underlying atomic lock word; presumably used by the
// condition-variable implementation to sleep on the lock — confirm against
// callers. (Body elided in this chunk; it likely delegates to `rwlock::raw`.)
20 pub unsafe fn raw(m: &Mutex) -> &AtomicU32 {
// Creates a new, unlocked mutex. (Body elided in this chunk.)
25 pub const fn new() -> Mutex {
29 pub unsafe fn init(&mut self) {
30 // This function should normally reinitialize the mutex after
31 // moving it to a different memory address. This implementation
32 // does not require adjustments after moving.
// Attempts to acquire the lock without blocking; returns `true` on success.
// (Body elided in this chunk; presumably forwards to the RWLock's
// `try_write` — confirm against the unelided source.)
35 pub unsafe fn try_lock(&self) -> bool {
// Blocks until the lock is acquired (presumably the RWLock's write path).
39 pub unsafe fn lock(&self) {
// Releases the lock (presumably the RWLock's write-unlock path).
43 pub unsafe fn unlock(&self) {
// Destroys the mutex; must not be called while the lock is held.
47 pub unsafe fn destroy(&self) {
// A recursive mutex built directly on the CloudABI lock word: the owning
// thread may lock it repeatedly, tracked by a recursion counter.
// Fields are elided in this chunk; from `uninitialized()` below they are
// `lock: AtomicU32` (the kernel-visible lock word) and `recursion: Cell<_>`
// (lock acquisitions beyond the first by the owning thread).
52 pub struct ReentrantMutex {
// SAFETY (review note): `lock` is an atomic, and `recursion` — though a
// non-Sync `Cell` — appears to be read/written only by the thread that
// currently owns the lock (see `try_lock`/`unlock`). Confirm that invariant
// holds in the elided code before relying on these impls.
57 unsafe impl Send for ReentrantMutex {}
58 unsafe impl Sync for ReentrantMutex {}
// Despite its name, this fully initializes the mutex: lock word set to
// `LOCK_UNLOCKED`, recursion count zero. The name matches the cross-platform
// `sys` interface, where some platforms need a later `init` call.
61 pub const unsafe fn uninitialized() -> ReentrantMutex {
62 ReentrantMutex { lock: AtomicU32::new(abi::LOCK_UNLOCKED.0), recursion: Cell::new(0) }
// No-op: `uninitialized()` above already produced a usable mutex.
65 pub unsafe fn init(&self) {}
// Tries to acquire the lock without blocking. Returns `true` if the lock was
// acquired or if the calling thread already holds it (recursion count bumped).
67 pub unsafe fn try_lock(&self) -> bool {
68 // Attempt to acquire the lock.
// CAS the lock word to "owned by this thread, write-locked". The expected
// (unlocked) value and the orderings are elided from this chunk —
// presumably `LOCK_UNLOCKED`; confirm against the unelided source.
69 if let Err(old) = self.lock.compare_exchange(
71 __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
75 // If we fail to acquire the lock, it may be the case
76 // that we've already acquired it and may need to recurse.
// Mask off the kernel-managed bit before comparing: the kernel may set
// it while we hold the lock, and it must not affect the ownership test.
77 if old & !abi::LOCK_KERNEL_MANAGED.0 == __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0 {
78 self.recursion.set(self.recursion.get() + 1);
// First acquisition by this thread: the counter must still be zero, or
// the recursion bookkeeping has been corrupted.
85 assert_eq!(self.recursion.get(), 0, "Mutex has invalid recursion count");
// Blocking acquire. (The fast path — presumably a `try_lock` attempt — is
// elided from this chunk; what remains is the slow path that parks the
// thread in the kernel until the write lock is granted.)
90 pub unsafe fn lock(&self) {
92 // Call into the kernel to acquire a write lock.
93 let lock = &self.lock as *const AtomicU32;
// Subscribe to "this lock becomes write-lockable by us"; `poll` returns
// once the kernel has handed us the lock.
94 let subscription = abi::subscription {
95 type_: abi::eventtype::LOCK_WRLOCK,
96 union: abi::subscription_union {
97 lock: abi::subscription_lock {
98 lock: lock as *mut abi::lock,
// PRIVATE: the lock is only shared between threads of this process.
99 lock_scope: abi::scope::PRIVATE,
104 let mut event = MaybeUninit::<abi::event>::uninit();
105 let mut nevents = MaybeUninit::<usize>::uninit();
106 // SAFETY: `poll` is responsible for initializing `event` and `nevents`;
// we only read `event` after checking that the call returned SUCCESS.
// (The `let ret =` binding for this call is elided from this chunk.)
108 unsafe { abi::poll(&subscription, event.as_mut_ptr(), 1, nevents.as_mut_ptr()) };
109 assert_eq!(ret, abi::errno::SUCCESS, "Failed to acquire mutex");
110 let event = event.assume_init();
// The syscall itself succeeded; also check the per-event error code.
111 assert_eq!(event.error, abi::errno::SUCCESS, "Failed to acquire mutex");
// Releases one level of the lock. Only decrements the recursion counter
// until it reaches zero; the final release clears the lock word (or calls
// into the kernel if waiters are parked on it).
115 pub unsafe fn unlock(&self) {
// Ownership check (inside an elided `assert_eq!`): the lock word, with the
// kernel-managed bit masked off, must name this thread as write-owner.
117 self.lock.load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
118 __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
119 "This mutex is locked by a different thread"
122 let r = self.recursion.get();
// Nested acquisition: just pop one recursion level, keep the lock held.
// (The `r > 0` branch condition is elided from this chunk.)
124 self.recursion.set(r - 1);
// Final release: CAS "owned by us, write-locked" back to unlocked.
// (The surrounding compare_exchange call and its failure branch are
// elided from this chunk.)
128 __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
129 abi::LOCK_UNLOCKED.0,
135 // Lock is managed by kernelspace. Call into the kernel
136 // to unblock waiting threads.
137 let ret = abi::lock_unlock(
138 &self.lock as *const AtomicU32 as *mut abi::lock,
141 assert_eq!(ret, abi::errno::SUCCESS, "Failed to unlock a mutex");
// Destroys the mutex. There is no kernel resource to free; this only
// sanity-checks that the lock is not held and has no pending recursion.
145 pub unsafe fn destroy(&self) {
// (These are the arguments of an elided `assert_eq!`:)
147 self.lock.load(Ordering::Relaxed),
148 abi::LOCK_UNLOCKED.0,
149 "Attempted to destroy locked mutex"
151 assert_eq!(self.recursion.get(), 0, "Recursion counter invalid")