use crate::cell::UnsafeCell;
use crate::mem;
use crate::sync::atomic::{AtomicU32, Ordering};
use crate::sys::cloudabi::abi;
use crate::sys::rwlock::{self, RWLock};
9 static __pthread_thread_id: abi::tid;
12 // Implement Mutex using an RWLock. This doesn't introduce any
13 // performance overhead in this environment, as the operations would be
14 // implemented identically.
15 pub struct Mutex(RWLock);
17 pub unsafe fn raw(m: &Mutex) -> *mut AtomicU32 {
22 pub const fn new() -> Mutex {
26 pub unsafe fn init(&mut self) {
27 // This function should normally reinitialize the mutex after
28 // moving it to a different memory address. This implementation
29 // does not require adjustments after moving.
32 pub unsafe fn try_lock(&self) -> bool {
36 pub unsafe fn lock(&self) {
40 pub unsafe fn unlock(&self) {
44 pub unsafe fn destroy(&self) {
49 pub struct ReentrantMutex {
50 lock: UnsafeCell<AtomicU32>,
51 recursion: UnsafeCell<u32>,
55 pub unsafe fn uninitialized() -> ReentrantMutex {
59 pub unsafe fn init(&mut self) {
60 self.lock = UnsafeCell::new(AtomicU32::new(abi::LOCK_UNLOCKED.0));
61 self.recursion = UnsafeCell::new(0);
64 pub unsafe fn try_lock(&self) -> bool {
65 // Attempt to acquire the lock.
66 let lock = self.lock.get();
67 let recursion = self.recursion.get();
68 if let Err(old) = (*lock).compare_exchange(
70 __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
74 // If we fail to acquire the lock, it may be the case
75 // that we've already acquired it and may need to recurse.
76 if old & !abi::LOCK_KERNEL_MANAGED.0 == __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0 {
84 assert_eq!(*recursion, 0, "Mutex has invalid recursion count");
89 pub unsafe fn lock(&self) {
91 // Call into the kernel to acquire a write lock.
92 let lock = self.lock.get();
93 let subscription = abi::subscription {
94 type_: abi::eventtype::LOCK_WRLOCK,
95 union: abi::subscription_union {
96 lock: abi::subscription_lock {
97 lock: lock as *mut abi::lock,
98 lock_scope: abi::scope::PRIVATE,
103 let mut event: abi::event = mem::uninitialized();
104 let mut nevents: usize = mem::uninitialized();
105 let ret = abi::poll(&subscription, &mut event, 1, &mut nevents);
106 assert_eq!(ret, abi::errno::SUCCESS, "Failed to acquire mutex");
107 assert_eq!(event.error, abi::errno::SUCCESS, "Failed to acquire mutex");
111 pub unsafe fn unlock(&self) {
112 let lock = self.lock.get();
113 let recursion = self.recursion.get();
115 (*lock).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
116 __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
117 "This mutex is locked by a different thread"
124 __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
125 abi::LOCK_UNLOCKED.0,
131 // Lock is managed by kernelspace. Call into the kernel
132 // to unblock waiting threads.
133 let ret = abi::lock_unlock(lock as *mut abi::lock, abi::scope::PRIVATE);
134 assert_eq!(ret, abi::errno::SUCCESS, "Failed to unlock a mutex");
138 pub unsafe fn destroy(&self) {
139 let lock = self.lock.get();
140 let recursion = self.recursion.get();
142 (*lock).load(Ordering::Relaxed),
143 abi::LOCK_UNLOCKED.0,
144 "Attempted to destroy locked mutex"
146 assert_eq!(*recursion, 0, "Recursion counter invalid");