use crate::arch::wasm32;
use crate::cell::UnsafeCell;
use crate::mem;
use crate::sync::atomic::{AtomicU32, AtomicUsize, Ordering::SeqCst};
use crate::sys::thread;

pub struct Mutex {
    locked: AtomicUsize,
}

pub type MovableMutex = Mutex;

// Mutexes have a pretty simple implementation where they contain an `i32`
// internally that is 0 when unlocked and 1 when the mutex is locked.
// Acquisition has a fast path where it attempts to cmpxchg the 0 to a 1, and
// if it fails it then waits for a notification. Releasing a lock is then done
// by swapping in 0 and then notifying any waiters, if present.
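//
// A minimal usage sketch (hypothetical caller; in practice this type is only
// driven through the higher-level `std::sync` wrappers):
//
//     let m = Mutex::new();
//     unsafe {
//         m.lock();   // fast path: cmpxchg 0 -> 1; slow path: `memory.atomic.wait32`
//         /* critical section */
//         m.unlock(); // swap in 0, then `memory.atomic.notify` one waiter
//     }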

impl Mutex {
    pub const fn new() -> Mutex {
        Mutex { locked: AtomicUsize::new(0) }
    }

    pub unsafe fn init(&mut self) {}

    pub unsafe fn lock(&self) {
        while !self.try_lock() {
            // SAFETY: the caller must uphold the safety contract for `memory_atomic_wait32`.
            let val = unsafe {
                wasm32::memory_atomic_wait32(
                    self.ptr(),
                    1,  // we expect our mutex is locked
                    -1, // wait infinitely
                )
            };
            // we should have either woken up (0) or gotten a not-equal due to a
            // race (1). We should never time out (2).
            debug_assert!(val == 0 || val == 1);
        }
    }

    pub unsafe fn unlock(&self) {
        let prev = self.locked.swap(0, SeqCst);
        debug_assert_eq!(prev, 1);
        wasm32::memory_atomic_notify(self.ptr(), 1); // wake up one waiter, if any
    }

    pub unsafe fn try_lock(&self) -> bool {
        self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok()
    }

    pub unsafe fn destroy(&self) {}

    fn ptr(&self) -> *mut i32 {
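        // `memory_atomic_wait32`/`memory_atomic_notify` operate on 32-bit cells, so the
        // (32-bit on wasm32) `usize` word backing `locked` is reinterpreted as an `i32`;
        // the assert below guards that size assumption.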
        assert_eq!(mem::size_of::<usize>(), mem::size_of::<i32>());
        self.locked.as_mut_ptr() as *mut i32
    }
}

pub struct ReentrantMutex {
    owner: AtomicU32,
    recursions: UnsafeCell<u32>,
}
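
// The `UnsafeCell` makes this type `!Sync` by default; sharing it across threads is
// sound only because `recursions` is read and written exclusively while the lock is
// held (see the protocol described below), so `Send`/`Sync` are asserted manually.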
unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}

// Reentrant mutexes are implemented similarly to the mutexes above, except that
// instead of "1" meaning locked we use the id of a thread to represent
// whether it has locked a mutex. That way we have an atomic counter which
// always holds the id of the thread that currently holds the lock (or 0 if the
// lock is unlocked).
//
// Once a thread acquires a lock recursively, which it detects by looking at
// the value that's already there, it will update a local `recursions` counter
// in a nonatomic fashion (as we hold the lock). The lock is then fully
// released when this recursion counter reaches 0.
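//
// A sketch of the intended behavior for a single (hypothetical) thread:
//
//     let m = unsafe { ReentrantMutex::uninitialized() };
//     unsafe {
//         m.init();
//         m.lock();   // owner: 0 -> my_id + 1
//         m.lock();   // owner is already ours, so recursions: 0 -> 1
//         m.unlock(); // recursions: 1 -> 0, lock still held
//         m.unlock(); // owner -> 0, notify one waiter
//     }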

impl ReentrantMutex {
    pub const unsafe fn uninitialized() -> ReentrantMutex {
        ReentrantMutex { owner: AtomicU32::new(0), recursions: UnsafeCell::new(0) }
    }

    pub unsafe fn init(&self) {}

    pub unsafe fn lock(&self) {
        let me = thread::my_id();
        while let Err(owner) = self._try_lock(me) {
            // SAFETY: the caller must guarantee that `self.ptr()` and `owner` are valid `i32`s.
            let val = unsafe { wasm32::memory_atomic_wait32(self.ptr(), owner as i32, -1) };
            debug_assert!(val == 0 || val == 1);
        }
    }

    pub unsafe fn try_lock(&self) -> bool {
        unsafe { self._try_lock(thread::my_id()).is_ok() }
    }

    unsafe fn _try_lock(&self, id: u32) -> Result<(), u32> {
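        // Shift ids up by one so that a thread id of 0 can never be confused with the
        // "unlocked" sentinel value 0 stored in `owner`.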
        let id = id.checked_add(1).unwrap();
        match self.owner.compare_exchange(0, id, SeqCst, SeqCst) {
            // we transitioned from unlocked to locked
            Ok(_) => {
                debug_assert_eq!(*self.recursions.get(), 0);
                Ok(())
            }

            // we currently own this lock, so let's update our count and return `Ok(())`
            Err(n) if n == id => {
                *self.recursions.get() += 1;
                Ok(())
            }

            // Someone else owns the lock, let our caller take care of it
            Err(other) => Err(other),
        }
    }

    pub unsafe fn unlock(&self) {
        // If we didn't ever recursively lock the lock then we fully unlock the
        // mutex and wake up a waiter, if any. Otherwise we decrement our
        // recursion counter and let someone else take care of the zero.
        match *self.recursions.get() {
            0 => {
                self.owner.swap(0, SeqCst);
                // SAFETY: the caller must guarantee that `self.ptr()` is a valid `i32` pointer.
                unsafe {
                    wasm32::memory_atomic_notify(self.ptr(), 1); // wake up one waiter, if any
                }
            }
            ref mut n => *n -= 1,
        }
    }

    pub unsafe fn destroy(&self) {}

    fn ptr(&self) -> *mut i32 {
        self.owner.as_mut_ptr() as *mut i32
    }
}