1 #[cfg(all(test, not(target_os = "emscripten")))]
4 use crate::cell::UnsafeCell;
5 use crate::marker::PhantomPinned;
7 use crate::panic::{RefUnwindSafe, UnwindSafe};
9 use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
10 use crate::sys::locks as sys;
12 /// A re-entrant mutual exclusion
14 /// This mutex will block *other* threads waiting for the lock to become
15 /// available. The thread which has already locked the mutex can lock it
16 /// multiple times without blocking, preventing a common source of deadlocks.
18 /// This is used by stdout().lock() and friends.
20 /// ## Implementation details
22 /// The 'owner' field tracks which thread has locked the mutex.
24 /// We use current_thread_unique_ptr() as the thread identifier,
25 /// which is just the address of a thread local variable.
27 /// If `owner` is set to the identifier of the current thread,
28 /// we assume the mutex is already locked and instead of locking it again,
29 /// we increment `lock_count`.
31 /// When unlocking, we decrement `lock_count`, and only unlock the mutex when
/// it reaches zero.
34 /// `lock_count` is protected by the mutex and only accessed by the thread that has
35 /// locked the mutex, so needs no synchronization.
37 /// `owner` can be checked by other threads that want to see if they already
38 /// hold the lock, so needs to be atomic. If it compares equal, we're on the
39 /// same thread that holds the mutex and memory access can use relaxed ordering
40 /// since we're not dealing with multiple threads. If it compares unequal,
41 /// synchronization is left to the mutex, making relaxed memory ordering for
42 /// the `owner` field fine in all cases.
43 pub struct ReentrantMutex<T> {
// NOTE(review): the `mutex: sys::Mutex`, `owner: AtomicUsize` and data
// fields referenced by the methods below are not visible in this excerpt.
// Number of times the owning thread has acquired the lock. Only read or
// written while the mutex is held (see lock/try_lock/Drop), so a plain
// UnsafeCell with no synchronization suffices.
46 lock_count: UnsafeCell<u32>,
// Makes the type !Unpin: `init` requires the mutex to stay at a fixed
// address afterwards (see its doc comment).
48 _pinned: PhantomPinned,
// SAFETY: `T: Send` (rather than `T: Sync`) suffices for both impls because
// access to the protected data only happens through the guard, and the guard
// is `!Send` — so all access to `T` stays on the thread that locked the mutex.
51 unsafe impl<T: Send> Send for ReentrantMutex<T> {}
52 unsafe impl<T: Send> Sync for ReentrantMutex<T> {}
// Unconditionally unwind-safe (no bound on `T`): a panic while the lock is
// held cannot leave the mutex's own bookkeeping in a broken state.
// NOTE(review): unconditional impls mirror std's plain Mutex — confirm the
// poisoning story matches, since this type has no poison flag.
54 impl<T> UnwindSafe for ReentrantMutex<T> {}
55 impl<T> RefUnwindSafe for ReentrantMutex<T> {}
57 /// An RAII implementation of a "scoped lock" of a mutex. When this structure is
58 /// dropped (falls out of scope), the lock will be unlocked.
60 /// The data protected by the mutex can be accessed through this guard via its
61 /// Deref implementation.
65 /// Unlike `MutexGuard`, `ReentrantMutexGuard` does not implement `DerefMut`,
66 /// because implementation of the trait would violate Rust’s reference aliasing
67 /// rules. Use interior mutability (usually `RefCell`) in order to mutate the
/// contained value.
69 #[must_use = "if unused the ReentrantMutex will immediately unlock"]
70 pub struct ReentrantMutexGuard<'a, T: 'a> {
// Pinned reference back to the owning mutex; used by Deref and by Drop
// (which decrements `lock_count` and eventually unlocks).
71 lock: Pin<&'a ReentrantMutex<T>>,
// The guard must not leave the thread that acquired the lock: `owner` and
// `lock_count` bookkeeping (and the final sys-mutex unlock in Drop) assume
// they run on the owning thread.
74 impl<T> !Send for ReentrantMutexGuard<'_, T> {}
76 impl<T> ReentrantMutex<T> {
77 /// Creates a new reentrant mutex in an unlocked state.
///
/// # Safety
///
81 /// This function is unsafe because it is required that `init` is called
82 /// once this mutex is in its final resting place, and only then are the
83 /// lock/unlock methods safe.
84 pub const unsafe fn new(t: T) -> ReentrantMutex<T> {
// NOTE(review): the struct-literal opener and the field storing `t`
// (original line 85/89) are not visible in this excerpt.
86 mutex: sys::Mutex::new(),
// 0 means "no owner": current_thread_unique_ptr() is non-null, so no
// live thread id can collide with it.
87 owner: AtomicUsize::new(0),
88 lock_count: UnsafeCell::new(0),
90 _pinned: PhantomPinned,
94 /// Initializes this mutex so it's ready for use.
///
/// # Safety
///
98 /// Unsafe to call more than once, and must be called after this will no
99 /// longer move in memory.
100 pub unsafe fn init(self: Pin<&mut Self>) {
// get_unchecked_mut is fine: we only initialize the sys mutex in place
// and never move out of it; the caller upholds the pinning contract.
// NOTE(review): presumably sys::Mutex::init may record its own address
// on some platforms, hence the pinning requirement — confirm in sys::locks.
101 self.get_unchecked_mut().mutex.init()
104 /// Acquires a mutex, blocking the current thread until it is able to do so.
106 /// This function will block the caller until it is available to acquire the mutex.
107 /// Upon returning, the thread is the only thread with the mutex held. When the thread
108 /// calling this method already holds the lock, the call shall succeed without
/// blocking.
///
/// # Panics
///
113 /// If another user of this mutex panicked while holding the mutex, then
114 /// this call will return failure if the mutex would otherwise be
/// acquired.
116 pub fn lock(self: Pin<&Self>) -> ReentrantMutexGuard<'_, T> {
117 let this_thread = current_thread_unique_ptr();
118 // Safety: We only touch lock_count when we own the lock,
119 // and since self is pinned we can safely call the lock() on the mutex.
// Re-entrant fast path: `owner` can only equal this thread's id if we
// set it ourselves while holding the mutex, so Relaxed is enough.
121 if self.owner.load(Relaxed) == this_thread {
122 self.increment_lock_count();
// First acquisition by this thread: record ownership, count starts at 1.
// NOTE(review): the `else` branch with the blocking `self.mutex.lock()`
// call (original line ~123-124) is not visible in this excerpt.
125 self.owner.store(this_thread, Relaxed);
126 debug_assert_eq!(*self.lock_count.get(), 0);
127 *self.lock_count.get() = 1;
130 ReentrantMutexGuard { lock: self }
133 /// Attempts to acquire this lock.
135 /// If the lock could not be acquired at this time, then `Err` is returned.
136 /// Otherwise, an RAII guard is returned.
138 /// This function does not block.
///
/// # Panics
///
142 /// If another user of this mutex panicked while holding the mutex, then
143 /// this call will return failure if the mutex would otherwise be
/// acquired.
145 pub fn try_lock(self: Pin<&Self>) -> Option<ReentrantMutexGuard<'_, T>> {
146 let this_thread = current_thread_unique_ptr();
147 // Safety: We only touch lock_count when we own the lock,
148 // and since self is pinned we can safely call the try_lock on the mutex.
// Re-entrant path: we already hold the sys mutex, just bump the count.
150 if self.owner.load(Relaxed) == this_thread {
151 self.increment_lock_count();
152 Some(ReentrantMutexGuard { lock: self })
// Uncontended path: we just acquired the sys mutex, take ownership.
153 } else if self.mutex.try_lock() {
154 self.owner.store(this_thread, Relaxed);
155 debug_assert_eq!(*self.lock_count.get(), 0);
156 *self.lock_count.get() = 1;
157 Some(ReentrantMutexGuard { lock: self })
// Bumps the re-entrancy count, panicking on u32 overflow.
//
// SAFETY (caller): must only be called while the current thread owns the
// mutex — `lock_count` has no synchronization of its own.
164 unsafe fn increment_lock_count(&self) {
// NOTE(review): the `.checked_add(1)` step (original line 166) that
// produces the Option consumed by `.expect` is not visible in this excerpt.
165 *self.lock_count.get() = (*self.lock_count.get())
167 .expect("lock count overflow in reentrant mutex");
// Shared access to the protected data. DerefMut is deliberately not
// implemented: re-entrant locking could hand out aliasing `&mut T`s
// (see the guard's doc comment).
171 impl<T> Deref for ReentrantMutexGuard<'_, T> {
174 fn deref(&self) -> &T {
179 impl<T> Drop for ReentrantMutexGuard<'_, T> {
// NOTE(review): the `fn drop(&mut self)` signature and `unsafe` block
// opener (original lines ~180-183) are not visible in this excerpt.
182 // Safety: We own the lock, and the lock is pinned.
// Decrement the re-entrancy count; only the outermost guard (count
// reaching 0) releases ownership and unlocks the sys mutex.
184 *self.lock.lock_count.get() -= 1;
185 if *self.lock.lock_count.get() == 0 {
// Clear `owner` (0 = no owner) before releasing the sys mutex, so a
// thread acquiring it afterwards never observes our stale id.
186 self.lock.owner.store(0, Relaxed);
187 self.lock.mutex.unlock();
193 /// Get an address that is unique per running thread.
195 /// This can be used as a non-null usize-sized ID.
///
/// The address of a thread-local is stable for the thread's lifetime and
/// distinct across concurrently running threads, which is exactly what the
/// `owner` field needs. (Ids may be reused after a thread exits.)
196 pub fn current_thread_unique_ptr() -> usize {
197 // Use a non-drop type to make sure it's still available during thread destruction.
198 thread_local! { static X: u8 = const { 0 } }
199 X.with(|x| <*const _>::addr(x))