1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
17 use ops::{Deref, DerefMut};
19 use sys_common::mutex as sys;
20 use sys_common::poison::{self, TryLockError, TryLockResult, LockResult};
22 /// A mutual exclusion primitive useful for protecting shared data
24 /// This mutex will block threads waiting for the lock to become available. The
25 /// mutex can also be statically initialized or created via a `new`
26 /// constructor. Each mutex has a type parameter which represents the data that
27 /// it is protecting. The data can only be accessed through the RAII guards
28 /// returned from `lock` and `try_lock`, which guarantees that the data is only
29 /// ever accessed when the mutex is locked.
33 /// The mutexes in this module implement a strategy called "poisoning" where a
34 /// mutex is considered poisoned whenever a thread panics while holding the
35 /// lock. Once a mutex is poisoned, all other threads are unable to access the
36 /// data by default as it is likely tainted (some invariant is not being
39 /// For a mutex, this means that the `lock` and `try_lock` methods return a
40 /// `Result` which indicates whether a mutex has been poisoned or not. Most
41 /// usage of a mutex will simply `unwrap()` these results, propagating panics
42 /// among threads to ensure that a possibly invalid invariant is not witnessed.
44 /// A poisoned mutex, however, does not prevent all access to the underlying
45 /// data. The `PoisonError` type has an `into_inner` method which will return
46 /// the guard that would have otherwise been returned on a successful lock. This
47 /// allows access to the data, despite the lock being poisoned.
52 /// use std::sync::{Arc, Mutex};
54 /// use std::sync::mpsc::channel;
56 /// const N: usize = 10;
58 /// // Spawn a few threads to increment a shared variable (non-atomically), and
59 /// // let the main thread know once all increments are done.
61 /// // Here we're using an Arc to share memory among threads, and the data inside
62 /// // the Arc is protected with a mutex.
63 /// let data = Arc::new(Mutex::new(0));
65 /// let (tx, rx) = channel();
67 /// let (data, tx) = (data.clone(), tx.clone());
68 /// thread::spawn(move || {
69 /// // The shared static can only be accessed once the lock is held.
70 /// // Our non-atomic increment is safe because we're the only thread
71 /// // which can access the shared state when the lock is held.
73 /// // We unwrap() the return value to assert that we are not expecting
74 /// // threads to ever fail while holding the lock.
75 /// let mut data = data.lock().unwrap();
78 /// tx.send(()).unwrap();
80 /// // the lock is unlocked here when `data` goes out of scope.
84 /// rx.recv().unwrap();
87 /// To recover from a poisoned mutex:
90 /// use std::sync::{Arc, Mutex};
93 /// let lock = Arc::new(Mutex::new(0_u32));
94 /// let lock2 = lock.clone();
96 /// let _ = thread::spawn(move || -> () {
97 /// // This thread will acquire the mutex first, unwrapping the result of
98 /// // `lock` because the lock has not been poisoned.
99 /// let _lock = lock2.lock().unwrap();
101 /// // This panic while holding the lock (`_lock` is in scope) will poison
106 /// // The lock is poisoned by this point, but the returned result can be
107 /// // pattern matched on to return the underlying guard on both branches.
108 /// let mut guard = match lock.lock() {
109 /// Ok(guard) => guard,
110 /// Err(poisoned) => poisoned.into_inner(),
115 #[stable(feature = "rust1", since = "1.0.0")]
116 pub struct Mutex<T: ?Sized> {
117 // Note that this static mutex is in a *box*, not inlined into the struct
118 // itself. Once a native mutex has been used once, its address can never
119 // change (it can't be moved). This mutex type can be safely moved at any
120 // time, so to ensure that the native mutex is used correctly we box the
121 // inner lock to give it a constant address.
// Bundles the raw OS lock together with the poison flag (see `StaticMutex`).
122 inner: Box<StaticMutex>,
126 // these are the only places where `T: Send` matters; all other
127 // functionality works fine on a single thread.
// SAFETY(review): sending the `Mutex` across threads moves the owned `T`
// with it, hence the `T: Send` bound.
128 unsafe impl<T: ?Sized + Send> Send for Mutex<T> { }
// SAFETY(review): a shared `&Mutex<T>` only exposes `T` through a held
// lock, so access is serialized; `T: Send` suffices (no `T: Sync` needed)
// because at most one thread touches the data at a time.
130 unsafe impl<T: ?Sized + Send> Sync for Mutex<T> { }
132 /// The static mutex type is provided to allow for static allocation of mutexes.
134 /// Note that this is a separate type because using a Mutex correctly means that
135 /// it needs to have a destructor run. In Rust, statics are not allowed to have
136 /// destructors. As a result, a `StaticMutex` has one extra method when compared
137 /// to a `Mutex`, a `destroy` method. This method is unsafe to call, and
138 /// documentation can be found directly on the method.
143 /// #![feature(static_mutex)]
145 /// use std::sync::{StaticMutex, MUTEX_INIT};
147 /// static LOCK: StaticMutex = MUTEX_INIT;
150 /// let _g = LOCK.lock().unwrap();
151 /// // do some productive work
153 /// // lock is unlocked here.
155 #[unstable(feature = "static_mutex",
156 reason = "may be merged with Mutex in the future",
158 pub struct StaticMutex {
// Set when a holder panics while the lock is held; subsequent lockers then
// observe an `Err` (see "poisoning" in the module docs above).
160 poison: poison::Flag,
163 /// An RAII implementation of a "scoped lock" of a mutex. When this structure is
164 /// dropped (falls out of scope), the lock will be unlocked.
166 /// The data protected by the mutex can be accessed through this guard via its
167 /// `Deref` and `DerefMut` implementations.
169 #[stable(feature = "rust1", since = "1.0.0")]
170 pub struct MutexGuard<'a, T: ?Sized + 'a> {
171 // funny underscores due to how Deref/DerefMut currently work (they
172 // disregard field privacy).
// The lock this guard will unlock (and possibly poison) when dropped.
173 __lock: &'a StaticMutex,
// Cell holding the protected data; dereferenced by Deref/DerefMut.
174 __data: &'a UnsafeCell<T>,
// Snapshot of the panic state taken when the lock was acquired; consulted
// on drop to decide whether to poison — NOTE(review): confirm in poison.rs.
175 __poison: poison::Guard,
// A guard must stay on the thread that acquired the lock, so it is
// explicitly `!Send`. NOTE(review): many OS mutexes require unlocking on
// the owning thread — confirm this holds for each `sys` backend.
178 impl<'a, T: ?Sized> !marker::Send for MutexGuard<'a, T> {}
180 /// Static initialization of a mutex. This constant can be used to initialize
181 /// other mutex constants.
182 #[unstable(feature = "static_mutex",
183 reason = "may be merged with Mutex in the future",
// Identical to `StaticMutex::new()`; kept as a const for static initializers.
185 pub const MUTEX_INIT: StaticMutex = StaticMutex::new();
188 /// Creates a new mutex in an unlocked state ready for use.
189 #[stable(feature = "rust1", since = "1.0.0")]
190 pub fn new(t: T) -> Mutex<T> {
// Heap-allocate the StaticMutex so the native lock gets a stable address
// (see the comment on the `inner` field of `Mutex`).
192 inner: box StaticMutex::new(),
// The data lives in an UnsafeCell so guards can hand out &T / &mut T.
193 data: UnsafeCell::new(t),
198 impl<T: ?Sized> Mutex<T> {
199 /// Acquires a mutex, blocking the current thread until it is able to do so.
201 /// This function will block the local thread until it is available to acquire
202 /// the mutex. Upon returning, the thread is the only thread with the mutex
203 /// held. An RAII guard is returned to allow scoped unlock of the lock. When
204 /// the guard goes out of scope, the mutex will be unlocked.
208 /// If another user of this mutex panicked while holding the mutex, then
209 /// this call will return an error once the mutex is acquired.
210 #[stable(feature = "rust1", since = "1.0.0")]
211 pub fn lock(&self) -> LockResult<MutexGuard<T>> {
// Block on the raw OS mutex first; only then build the guard, which also
// checks the poison flag (see `MutexGuard::new`).
212 unsafe { self.inner.lock.lock() }
213 MutexGuard::new(&*self.inner, &self.data)
216 /// Attempts to acquire this lock.
218 /// If the lock could not be acquired at this time, then `Err` is returned.
219 /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
220 /// guard is dropped.
222 /// This function does not block.
226 /// If another user of this mutex panicked while holding the mutex, then
227 /// this call will return failure if the mutex would otherwise be
229 #[stable(feature = "rust1", since = "1.0.0")]
230 pub fn try_lock(&self) -> TryLockResult<MutexGuard<T>> {
231 if unsafe { self.inner.lock.try_lock() } {
// Lock acquired: build the guard; `try!` propagates a poison error,
// NOTE(review): presumably converted into `TryLockError::Poisoned` via
// a `From` impl — confirm in poison.rs.
232 Ok(try!(MutexGuard::new(&*self.inner, &self.data)))
// The raw try_lock failed, i.e. another thread currently holds the lock.
234 Err(TryLockError::WouldBlock)
238 /// Determines whether the lock is poisoned.
240 /// If another thread is active, the lock can still become poisoned at any
241 /// time. You should not trust a `false` value for program correctness
242 /// without additional synchronization.
244 #[stable(feature = "sync_poison", since = "1.2.0")]
245 pub fn is_poisoned(&self) -> bool {
// Non-blocking read of the flag that `MutexGuard`'s drop sets when its
// holder panicked.
246 self.inner.poison.get()
249 /// Consumes this mutex, returning the underlying data.
253 /// If another user of this mutex panicked while holding the mutex, then
254 /// this call will return an error instead.
255 #[unstable(feature = "mutex_into_inner", reason = "recently added", issue = "28968")]
256 pub fn into_inner(self) -> LockResult<T> where T: Sized {
257 // We know statically that there are no outstanding references to
258 // `self` so there's no need to lock the inner StaticMutex.
260 // To get the inner value, we'd like to call `data.into_inner()`,
261 // but because `Mutex` impl-s `Drop`, we can't move out of it, so
262 // we'll have to destructure it manually instead.
264 // Like `let Mutex { inner, data } = self`.
265 let (inner, data) = {
266 let Mutex { ref inner, ref data } = self;
267 (ptr::read(inner), ptr::read(data))
// NOTE(review): `ptr::read` duplicates both fields; the original `self`
// must not also run `Drop` (the `mem::forget(self)` preventing a
// double-destroy is not visible in this excerpt — confirm it is present).
270 inner.lock.destroy(); // Keep in sync with the `Drop` impl.
// Report poison status, yielding the data either way (Ok or inside Err).
272 poison::map_result(inner.poison.borrow(), |_| data.into_inner())
276 /// Returns a mutable reference to the underlying data.
278 /// Since this call borrows the `Mutex` mutably, no actual locking needs to
279 /// take place---the mutable borrow statically guarantees no locks exist.
283 /// If another user of this mutex panicked while holding the mutex, then
284 /// this call will return an error instead.
285 #[unstable(feature = "mutex_get_mut", reason = "recently added", issue = "28968")]
286 pub fn get_mut(&mut self) -> LockResult<&mut T> {
287 // We know statically that there are no other references to `self`, so
288 // there's no need to lock the inner StaticMutex.
// SAFETY: `&mut self` guarantees exclusive access, so dereferencing the
// cell cannot race with any guard.
289 let data = unsafe { &mut *self.data.get() };
290 poison::map_result(self.inner.poison.borrow(), |_| data )
294 #[stable(feature = "rust1", since = "1.0.0")]
// Releases the OS-level mutex resources when the `Mutex` itself is dropped.
295 impl<T: ?Sized> Drop for Mutex<T> {
297 // This is actually safe b/c we know that there is no further usage of
298 // this mutex (it's up to the user to arrange for a mutex to get
299 // dropped, that's not our job)
301 // IMPORTANT: This code must be kept in sync with `Mutex::into_inner`.
302 unsafe { self.inner.lock.destroy() }
306 #[stable(feature = "rust1", since = "1.0.0")]
307 impl<T: ?Sized + fmt::Debug + 'static> fmt::Debug for Mutex<T> {
308 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Use try_lock so formatting never blocks; a currently-held lock is
// rendered as "<locked>" instead of waiting for the data.
309 match self.try_lock() {
310 Ok(guard) => write!(f, "Mutex {{ data: {:?} }}", &*guard),
311 Err(TryLockError::Poisoned(err)) => {
// Poisoned mutexes still expose their data through the error's guard.
312 write!(f, "Mutex {{ data: Poisoned({:?}) }}", &**err.get_ref())
314 Err(TryLockError::WouldBlock) => write!(f, "Mutex {{ <locked> }}")
// Zero-sized stand-in "protected data" for StaticMutex: a StaticMutex
// guards no data, but `MutexGuard` requires an `UnsafeCell` to point at
// (see its use in `StaticMutex::lock`/`try_lock`).
319 struct Dummy(UnsafeCell<()>);
// SAFETY(review): asserted Sync because the contained `()` is zero-sized;
// guards from distinct StaticMutexes may alias this cell, but no
// meaningful data is ever read or written through it.
320 unsafe impl Sync for Dummy {}
321 static DUMMY: Dummy = Dummy(UnsafeCell::new(()));
323 #[unstable(feature = "static_mutex",
324 reason = "may be merged with Mutex in the future",
327 /// Creates a new mutex in an unlocked state ready for use.
// `const fn` so it can initialize statics (e.g. `MUTEX_INIT` above).
328 pub const fn new() -> StaticMutex {
330 lock: sys::Mutex::new(),
331 poison: poison::Flag::new(),
335 /// Acquires this lock, see `Mutex::lock`
// `&'static self`: the returned guard borrows the mutex for 'static, which
// is only sound because a StaticMutex lives for the whole program.
337 pub fn lock(&'static self) -> LockResult<MutexGuard<()>> {
338 unsafe { self.lock.lock() }
// No protected data here: hand the guard the shared zero-sized DUMMY cell.
339 MutexGuard::new(self, &DUMMY.0)
342 /// Attempts to grab this lock, see `Mutex::try_lock`
344 pub fn try_lock(&'static self) -> TryLockResult<MutexGuard<()>> {
345 if unsafe { self.lock.try_lock() } {
// Acquired: build the guard over the shared zero-sized DUMMY cell;
// `try!` propagates a poison error as in `Mutex::try_lock`.
346 Ok(try!(MutexGuard::new(self, &DUMMY.0)))
348 Err(TryLockError::WouldBlock)
352 /// Deallocates resources associated with this static mutex.
354 /// This method is unsafe because it provides no guarantees that there are
355 /// no active users of this mutex, and safety is not guaranteed if there are
356 /// active users of this mutex.
358 /// This method is required to ensure that there are no memory leaks on
359 /// *all* platforms. It may be the case that some platforms do not leak
360 /// memory if this method is not called, but this is not guaranteed to be
361 /// true on all platforms.
// Manual counterpart of the automatic destroy done by `Drop for Mutex` /
// `Mutex::into_inner`, which statics never get to run.
362 pub unsafe fn destroy(&'static self) {
367 impl<'mutex, T: ?Sized> MutexGuard<'mutex, T> {
/// Wraps an already-acquired lock in a guard. If the mutex is poisoned,
/// the guard is returned inside `Err` so callers can still reach the data
/// (see the module docs on poisoning).
369 fn new(lock: &'mutex StaticMutex, data: &'mutex UnsafeCell<T>)
370 -> LockResult<MutexGuard<'mutex, T>> {
371 poison::map_result(lock.poison.borrow(), |guard| {
381 #[stable(feature = "rust1", since = "1.0.0")]
382 impl<'mutex, T: ?Sized> Deref for MutexGuard<'mutex, T> {
385 fn deref(&self) -> &T {
// SAFETY(review): the guard witnesses a held lock, so this thread has
// exclusive access to the cell; relies on `MutexGuard` being `!Send`.
386 unsafe { &*self.__data.get() }
390 #[stable(feature = "rust1", since = "1.0.0")]
391 impl<'mutex, T: ?Sized> DerefMut for MutexGuard<'mutex, T> {
392 fn deref_mut(&mut self) -> &mut T {
// SAFETY(review): `&mut self` plus the held lock make this the only
// reference to the data, so handing out `&mut T` cannot alias.
393 unsafe { &mut *self.__data.get() }
397 #[stable(feature = "rust1", since = "1.0.0")]
398 impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> {
// First record whether a panic occurred while this guard was alive (this
// is what poisons the mutex), then release the raw lock — in that order,
// so the next locker observes an up-to-date poison flag.
402 self.__lock.poison.done(&self.__poison);
403 self.__lock.lock.unlock();
// Crate-internal accessor to a guard's raw lock — NOTE(review): presumably
// used by `Condvar::wait` to unlock/relock around the wait; confirm at the
// call sites.
408 pub fn guard_lock<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a sys::Mutex {
// Crate-internal accessor to the poison flag behind a guard — NOTE(review):
// presumably lets `Condvar` re-check poisoning after waking; confirm at the
// call sites.
412 pub fn guard_poison<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a poison::Flag {
420 use sync::mpsc::channel;
421 use sync::{Arc, Mutex, StaticMutex, Condvar};
422 use sync::atomic::{AtomicUsize, Ordering};
425 struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
427 #[derive(Eq, PartialEq, Debug)]
430 unsafe impl<T: Send> Send for Packet<T> {}
431 unsafe impl<T> Sync for Packet<T> {}
435 let m = Mutex::new(());
436 drop(m.lock().unwrap());
437 drop(m.lock().unwrap());
442 static M: StaticMutex = StaticMutex::new();
444 drop(M.lock().unwrap());
445 drop(M.lock().unwrap());
452 static M: StaticMutex = StaticMutex::new();
453 static mut CNT: u32 = 0;
460 let _g = M.lock().unwrap();
466 let (tx, rx) = channel();
468 let tx2 = tx.clone();
469 thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
470 let tx2 = tx.clone();
471 thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
478 assert_eq!(unsafe {CNT}, J * K * 2);
486 let m = Mutex::new(());
487 *m.try_lock().unwrap() = ();
491 fn test_into_inner() {
492 let m = Mutex::new(NonCopy(10));
493 assert_eq!(m.into_inner().unwrap(), NonCopy(10));
497 fn test_into_inner_drop() {
498 struct Foo(Arc<AtomicUsize>);
501 self.0.fetch_add(1, Ordering::SeqCst);
504 let num_drops = Arc::new(AtomicUsize::new(0));
505 let m = Mutex::new(Foo(num_drops.clone()));
506 assert_eq!(num_drops.load(Ordering::SeqCst), 0);
508 let _inner = m.into_inner().unwrap();
509 assert_eq!(num_drops.load(Ordering::SeqCst), 0);
511 assert_eq!(num_drops.load(Ordering::SeqCst), 1);
515 fn test_into_inner_poison() {
516 let m = Arc::new(Mutex::new(NonCopy(10)));
518 let _ = thread::spawn(move || {
519 let _lock = m2.lock().unwrap();
520 panic!("test panic in inner thread to poison mutex");
523 assert!(m.is_poisoned());
524 match Arc::try_unwrap(m).unwrap().into_inner() {
525 Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
526 Ok(x) => panic!("into_inner of poisoned Mutex is Ok: {:?}", x),
532 let mut m = Mutex::new(NonCopy(10));
533 *m.get_mut().unwrap() = NonCopy(20);
534 assert_eq!(m.into_inner().unwrap(), NonCopy(20));
538 fn test_get_mut_poison() {
539 let m = Arc::new(Mutex::new(NonCopy(10)));
541 let _ = thread::spawn(move || {
542 let _lock = m2.lock().unwrap();
543 panic!("test panic in inner thread to poison mutex");
546 assert!(m.is_poisoned());
547 match Arc::try_unwrap(m).unwrap().get_mut() {
548 Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
549 Ok(x) => panic!("get_mut of poisoned Mutex is Ok: {:?}", x),
554 fn test_mutex_arc_condvar() {
555 let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));
556 let packet2 = Packet(packet.0.clone());
557 let (tx, rx) = channel();
558 let _t = thread::spawn(move|| {
559 // wait until parent gets in
561 let &(ref lock, ref cvar) = &*packet2.0;
562 let mut lock = lock.lock().unwrap();
567 let &(ref lock, ref cvar) = &*packet.0;
568 let mut lock = lock.lock().unwrap();
569 tx.send(()).unwrap();
572 lock = cvar.wait(lock).unwrap();
577 fn test_arc_condvar_poison() {
578 let packet = Packet(Arc::new((Mutex::new(1), Condvar::new())));
579 let packet2 = Packet(packet.0.clone());
580 let (tx, rx) = channel();
582 let _t = thread::spawn(move || -> () {
584 let &(ref lock, ref cvar) = &*packet2.0;
585 let _g = lock.lock().unwrap();
587 // Parent should fail when it wakes up.
591 let &(ref lock, ref cvar) = &*packet.0;
592 let mut lock = lock.lock().unwrap();
593 tx.send(()).unwrap();
595 match cvar.wait(lock) {
598 assert_eq!(*lock, 1);
606 fn test_mutex_arc_poison() {
607 let arc = Arc::new(Mutex::new(1));
608 assert!(!arc.is_poisoned());
609 let arc2 = arc.clone();
610 let _ = thread::spawn(move|| {
611 let lock = arc2.lock().unwrap();
612 assert_eq!(*lock, 2);
614 assert!(arc.lock().is_err());
615 assert!(arc.is_poisoned());
619 fn test_mutex_arc_nested() {
620 // Tests nested mutexes and access
621 // to underlying data.
622 let arc = Arc::new(Mutex::new(1));
623 let arc2 = Arc::new(Mutex::new(arc));
624 let (tx, rx) = channel();
625 let _t = thread::spawn(move|| {
626 let lock = arc2.lock().unwrap();
627 let lock2 = lock.lock().unwrap();
628 assert_eq!(*lock2, 1);
629 tx.send(()).unwrap();
635 fn test_mutex_arc_access_in_unwind() {
636 let arc = Arc::new(Mutex::new(1));
637 let arc2 = arc.clone();
638 let _ = thread::spawn(move|| -> () {
642 impl Drop for Unwinder {
644 *self.i.lock().unwrap() += 1;
647 let _u = Unwinder { i: arc2 };
650 let lock = arc.lock().unwrap();
651 assert_eq!(*lock, 2);
655 fn test_mutex_unsized() {
656 let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
658 let b = &mut *mutex.lock().unwrap();
662 let comp: &[i32] = &[4, 2, 5];
663 assert_eq!(&*mutex.lock().unwrap(), comp);