1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
17 use ops::{Deref, DerefMut};
19 use sys_common::mutex as sys;
20 use sys_common::poison::{self, TryLockError, TryLockResult, LockResult};
22 /// A mutual exclusion primitive useful for protecting shared data
24 /// This mutex will block threads waiting for the lock to become available. The
25 /// mutex can also be statically initialized or created via a `new`
26 /// constructor. Each mutex has a type parameter which represents the data that
27 /// it is protecting. The data can only be accessed through the RAII guards
28 /// returned from `lock` and `try_lock`, which guarantees that the data is only
29 /// ever accessed when the mutex is locked.
33 /// The mutexes in this module implement a strategy called "poisoning" where a
34 /// mutex is considered poisoned whenever a thread panics while holding the
35 /// lock. Once a mutex is poisoned, all other threads are unable to access the
36 /// data by default as it is likely tainted (some invariant is not being
39 /// For a mutex, this means that the `lock` and `try_lock` methods return a
40 /// `Result` which indicates whether a mutex has been poisoned or not. Most
41 /// usage of a mutex will simply `unwrap()` these results, propagating panics
42 /// among threads to ensure that a possibly invalid invariant is not witnessed.
44 /// A poisoned mutex, however, does not prevent all access to the underlying
45 /// data. The `PoisonError` type has an `into_inner` method which will return
46 /// the guard that would have otherwise been returned on a successful lock. This
47 /// allows access to the data, despite the lock being poisoned.
52 /// use std::sync::{Arc, Mutex};
54 /// use std::sync::mpsc::channel;
56 /// const N: usize = 10;
58 /// // Spawn a few threads to increment a shared variable (non-atomically), and
59 /// // let the main thread know once all increments are done.
61 /// // Here we're using an Arc to share memory among threads, and the data inside
62 /// // the Arc is protected with a mutex.
63 /// let data = Arc::new(Mutex::new(0));
65 /// let (tx, rx) = channel();
67 /// let (data, tx) = (data.clone(), tx.clone());
68 /// thread::spawn(move || {
69 /// // The shared state can only be accessed once the lock is held.
70 /// // Our non-atomic increment is safe because we're the only thread
71 /// // which can access the shared state when the lock is held.
73 /// // We unwrap() the return value to assert that we are not expecting
74 /// // threads to ever fail while holding the lock.
75 /// let mut data = data.lock().unwrap();
78 /// tx.send(()).unwrap();
80 /// // the lock is unlocked here when `data` goes out of scope.
84 /// rx.recv().unwrap();
87 /// To recover from a poisoned mutex:
90 /// use std::sync::{Arc, Mutex};
93 /// let lock = Arc::new(Mutex::new(0_u32));
94 /// let lock2 = lock.clone();
96 /// let _ = thread::spawn(move || -> () {
97 /// // This thread will acquire the mutex first, unwrapping the result of
98 /// // `lock` because the lock has not been poisoned.
99 /// let _lock = lock2.lock().unwrap();
101 /// // This panic while holding the lock (`_lock` is in scope) will poison
106 /// // The lock is poisoned by this point, but the returned result can be
107 /// // pattern matched on to return the underlying guard on both branches.
108 /// let mut guard = match lock.lock() {
109 /// Ok(guard) => guard,
110 /// Err(poisoned) => poisoned.into_inner(),
115 #[stable(feature = "rust1", since = "1.0.0")]
116 pub struct Mutex<T: ?Sized> {
117 // Note that this static mutex is in a *box*, not inlined into the struct
118 // itself. Once a native mutex has been used once, its address can never
119 // change (it can't be moved). This mutex type can be safely moved at any
120 // time, so to ensure that the native mutex is used correctly we box the
121 // inner lock to give it a constant address.
122 inner: Box<StaticMutex>,
// NOTE(review): the `data: UnsafeCell<T>` field (referenced as `self.data` by
// `lock`/`try_lock`/`get_mut` below) and the struct's closing brace are elided
// from this view of the file.
126 // these are the only places where `T: Send` matters; all other
127 // functionality works fine on a single thread.
128 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: moving a `Mutex<T>` to another thread moves the protected `T` with
// it, hence the `T: Send` bound.
129 unsafe impl<T: ?Sized + Send> Send for Mutex<T> { }
130 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: sharing a `&Mutex<T>` only exposes the data behind the lock, so
// `T: Send` (rather than `T: Sync`) is the bound required: only one thread can
// observe a `&mut T` through a guard at a time.
131 unsafe impl<T: ?Sized + Send> Sync for Mutex<T> { }
133 /// The static mutex type is provided to allow for static allocation of mutexes.
135 /// Note that this is a separate type because using a Mutex correctly means that
136 /// it needs to have a destructor run. In Rust, statics are not allowed to have
137 /// destructors. As a result, a `StaticMutex` has one extra method when compared
138 /// to a `Mutex`, a `destroy` method. This method is unsafe to call, and
139 /// documentation can be found directly on the method.
144 /// #![feature(static_mutex)]
146 /// use std::sync::{StaticMutex, MUTEX_INIT};
148 /// static LOCK: StaticMutex = MUTEX_INIT;
151 /// let _g = LOCK.lock().unwrap();
152 /// // do some productive work
154 /// // lock is unlocked here.
156 #[unstable(feature = "static_mutex",
157 reason = "may be merged with Mutex in the future",
159 pub struct StaticMutex {
// NOTE(review): a `lock: sys::Mutex` field also belongs to this struct (it is
// initialized in `StaticMutex::new` below) but is elided from this view, as is
// the closing brace.
161 poison: poison::Flag,
164 /// An RAII implementation of a "scoped lock" of a mutex. When this structure is
165 /// dropped (falls out of scope), the lock will be unlocked.
167 /// The data protected by the mutex can be accessed through this guard via its
168 /// `Deref` and `DerefMut` implementations.
170 #[stable(feature = "rust1", since = "1.0.0")]
171 pub struct MutexGuard<'a, T: ?Sized + 'a> {
172 // funny underscores due to how Deref/DerefMut currently work (they
173 // disregard field privacy).
174 __lock: &'a StaticMutex,
175 __data: &'a UnsafeCell<T>,
176 __poison: poison::Guard,
179 #[stable(feature = "rust1", since = "1.0.0")]
// A guard may not be sent to another thread (negative impl): its `Drop` below
// unlocks the raw sys mutex, and unlocking is presumably only valid on the
// thread that acquired it -- NOTE(review): confirm platform requirement.
180 impl<'a, T: ?Sized> !marker::Send for MutexGuard<'a, T> {}
182 /// Static initialization of a mutex. This constant can be used to initialize
183 /// other mutex constants.
184 #[unstable(feature = "static_mutex",
185 reason = "may be merged with Mutex in the future",
// Simply `StaticMutex::new()` spelled as a constant, usable in item position.
187 pub const MUTEX_INIT: StaticMutex = StaticMutex::new();
// NOTE(review): the enclosing `impl<T> Mutex<T> {` header, the struct-literal
// opener, and the closing braces of this constructor are elided from this view.
190 /// Creates a new mutex in an unlocked state ready for use.
191 #[stable(feature = "rust1", since = "1.0.0")]
192 pub fn new(t: T) -> Mutex<T> {
// Boxing gives the native mutex a stable address (see the comment on the
// `inner` field); `t` goes into an `UnsafeCell` so guards can hand out
// references to it.
194 inner: box StaticMutex::new(),
195 data: UnsafeCell::new(t),
200 impl<T: ?Sized> Mutex<T> {
201 /// Acquires a mutex, blocking the current thread until it is able to do so.
203 /// This function will block the local thread until it is available to acquire
204 /// the mutex. Upon returning, the thread is the only thread with the mutex
205 /// held. An RAII guard is returned to allow scoped unlock of the lock. When
206 /// the guard goes out of scope, the mutex will be unlocked.
210 /// If another user of this mutex panicked while holding the mutex, then
211 /// this call will return an error once the mutex is acquired.
212 #[stable(feature = "rust1", since = "1.0.0")]
213 pub fn lock(&self) -> LockResult<MutexGuard<T>> {
// Block on the raw OS mutex first; the poison flag is then checked by
// `MutexGuard::new`, which maps a poisoned state into the `LockResult` error.
214 unsafe { self.inner.lock.lock() }
215 MutexGuard::new(&*self.inner, &self.data)
218 /// Attempts to acquire this lock.
220 /// If the lock could not be acquired at this time, then `Err` is returned.
221 /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
222 /// guard is dropped.
224 /// This function does not block.
228 /// If another user of this mutex panicked while holding the mutex, then
229 /// this call will return failure if the mutex would otherwise be acquired.
231 #[stable(feature = "rust1", since = "1.0.0")]
232 pub fn try_lock(&self) -> TryLockResult<MutexGuard<T>> {
233 if unsafe { self.inner.lock.try_lock() } {
// `try!` converts a poison error from `MutexGuard::new` into
// `TryLockError::Poisoned` via its `From` conversion.
234 Ok(try!(MutexGuard::new(&*self.inner, &self.data)))
236 Err(TryLockError::WouldBlock)
240 /// Determines whether the lock is poisoned.
242 /// If another thread is active, the lock can still become poisoned at any
243 /// time. You should not trust a `false` value for program correctness
244 /// without additional synchronization.
246 #[stable(feature = "sync_poison", since = "1.2.0")]
247 pub fn is_poisoned(&self) -> bool {
// Non-blocking: reads the poison flag without acquiring the lock.
248 self.inner.poison.get()
251 /// Consumes this mutex, returning the underlying data.
255 /// If another user of this mutex panicked while holding the mutex, then
256 /// this call will return an error instead.
257 #[stable(feature = "mutex_into_inner", since = "1.6.0")]
258 pub fn into_inner(self) -> LockResult<T> where T: Sized {
259 // We know statically that there are no outstanding references to
260 // `self` so there's no need to lock the inner StaticMutex.
262 // To get the inner value, we'd like to call `data.into_inner()`,
263 // but because `Mutex` impl-s `Drop`, we can't move out of it, so
264 // we'll have to destructure it manually instead.
266 // Like `let Mutex { inner, data } = self`.
267 let (inner, data) = {
268 let Mutex { ref inner, ref data } = self;
269 (ptr::read(inner), ptr::read(data))
// NOTE(review): the full body presumably also forgets `self` after the
// `ptr::read`s so `Drop` cannot run on the bitwise-copied fields -- that line
// is elided from this view; confirm against the complete file.
272 inner.lock.destroy(); // Keep in sync with the `Drop` impl.
// Report poison as an error while still surrendering the data via
// `PoisonError::into_inner` on the caller's side.
274 poison::map_result(inner.poison.borrow(), |_| data.into_inner())
278 /// Returns a mutable reference to the underlying data.
280 /// Since this call borrows the `Mutex` mutably, no actual locking needs to
281 /// take place---the mutable borrow statically guarantees no locks exist.
285 /// If another user of this mutex panicked while holding the mutex, then
286 /// this call will return an error instead.
287 #[stable(feature = "mutex_get_mut", since = "1.6.0")]
288 pub fn get_mut(&mut self) -> LockResult<&mut T> {
289 // We know statically that there are no other references to `self`, so
290 // there's no need to lock the inner StaticMutex.
291 let data = unsafe { &mut *self.data.get() };
// Still honor poisoning: a poisoned mutex yields Err, carrying the reference.
292 poison::map_result(self.inner.poison.borrow(), |_| data )
296 #[stable(feature = "rust1", since = "1.0.0")]
297 impl<T: ?Sized> Drop for Mutex<T> {
298 #[unsafe_destructor_blind_to_params]
// NOTE(review): the `fn drop(&mut self) {` header is elided from this view.
300 // This is actually safe b/c we know that there is no further usage of
301 // this mutex (it's up to the user to arrange for a mutex to get
302 // dropped, that's not our job)
304 // IMPORTANT: This code must be kept in sync with `Mutex::into_inner`.
305 unsafe { self.inner.lock.destroy() }
309 #[stable(feature = "rust1", since = "1.0.0")]
310 impl<T: ?Sized + fmt::Debug + 'static> fmt::Debug for Mutex<T> {
311 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Uses `try_lock` so formatting never blocks: a currently-held lock is
// reported as `<locked>` rather than waiting for its owner to release it.
312 match self.try_lock() {
313 Ok(guard) => write!(f, "Mutex {{ data: {:?} }}", &*guard),
314 Err(TryLockError::Poisoned(err)) => {
// A poisoned mutex is still readable through the PoisonError's guard.
315 write!(f, "Mutex {{ data: Poisoned({:?}) }}", &**err.get_ref())
317 Err(TryLockError::WouldBlock) => write!(f, "Mutex {{ <locked> }}")
// Zero-sized stand-in protected by every `StaticMutex` guard: a `StaticMutex`
// stores no user data, so its `lock`/`try_lock` hand `&DUMMY.0` to the guard.
322 struct Dummy(UnsafeCell<()>);
// SAFETY: the cell only ever holds `()`, so shared access is harmless; the
// newtype exists because `UnsafeCell` itself is not `Sync`.
323 unsafe impl Sync for Dummy {}
324 static DUMMY: Dummy = Dummy(UnsafeCell::new(()));
// NOTE(review): the methods below belong to an `impl StaticMutex { ... }`
// block whose header and closing braces are elided from this view.
326 #[unstable(feature = "static_mutex",
327 reason = "may be merged with Mutex in the future",
330 /// Creates a new mutex in an unlocked state ready for use.
// `const fn` so the result can initialize a `static` (see `MUTEX_INIT`).
331 pub const fn new() -> StaticMutex {
333 lock: sys::Mutex::new(),
334 poison: poison::Flag::new(),
338 /// Acquires this lock, see `Mutex::lock`
// Takes `&'static self` so the returned guard may borrow for `'static`.
340 pub fn lock(&'static self) -> LockResult<MutexGuard<()>> {
341 unsafe { self.lock.lock() }
// Static mutexes protect no data, so the guard wraps the shared DUMMY cell.
342 MutexGuard::new(self, &DUMMY.0)
345 /// Attempts to grab this lock, see `Mutex::try_lock`
347 pub fn try_lock(&'static self) -> TryLockResult<MutexGuard<()>> {
348 if unsafe { self.lock.try_lock() } {
// `try!` maps a poison error from `MutexGuard::new` into
// `TryLockError::Poisoned`.
349 Ok(try!(MutexGuard::new(self, &DUMMY.0)))
351 Err(TryLockError::WouldBlock)
355 /// Deallocates resources associated with this static mutex.
357 /// This method is unsafe because it provides no guarantees that there are
358 /// no active users of this mutex, and safety is not guaranteed if there are
359 /// active users of this mutex.
361 /// This method is required to ensure that there are no memory leaks on
362 /// *all* platforms. It may be the case that some platforms do not leak
363 /// memory if this method is not called, but this is not guaranteed to be
364 /// true on all platforms.
365 pub unsafe fn destroy(&'static self) {
// NOTE(review): body elided from this view; presumably forwards to
// `self.lock.destroy()` -- confirm against the complete file.
370 impl<'mutex, T: ?Sized> MutexGuard<'mutex, T> {
// Constructs a guard for an already-acquired lock. The poison check here is
// what turns a panic-while-locked into `Err(PoisonError)` for callers of
// `lock`/`try_lock`.
372 fn new(lock: &'mutex StaticMutex, data: &'mutex UnsafeCell<T>)
373 -> LockResult<MutexGuard<'mutex, T>> {
374 poison::map_result(lock.poison.borrow(), |guard| {
// NOTE(review): the closure body building the MutexGuard (and the closing
// braces) are elided from this view.
384 #[stable(feature = "rust1", since = "1.0.0")]
385 impl<'mutex, T: ?Sized> Deref for MutexGuard<'mutex, T> {
// NOTE(review): the `type Target = T;` associated type is elided from this view.
388 fn deref(&self) -> &T {
// Safe: the guard's existence proves the lock is held for its lifetime.
389 unsafe { &*self.__data.get() }
393 #[stable(feature = "rust1", since = "1.0.0")]
394 impl<'mutex, T: ?Sized> DerefMut for MutexGuard<'mutex, T> {
395 fn deref_mut(&mut self) -> &mut T {
// Safe: exclusive access is guaranteed while the guard (and lock) is held,
// and `&mut self` prevents aliasing through the same guard.
396 unsafe { &mut *self.__data.get() }
400 #[stable(feature = "rust1", since = "1.0.0")]
401 impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> {
// NOTE(review): the `fn drop(&mut self) {` header is elided from this view.
// Record the poison state first (marks the lock poisoned if this drop runs
// during a panic), then release the raw lock.
405 self.__lock.poison.done(&self.__poison);
406 self.__lock.lock.unlock();
// Crate-internal accessors exposing the raw lock and poison flag behind a
// guard. NOTE(review): presumably used by sibling sync primitives (e.g.
// Condvar, which the tests below pair with Mutex); bodies are elided from
// this view.
411 pub fn guard_lock<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a sys::Mutex {
415 pub fn guard_poison<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a poison::Flag {
// Test module fragment. NOTE(review): the `mod tests` header, `#[test]`
// attributes, helper fn headers and many closing braces are elided from this
// view; comments below label each surviving test-body fragment.
423 use sync::mpsc::channel;
424 use sync::{Arc, Mutex, StaticMutex, Condvar};
425 use sync::atomic::{AtomicUsize, Ordering};
// Test helper: a Mutex/Condvar pair behind an Arc, shareable across threads.
428 struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
430 #[derive(Eq, PartialEq, Debug)]
// SAFETY: test-only wrappers; Packet merely forwards to the Arc'd pair inside.
433 unsafe impl<T: Send> Send for Packet<T> {}
434 unsafe impl<T> Sync for Packet<T> {}
// smoke: a Mutex can be locked and unlocked repeatedly.
438 let m = Mutex::new(());
439 drop(m.lock().unwrap());
440 drop(m.lock().unwrap());
// smoke: same for a StaticMutex built with `new`.
445 static M: StaticMutex = StaticMutex::new();
447 drop(M.lock().unwrap());
448 drop(M.lock().unwrap());
// stress: many threads bump an unsynchronized counter under a StaticMutex.
455 static M: StaticMutex = StaticMutex::new();
456 static mut CNT: u32 = 0;
463 let _g = M.lock().unwrap();
469 let (tx, rx) = channel();
471 let tx2 = tx.clone();
472 thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
473 let tx2 = tx.clone();
474 thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
481 assert_eq!(unsafe {CNT}, J * K * 2);
// try_lock on an uncontended mutex succeeds.
489 let m = Mutex::new(());
490 *m.try_lock().unwrap() = ();
// into_inner returns the protected value.
494 fn test_into_inner() {
495 let m = Mutex::new(NonCopy(10));
496 assert_eq!(m.into_inner().unwrap(), NonCopy(10));
// into_inner must drop the contained value exactly once.
500 fn test_into_inner_drop() {
501 struct Foo(Arc<AtomicUsize>);
504 self.0.fetch_add(1, Ordering::SeqCst);
507 let num_drops = Arc::new(AtomicUsize::new(0));
508 let m = Mutex::new(Foo(num_drops.clone()));
509 assert_eq!(num_drops.load(Ordering::SeqCst), 0);
511 let _inner = m.into_inner().unwrap();
512 assert_eq!(num_drops.load(Ordering::SeqCst), 0);
514 assert_eq!(num_drops.load(Ordering::SeqCst), 1);
// into_inner on a poisoned mutex yields Err, but the data stays reachable.
518 fn test_into_inner_poison() {
519 let m = Arc::new(Mutex::new(NonCopy(10)));
521 let _ = thread::spawn(move || {
522 let _lock = m2.lock().unwrap();
523 panic!("test panic in inner thread to poison mutex");
526 assert!(m.is_poisoned());
527 match Arc::try_unwrap(m).unwrap().into_inner() {
528 Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
529 Ok(x) => panic!("into_inner of poisoned Mutex is Ok: {:?}", x),
// get_mut gives lock-free mutable access through an exclusive borrow.
535 let mut m = Mutex::new(NonCopy(10));
536 *m.get_mut().unwrap() = NonCopy(20);
537 assert_eq!(m.into_inner().unwrap(), NonCopy(20));
// get_mut on a poisoned mutex also yields Err, with the data recoverable.
541 fn test_get_mut_poison() {
542 let m = Arc::new(Mutex::new(NonCopy(10)));
544 let _ = thread::spawn(move || {
545 let _lock = m2.lock().unwrap();
546 panic!("test panic in inner thread to poison mutex");
549 assert!(m.is_poisoned());
550 match Arc::try_unwrap(m).unwrap().get_mut() {
551 Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
552 Ok(x) => panic!("get_mut of poisoned Mutex is Ok: {:?}", x),
// Mutex + Condvar round trip between two threads.
557 fn test_mutex_arc_condvar() {
558 let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));
559 let packet2 = Packet(packet.0.clone());
560 let (tx, rx) = channel();
561 let _t = thread::spawn(move|| {
562 // wait until parent gets in
564 let &(ref lock, ref cvar) = &*packet2.0;
565 let mut lock = lock.lock().unwrap();
570 let &(ref lock, ref cvar) = &*packet.0;
571 let mut lock = lock.lock().unwrap();
572 tx.send(()).unwrap();
575 lock = cvar.wait(lock).unwrap();
// A panic on the far side of a Condvar wait surfaces as a poison error.
580 fn test_arc_condvar_poison() {
581 let packet = Packet(Arc::new((Mutex::new(1), Condvar::new())));
582 let packet2 = Packet(packet.0.clone());
583 let (tx, rx) = channel();
585 let _t = thread::spawn(move || -> () {
587 let &(ref lock, ref cvar) = &*packet2.0;
588 let _g = lock.lock().unwrap();
590 // Parent should fail when it wakes up.
594 let &(ref lock, ref cvar) = &*packet.0;
595 let mut lock = lock.lock().unwrap();
596 tx.send(()).unwrap();
598 match cvar.wait(lock) {
601 assert_eq!(*lock, 1);
// A thread that panics while holding the lock poisons it for everyone else.
609 fn test_mutex_arc_poison() {
610 let arc = Arc::new(Mutex::new(1));
611 assert!(!arc.is_poisoned());
612 let arc2 = arc.clone();
613 let _ = thread::spawn(move|| {
614 let lock = arc2.lock().unwrap();
615 assert_eq!(*lock, 2);
617 assert!(arc.lock().is_err());
618 assert!(arc.is_poisoned());
// Nested mutexes: a Mutex whose protected value is itself an Arc<Mutex<_>>.
622 fn test_mutex_arc_nested() {
623 // Tests nested mutexes and access
624 // to underlying data.
625 let arc = Arc::new(Mutex::new(1));
626 let arc2 = Arc::new(Mutex::new(arc));
627 let (tx, rx) = channel();
628 let _t = thread::spawn(move|| {
629 let lock = arc2.lock().unwrap();
630 let lock2 = lock.lock().unwrap();
631 assert_eq!(*lock2, 1);
632 tx.send(()).unwrap();
// The lock must remain usable (and correctly updated) after an unwind whose
// Drop impl itself took the lock.
638 fn test_mutex_arc_access_in_unwind() {
639 let arc = Arc::new(Mutex::new(1));
640 let arc2 = arc.clone();
641 let _ = thread::spawn(move|| -> () {
645 impl Drop for Unwinder {
647 *self.i.lock().unwrap() += 1;
650 let _u = Unwinder { i: arc2 };
653 let lock = arc.lock().unwrap();
654 assert_eq!(*lock, 2);
// Mutex<[i32]> exercises the `T: ?Sized` support through a guard.
658 fn test_mutex_unsized() {
659 let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
661 let b = &mut *mutex.lock().unwrap();
665 let comp: &[i32] = &[4, 2, 5];
666 assert_eq!(&*mutex.lock().unwrap(), comp);