1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
17 use ops::{Deref, DerefMut};
19 use sys_common::poison::{self, LockResult, TryLockError, TryLockResult};
20 use sys_common::rwlock as sys;
22 /// A reader-writer lock
24 /// This type of lock allows a number of readers or at most one writer at any
25 /// point in time. The write portion of this lock typically allows modification
26 /// of the underlying data (exclusive access) and the read portion of this lock
27 /// typically allows for read-only access (shared access).
29 /// The priority policy of the lock is dependent on the underlying operating
30 /// system's implementation, and this type does not guarantee that any
31 /// particular policy will be used.
33 /// The type parameter `T` represents the data that this lock protects. It is
34 /// required that `T` satisfies `Send` to be shared across threads and `Sync` to
35 /// allow concurrent access through readers. The RAII guards returned from the
36 /// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
37 /// to allow access to the contents of the lock.
41 /// RwLocks, like Mutexes, will become poisoned on panics. Note, however, that
42 /// an RwLock may only be poisoned if a panic occurs while it is locked
43 /// exclusively (write mode). If a panic occurs in any reader, then the lock
44 /// will not be poisoned.
49 /// use std::sync::RwLock;
51 /// let lock = RwLock::new(5);
53 /// // many reader locks can be held at once
55 /// let r1 = lock.read().unwrap();
56 /// let r2 = lock.read().unwrap();
57 /// assert_eq!(*r1, 5);
58 /// assert_eq!(*r2, 5);
59 /// } // read locks are dropped at this point
61 /// // only one write lock may be held, however
63 /// let mut w = lock.write().unwrap();
65 /// assert_eq!(*w, 6);
66 /// } // write lock is dropped here
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RwLock<T: ?Sized> {
    // Heap-allocated via `Box` — NOTE(review): presumably so the native
    // lock has a stable address even when the `RwLock` value is moved;
    // confirm against `sys_common::rwlock`'s requirements.
    inner: Box<StaticRwLock>,
// An `RwLock` may cross threads and be shared between them. Both impls
// require `T: Send + Sync`: readers on multiple threads observe `&T`
// concurrently through the read guards, hence the `Sync` bound.
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Send + Sync> Send for RwLock<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
79 /// Structure representing a statically allocated RwLock.
81 /// This structure is intended to be used inside of a `static` and will provide
82 /// automatic global access as well as lazy initialization. The internal
83 /// resources of this RwLock, however, must be manually deallocated.
88 /// #![feature(static_rwlock)]
90 /// use std::sync::{StaticRwLock, RW_LOCK_INIT};
92 /// static LOCK: StaticRwLock = RW_LOCK_INIT;
95 /// let _g = LOCK.read().unwrap();
96 /// // ... shared read access
99 /// let _g = LOCK.write().unwrap();
100 /// // ... exclusive write access
102 /// unsafe { LOCK.destroy() } // free all resources
#[unstable(feature = "static_rwlock",
           reason = "may be merged with RwLock in the future",
pub struct StaticRwLock {
    // Set when a writer panics while holding the lock; consulted by the
    // guard constructors to produce poisoned `LockResult`s.
    poison: poison::Flag,
/// Constant initialization for a statically-initialized rwlock.
// Delegates to the `const fn` constructor so the two stay in sync.
#[unstable(feature = "static_rwlock",
           reason = "may be merged with RwLock in the future",
pub const RW_LOCK_INIT: StaticRwLock = StaticRwLock::new();
/// RAII structure used to release the shared read access of a lock when
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
    // Borrow of the lock so `Drop` can call `read_unlock` on it.
    __lock: &'a StaticRwLock,

// NOTE(review): likely `!Send` because the underlying OS lock must be
// released on the thread that acquired it — confirm against the sys impl.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> !marker::Send for RwLockReadGuard<'a, T> {}
/// RAII structure used to release the exclusive write access of a lock when
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
    // Borrow of the lock so `Drop` can mark poison state and unlock.
    __lock: &'a StaticRwLock,
    // Poison guard: its `done` call on drop records whether the thread
    // was panicking while the write lock was held.
    __poison: poison::Guard,

// See the matching `!Send` impl on `RwLockReadGuard`: the guard must be
// dropped on the acquiring thread.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> !marker::Send for RwLockWriteGuard<'a, T> {}
/// Creates a new instance of an `RwLock<T>` which is unlocked.
/// use std::sync::RwLock;
/// let lock = RwLock::new(5);
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(t: T) -> RwLock<T> {
    // The value lives in an `UnsafeCell` because guards hand out `&T`/`&mut T`
    // through a shared `&self`; the boxed `StaticRwLock` supplies the native
    // lock and the poison flag.
    RwLock { inner: box StaticRwLock::new(), data: UnsafeCell::new(t) }
impl<T: ?Sized> RwLock<T> {
    /// Locks this rwlock with shared read access, blocking the current thread
    /// until it can be acquired.
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns. This method does not provide any guarantees with
    /// respect to the ordering of whether contentious readers or writers will
    /// acquire the lock first.
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock.
    /// The failure will occur immediately after the lock has been acquired.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn read(&self) -> LockResult<RwLockReadGuard<T>> {
        // Acquire the shared lock unconditionally; the guard constructor then
        // maps the poison flag into the returned `LockResult`, so a poisoned
        // lock still gets locked before the error is reported.
        self.inner.lock.read();
        RwLockReadGuard::new(&*self.inner, &self.data)
    /// Attempts to acquire this rwlock with shared read access.
    /// If the access could not be granted at this time, then `Err` is returned.
    /// Otherwise, an RAII guard is returned which will release the shared access
    /// when it is dropped.
    /// This function does not block.
    /// This function does not provide any guarantees with respect to the ordering
    /// of whether contentious readers or writers will acquire the lock first.
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock. An
    /// error will only be returned if the lock would have otherwise been
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<T>> {
        // Only construct a guard (and check poison) if the lock was actually
        // acquired; otherwise report contention without touching poison state.
        if self.inner.lock.try_read() {
            Ok(RwLockReadGuard::new(&*self.inner, &self.data)?)
            Err(TryLockError::WouldBlock)
    /// Locks this rwlock with exclusive write access, blocking the current
    /// thread until it can be acquired.
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    /// Returns an RAII guard which will drop the write access of this rwlock
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock.
    /// An error will be returned when the lock is acquired.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn write(&self) -> LockResult<RwLockWriteGuard<T>> {
        // Same shape as `read`: lock first, then let the guard constructor
        // translate the poison flag into the `LockResult`.
        self.inner.lock.write();
        RwLockWriteGuard::new(&*self.inner, &self.data)
    /// Attempts to lock this rwlock with exclusive write access.
    /// If the lock could not be acquired at this time, then `Err` is returned.
    /// Otherwise, an RAII guard is returned which will release the lock when
    /// This function does not block.
    /// This function does not provide any guarantees with respect to the ordering
    /// of whether contentious readers or writers will acquire the lock first.
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock. An
    /// error will only be returned if the lock would have otherwise been
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<T>> {
        // Mirrors `try_read`: `WouldBlock` when contended, poison check only
        // on successful acquisition.
        if self.inner.lock.try_write() {
            Ok(RwLockWriteGuard::new(&*self.inner, &self.data)?)
            Err(TryLockError::WouldBlock)
    /// Determines whether the lock is poisoned.
    /// If another thread is active, the lock can still become poisoned at any
    /// time. You should not trust a `false` value for program correctness
    /// without additional synchronization.
    #[stable(feature = "sync_poison", since = "1.2.0")]
    pub fn is_poisoned(&self) -> bool {
        // Non-blocking read of the poison flag; inherently racy, as the
        // doc comment above warns.
        self.inner.poison.get()
    /// Consumes this `RwLock`, returning the underlying data.
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock. An
    /// error will only be returned if the lock would have otherwise been
    #[stable(feature = "rwlock_into_inner", since = "1.6.0")]
    pub fn into_inner(self) -> LockResult<T> where T: Sized {
        // We know statically that there are no outstanding references to
        // `self` so there's no need to lock the inner StaticRwLock.
        // To get the inner value, we'd like to call `data.into_inner()`,
        // but because `RwLock` impl-s `Drop`, we can't move out of it, so
        // we'll have to destructure it manually instead.
        // Like `let RwLock { inner, data } = self`.
        // SAFETY-relevant: the `ptr::read`s duplicate the fields by value;
        // NOTE(review): this relies on `self` subsequently being forgotten
        // so its `Drop` impl does not run a second destroy — confirm the
        // elided code calls `mem::forget(self)`.
        let (inner, data) = {
            let RwLock { ref inner, ref data } = self;
            (ptr::read(inner), ptr::read(data))
        inner.lock.destroy(); // Keep in sync with the `Drop` impl.
        // Surface poison state while still handing the value back via
        // `PoisonError::into_inner` on the error path.
        poison::map_result(inner.poison.borrow(), |_| data.into_inner())
    /// Returns a mutable reference to the underlying data.
    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
    /// take place---the mutable borrow statically guarantees no locks exist.
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock. An
    /// error will only be returned if the lock would have otherwise been
    #[stable(feature = "rwlock_get_mut", since = "1.6.0")]
    pub fn get_mut(&mut self) -> LockResult<&mut T> {
        // We know statically that there are no other references to `self`, so
        // there's no need to lock the inner StaticRwLock.
        let data = unsafe { &mut *self.data.get() };
        // Still propagate poison so callers observe a panicking writer.
        poison::map_result(self.inner.poison.borrow(), |_| data )
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Drop for RwLock<T> {
    // Frees the native lock's resources when the `RwLock` is dropped.
    #[unsafe_destructor_blind_to_params]
        // IMPORTANT: This code needs to be kept in sync with `RwLock::into_inner`.
        unsafe { self.inner.lock.destroy() }
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
    /// Formats the lock without blocking: uses `try_read` and reports
    /// `<locked>` rather than waiting when a writer holds the lock.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.try_read() {
            Ok(guard) => write!(f, "RwLock {{ data: {:?} }}", &*guard),
            Err(TryLockError::Poisoned(err)) => {
                // Even when poisoned, the data is still reachable through
                // the error's guard for diagnostic output.
                write!(f, "RwLock {{ data: Poisoned({:?}) }}", &**err.get_ref())
            Err(TryLockError::WouldBlock) => write!(f, "RwLock {{ <locked> }}")
#[stable(feature = "rw_lock_default", since = "1.9.0")]
impl<T: Default> Default for RwLock<T> {
    /// Creates an unlocked `RwLock` protecting `T`'s default value.
    fn default() -> RwLock<T> {
        RwLock::new(Default::default())
// Placeholder data cell handed to guards created from a `StaticRwLock`,
// which protects no real data (guards over it deref to `()`).
struct Dummy(UnsafeCell<()>);
// SAFETY: the cell contains `()`, so shared access is trivially safe.
unsafe impl Sync for Dummy {}
static DUMMY: Dummy = Dummy(UnsafeCell::new(()));
#[unstable(feature = "static_rwlock",
           reason = "may be merged with RwLock in the future",
    /// Creates a new rwlock.
    // `const fn` so it can initialize a `static` (see `RW_LOCK_INIT`).
    pub const fn new() -> StaticRwLock {
        lock: sys::RWLock::new(),
        poison: poison::Flag::new(),
    /// Locks this rwlock with shared read access, blocking the current thread
    /// until it can be acquired.
    /// See `RwLock::read`.
    // `&'static self` because the guard borrows the lock for `'static`;
    // the guard derefs to `()` via the shared `DUMMY` cell.
    pub fn read(&'static self) -> LockResult<RwLockReadGuard<'static, ()>> {
        RwLockReadGuard::new(self, &DUMMY.0)
    /// Attempts to acquire this lock with shared read access.
    /// See `RwLock::try_read`.
    pub fn try_read(&'static self)
                    -> TryLockResult<RwLockReadGuard<'static, ()>> {
        // Same contract as `RwLock::try_read`: `WouldBlock` on contention,
        // poison surfaced only after a successful acquisition.
        if self.lock.try_read(){
            Ok(RwLockReadGuard::new(self, &DUMMY.0)?)
            Err(TryLockError::WouldBlock)
    /// Locks this rwlock with exclusive write access, blocking the current
    /// thread until it can be acquired.
    /// See `RwLock::write`.
    pub fn write(&'static self) -> LockResult<RwLockWriteGuard<'static, ()>> {
        // The returned guard protects no data; it exists purely to release
        // the lock (and record poison) on drop.
        RwLockWriteGuard::new(self, &DUMMY.0)
    /// Attempts to lock this rwlock with exclusive write access.
    /// See `RwLock::try_write`.
    pub fn try_write(&'static self)
                     -> TryLockResult<RwLockWriteGuard<'static, ()>> {
        // Mirrors `StaticRwLock::try_read` for the exclusive case.
        if self.lock.try_write() {
            Ok(RwLockWriteGuard::new(self, &DUMMY.0)?)
            Err(TryLockError::WouldBlock)
    /// Deallocates all resources associated with this static lock.
    /// This method is unsafe to call as there is no guarantee that there are no
    /// active users of the lock, and this also doesn't prevent any future users
    /// of this lock. This method is required to be called to not leak memory on
    // # Safety: caller must ensure no guards are live and the lock is never
    // used again after this call.
    pub unsafe fn destroy(&'static self) {
impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
    // Wraps an already-acquired shared lock in a guard. Unsafe because the
    // caller must have actually taken the read lock; `map_result` converts
    // the poison flag into the returned `LockResult`.
    unsafe fn new(lock: &'rwlock StaticRwLock, data: &'rwlock UnsafeCell<T>)
                  -> LockResult<RwLockReadGuard<'rwlock, T>> {
        poison::map_result(lock.poison.borrow(), |_| {
                __data: &*data.get(),
    /// Transform this guard to hold a sub-borrow of the original data.
    /// Applies the supplied closure to the data, returning a new lock
    /// guard referencing the borrow returned by the closure.
    /// # #![feature(guard_map)]
    /// # use std::sync::{RwLockReadGuard, RwLock};
    /// let x = RwLock::new(vec![1, 2]);
    /// let y = RwLockReadGuard::map(x.read().unwrap(), |v| &v[0]);
    /// assert_eq!(*y, 1);
    #[unstable(feature = "guard_map",
               reason = "recently added, needs RFC for stabilization,
                         questionable interaction with Condvar",
    #[rustc_deprecated(since = "1.8.0",
                       reason = "unsound on Mutex because of Condvar and \
                                 RwLock may also with to be used with Condvar \
    pub fn map<U: ?Sized, F>(this: Self, cb: F) -> RwLockReadGuard<'rwlock, U>
        where F: FnOnce(&T) -> &U
        // Build a replacement guard over the narrowed borrow.
        // NOTE(review): the elided code presumably forgets `this` so the
        // original guard's destructor does not unlock twice — confirm.
        let new = RwLockReadGuard {
            __data: cb(this.__data)
impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
    // Wraps an already-acquired exclusive lock in a guard. Unsafe because the
    // caller must have actually taken the write lock. The closure receives the
    // poison `guard`, which is stored so `Drop` can record panic state.
    unsafe fn new(lock: &'rwlock StaticRwLock, data: &'rwlock UnsafeCell<T>)
                  -> LockResult<RwLockWriteGuard<'rwlock, T>> {
        poison::map_result(lock.poison.borrow(), |guard| {
                __data: &mut *data.get(),
    /// Transform this guard to hold a sub-borrow of the original data.
    /// Applies the supplied closure to the data, returning a new lock
    /// guard referencing the borrow returned by the closure.
    /// # #![feature(guard_map)]
    /// # use std::sync::{RwLockWriteGuard, RwLock};
    /// let x = RwLock::new(vec![1, 2]);
    /// let mut y = RwLockWriteGuard::map(x.write().unwrap(), |v| &mut v[0]);
    /// assert_eq!(*y, 1);
    /// assert_eq!(&**x.read().unwrap(), &[10, 2]);
    #[unstable(feature = "guard_map",
               reason = "recently added, needs RFC for stabilization,
                         questionable interaction with Condvar",
    #[rustc_deprecated(since = "1.8.0",
                       reason = "unsound on Mutex because of Condvar and \
                                 RwLock may also with to be used with Condvar \
    pub fn map<U: ?Sized, F>(this: Self, cb: F) -> RwLockWriteGuard<'rwlock, U>
        where F: FnOnce(&mut T) -> &mut U
        // Compute the new data while still owning the original lock
        // in order to correctly poison if the callback panics.
        let data = unsafe { ptr::read(&this.__data) };
        let new_data = cb(data);

        // We don't want to unlock the lock by running the destructor of the
        // original lock, so just read the fields we need and forget it.
        let (poison, lock) = unsafe {
            (ptr::read(&this.__poison), ptr::read(&this.__lock))
#[stable(feature = "rust1", since = "1.0.0")]
impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
    // Shared access to the protected data for the lifetime of the guard.
    fn deref(&self) -> &T { self.__data }
#[stable(feature = "rust1", since = "1.0.0")]
impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
    // Read access through the write guard (see `DerefMut` for writes).
    fn deref(&self) -> &T { self.__data }
#[stable(feature = "rust1", since = "1.0.0")]
impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
    // Exclusive access is sound: the guard holds the write lock.
    fn deref_mut(&mut self) -> &mut T { self.__data
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> Drop for RwLockReadGuard<'a, T> {
        // Release the shared lock. Read guards never set the poison flag:
        // readers cannot corrupt the data.
        unsafe { self.__lock.lock.read_unlock(); }
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> Drop for RwLockWriteGuard<'a, T> {
        // Record poison state (set if this thread is panicking) BEFORE
        // releasing the lock, so other threads never see a window where the
        // lock is free but poison is not yet recorded.
        self.__lock.poison.done(&self.__poison);
        unsafe { self.__lock.lock.write_unlock(); }
586 #![allow(deprecated)] // rand
590 use rand::{self, Rng};
591 use sync::mpsc::channel;
593 use sync::{Arc, RwLock, StaticRwLock, TryLockError, RwLockWriteGuard};
594 use sync::atomic::{AtomicUsize, Ordering};
    #[derive(Eq, PartialEq, Debug)]
        // Smoke test: read/write locks can be taken and released repeatedly,
        // and multiple simultaneous readers are allowed.
        let l = RwLock::new(());
        drop(l.read().unwrap());
        drop(l.write().unwrap());
        drop((l.read().unwrap(), l.read().unwrap()));
        drop(l.write().unwrap());

        // Same smoke test against the static variant; `destroy` must be
        // called manually to free the native resources.
        static R: StaticRwLock = StaticRwLock::new();
        drop(R.read().unwrap());
        drop(R.write().unwrap());
        drop((R.read().unwrap(), R.read().unwrap()));
        drop(R.write().unwrap());
        unsafe { R.destroy(); }

        // Stress test: many threads randomly mix read and write acquisitions
        // on a shared static lock, synchronizing completion over a channel.
        static R: StaticRwLock = StaticRwLock::new();
        const M: usize = 1000;

        let (tx, rx) = channel::<()>();
            thread::spawn(move|| {
                let mut rng = rand::thread_rng();
                    if rng.gen_weighted_bool(N) {
                        drop(R.write().unwrap());
                        drop(R.read().unwrap());
        unsafe { R.destroy(); }
    // A panic while holding the WRITE lock must poison subsequent reads.
    fn test_rw_arc_poison_wr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.write().unwrap();
        assert!(arc.read().is_err());

    // A panic while holding the WRITE lock must poison subsequent writes too.
    fn test_rw_arc_poison_ww() {
        let arc = Arc::new(RwLock::new(1));
        assert!(!arc.is_poisoned());
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.write().unwrap();
        assert!(arc.write().is_err());
        assert!(arc.is_poisoned());

    // A panic while holding only a READ lock must NOT poison later reads...
    fn test_rw_arc_no_poison_rr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.read().unwrap();
        let lock = arc.read().unwrap();
        assert_eq!(*lock, 1);
    // ...nor later writes.
    fn test_rw_arc_no_poison_rw() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.read().unwrap();
        let lock = arc.write().unwrap();
        assert_eq!(*lock, 1);
        // One writer increments the shared counter while many readers race to
        // observe it; the readers must never see a torn/partial update.
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move|| {
            let mut lock = arc2.write().unwrap();
            tx.send(()).unwrap();

        // Readers try to catch the writer in the act
        let mut children = Vec::new();
            let arc3 = arc.clone();
            children.push(thread::spawn(move|| {
                let lock = arc3.read().unwrap();

        // Wait for children to pass their asserts
            assert!(r.join().is_ok());

        // Wait for writer to finish
        let lock = arc.read().unwrap();
        assert_eq!(*lock, 10);

    // The lock must stay usable (and hold the final value) when a write
    // happens from a destructor running during unwinding.
    fn test_rw_arc_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move|| -> () {
                i: Arc<RwLock<isize>>,
            impl Drop for Unwinder {
                    let mut lock = self.i.write().unwrap();
            let _u = Unwinder { i: arc2 };
        let lock = arc.read().unwrap();
        assert_eq!(*lock, 2);
    // `RwLock<T: ?Sized>` works with unsized contents such as slices.
    fn test_rwlock_unsized() {
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
            let b = &mut *rw.write().unwrap();
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*rw.read().unwrap(), comp);

    // `try_write` must fail with `WouldBlock` while a read guard is live.
    fn test_rwlock_try_write() {
        let lock = RwLock::new(0isize);
        let read_guard = lock.read().unwrap();

        let write_result = lock.try_write();
            Err(TryLockError::WouldBlock) => (),
            Ok(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
            Err(_) => assert!(false, "unexpected error"),

    // `into_inner` returns the protected value by move.
    fn test_into_inner() {
        let m = RwLock::new(NonCopy(10));
        assert_eq!(m.into_inner().unwrap(), NonCopy(10));
    // `into_inner` must not drop the value; it is dropped exactly once,
    // by the caller, as counted via the shared atomic.
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
                self.0.fetch_add(1, Ordering::SeqCst);
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = RwLock::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
            let _inner = m.into_inner().unwrap();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);

    // `into_inner` on a poisoned lock yields Err, but the value is still
    // recoverable through `PoisonError::into_inner`.
    fn test_into_inner_poison() {
        let m = Arc::new(RwLock::new(NonCopy(10)));
        let _ = thread::spawn(move || {
            let _lock = m2.write().unwrap();
            panic!("test panic in inner thread to poison RwLock");
        assert!(m.is_poisoned());
        match Arc::try_unwrap(m).unwrap().into_inner() {
            Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
            Ok(x) => panic!("into_inner of poisoned RwLock is Ok: {:?}", x),

        // `get_mut` gives lock-free mutable access through `&mut self`.
        let mut m = RwLock::new(NonCopy(10));
        *m.get_mut().unwrap() = NonCopy(20);
        assert_eq!(m.into_inner().unwrap(), NonCopy(20));
    // `get_mut` on a poisoned lock yields Err with the value still
    // reachable through the error.
    fn test_get_mut_poison() {
        let m = Arc::new(RwLock::new(NonCopy(10)));
        let _ = thread::spawn(move || {
            let _lock = m2.write().unwrap();
            panic!("test panic in inner thread to poison RwLock");
        assert!(m.is_poisoned());
        match Arc::try_unwrap(m).unwrap().get_mut() {
            Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
            Ok(x) => panic!("get_mut of poisoned RwLock is Ok: {:?}", x),

    // A panic inside the `map` closure (while the write lock is held)
    // must poison the lock like any other write-locked panic.
    fn test_rwlock_write_map_poison() {
        let rwlock = Arc::new(RwLock::new(vec![1, 2]));
        let rwlock2 = rwlock.clone();

        thread::spawn(move || {
            let _ = RwLockWriteGuard::map::<usize, _>(rwlock2.write().unwrap(), |_| panic!());
        }).join().unwrap_err();

        match rwlock.read() {
            Ok(r) => panic!("Read lock on poisioned RwLock is Ok: {:?}", &*r),