1 use crate::cell::UnsafeCell;
4 use crate::ops::{Deref, DerefMut};
6 use crate::sys_common::poison::{self, LockResult, TryLockError, TryLockResult};
7 use crate::sys_common::rwlock as sys;
9 /// A reader-writer lock
11 /// This type of lock allows a number of readers or at most one writer at any
12 /// point in time. The write portion of this lock typically allows modification
13 /// of the underlying data (exclusive access) and the read portion of this lock
14 /// typically allows for read-only access (shared access).
16 /// In comparison, a [`Mutex`] does not distinguish between readers or writers
17 /// that acquire the lock, therefore blocking any threads waiting for the lock to
18 /// become available. An `RwLock` will allow any number of readers to acquire the
19 /// lock as long as a writer is not holding the lock.
21 /// The priority policy of the lock is dependent on the underlying operating
22 /// system's implementation, and this type does not guarantee that any
23 /// particular policy will be used.
25 /// The type parameter `T` represents the data that this lock protects. It is
26 /// required that `T` satisfies [`Send`] to be shared across threads and
27 /// [`Sync`] to allow concurrent access through readers. The RAII guards
28 /// returned from the locking methods implement [`Deref`][] (and [`DerefMut`]
29 /// for the `write` methods) to allow access to the content of the lock.
33 /// An `RwLock`, like [`Mutex`], will become poisoned on a panic. Note, however,
34 /// that an `RwLock` may only be poisoned if a panic occurs while it is locked
35 /// exclusively (write mode). If a panic occurs in any reader, then the lock
36 /// will not be poisoned.
41 /// use std::sync::RwLock;
43 /// let lock = RwLock::new(5);
45 /// // many reader locks can be held at once
47 /// let r1 = lock.read().unwrap();
48 /// let r2 = lock.read().unwrap();
49 /// assert_eq!(*r1, 5);
50 /// assert_eq!(*r2, 5);
51 /// } // read locks are dropped at this point
53 /// // only one write lock may be held, however
55 /// let mut w = lock.write().unwrap();
57 /// assert_eq!(*w, 6);
58 /// } // write lock is dropped here
61 /// [`Deref`]: ../../std/ops/trait.Deref.html
62 /// [`DerefMut`]: ../../std/ops/trait.DerefMut.html
63 /// [`Send`]: ../../std/marker/trait.Send.html
64 /// [`Sync`]: ../../std/marker/trait.Sync.html
65 /// [`Mutex`]: struct.Mutex.html
66 #[stable(feature = "rust1", since = "1.0.0")]
// Reader-writer lock over `T`. The platform lock is boxed, presumably so it
// keeps a stable address the OS primitive can rely on — TODO confirm against
// the `sys` implementation.
67 pub struct RwLock<T: ?Sized> {
68 inner: Box<sys::RWLock>,
// NOTE(review): the `poison: poison::Flag` and `data: UnsafeCell<T>` fields
// are elided from this excerpt; methods below reference `self.poison` and
// `self.data`.
// SAFETY: sending the lock moves the protected `T` with it, so `T: Send`
// is the only requirement.
73 #[stable(feature = "rust1", since = "1.0.0")]
74 unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
// SAFETY: shared access hands out `&T` to concurrent readers, hence the
// additional `T: Sync` bound.
75 #[stable(feature = "rust1", since = "1.0.0")]
76 unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
78 /// RAII structure used to release the shared read access of a lock when
81 /// This structure is created by the [`read`] and [`try_read`] methods on
84 /// [`read`]: struct.RwLock.html#method.read
85 /// [`try_read`]: struct.RwLock.html#method.try_read
86 /// [`RwLock`]: struct.RwLock.html
87 #[must_use = "if unused the RwLock will immediately unlock"]
88 #[stable(feature = "rust1", since = "1.0.0")]
89 pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
// Borrow of the owning lock; the `Drop` impl below calls `read_unlock`
// through it.
90 __lock: &'a RwLock<T>,
// `!Send`: some platform lock implementations require the unlocking thread
// to be the locking thread — TODO confirm; this matches the guard being
// explicitly non-Send below.
93 #[stable(feature = "rust1", since = "1.0.0")]
94 impl<T: ?Sized> !Send for RwLockReadGuard<'_, T> {}
// SAFETY: the guard only exposes `&T` (see its `Deref` impl), so sharing the
// guard across threads requires `T: Sync`.
96 #[stable(feature = "rwlock_guard_sync", since = "1.23.0")]
97 unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {}
99 /// RAII structure used to release the exclusive write access of a lock when
102 /// This structure is created by the [`write`] and [`try_write`] methods
105 /// [`write`]: struct.RwLock.html#method.write
106 /// [`try_write`]: struct.RwLock.html#method.try_write
107 /// [`RwLock`]: struct.RwLock.html
108 #[must_use = "if unused the RwLock will immediately unlock"]
109 #[stable(feature = "rust1", since = "1.0.0")]
110 pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
// Borrow of the owning lock; `Drop` below releases the write lock through it.
111 __lock: &'a RwLock<T>,
// Panic-tracking snapshot: `Drop` passes this to `poison.done` so the lock is
// marked poisoned if a panic occurred while the write guard was held.
112 __poison: poison::Guard,
// `!Send` for the same platform-unlock reason as the read guard — TODO confirm.
115 #[stable(feature = "rust1", since = "1.0.0")]
116 impl<T: ?Sized> !Send for RwLockWriteGuard<'_, T> {}
// SAFETY: sharing the guard only exposes `&T`, so `T: Sync` suffices.
118 #[stable(feature = "rwlock_guard_sync", since = "1.23.0")]
119 unsafe impl<T: ?Sized + Sync> Sync for RwLockWriteGuard<'_, T> {}
122 /// Creates a new instance of an `RwLock<T>` which is unlocked.
127 /// use std::sync::RwLock;
129 /// let lock = RwLock::new(5);
131 #[stable(feature = "rust1", since = "1.0.0")]
132 pub fn new(t: T) -> RwLock<T> {
// Fresh, unlocked, unpoisoned lock wrapping `t`.
// NOTE(review): the enclosing `impl<T> RwLock<T>` header and the `RwLock {`
// struct-literal line are elided from this excerpt.
134 inner: box sys::RWLock::new(),
135 poison: poison::Flag::new(),
136 data: UnsafeCell::new(t),
141 impl<T: ?Sized> RwLock<T> {
142 /// Locks this rwlock with shared read access, blocking the current thread
143 /// until it can be acquired.
145 /// The calling thread will be blocked until there are no more writers which
146 /// hold the lock. There may be other readers currently inside the lock when
147 /// this method returns. This method does not provide any guarantees with
148 /// respect to the ordering of whether contentious readers or writers will
149 /// acquire the lock first.
151 /// Returns an RAII guard which will release this thread's shared access
152 /// once it is dropped.
156 /// This function will return an error if the RwLock is poisoned. An RwLock
157 /// is poisoned whenever a writer panics while holding an exclusive lock.
158 /// The failure will occur immediately after the lock has been acquired.
162 /// This function might panic when called if the lock is already held by the current thread.
167 /// use std::sync::{Arc, RwLock};
170 /// let lock = Arc::new(RwLock::new(1));
171 /// let c_lock = lock.clone();
173 /// let n = lock.read().unwrap();
174 /// assert_eq!(*n, 1);
176 /// thread::spawn(move || {
177 ///     let r = c_lock.read();
178 ///     assert!(r.is_ok());
179 /// }).join().unwrap();
182 #[stable(feature = "rust1", since = "1.0.0")]
183 pub fn read(&self) -> LockResult<RwLockReadGuard<'_, T>> {
// NOTE(review): the `unsafe` block and the `self.inner.read()` call that
// actually acquires the sys lock are elided from this excerpt; only the
// guard construction (which checks the poison flag) is visible.
186 RwLockReadGuard::new(self)
190 /// Attempts to acquire this rwlock with shared read access.
192 /// If the access could not be granted at this time, then `Err` is returned.
193 /// Otherwise, an RAII guard is returned which will release the shared access
194 /// when it is dropped.
196 /// This function does not block.
198 /// This function does not provide any guarantees with respect to the ordering
199 /// of whether contentious readers or writers will acquire the lock first.
203 /// This function will return an error if the RwLock is poisoned. An RwLock
204 /// is poisoned whenever a writer panics while holding an exclusive lock. An
205 /// error will only be returned if the lock would have otherwise been
211 /// use std::sync::RwLock;
213 /// let lock = RwLock::new(1);
215 /// match lock.try_read() {
216 ///     Ok(n) => assert_eq!(*n, 1),
217 ///     Err(_) => unreachable!(),
221 #[stable(feature = "rust1", since = "1.0.0")]
222 pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<'_, T>> {
// Non-blocking acquire: `try_read` on the sys lock either succeeds now or
// reports `WouldBlock`. The `?` converts a poison error into
// `TryLockError::Poisoned`.
// NOTE(review): the surrounding `unsafe` block and the `} else {` line are
// elided from this excerpt.
224 if self.inner.try_read() {
225 Ok(RwLockReadGuard::new(self)?)
227 Err(TryLockError::WouldBlock)
232 /// Locks this rwlock with exclusive write access, blocking the current
233 /// thread until it can be acquired.
235 /// This function will not return while other writers or other readers
236 /// currently have access to the lock.
238 /// Returns an RAII guard which will drop the write access of this rwlock
243 /// This function will return an error if the RwLock is poisoned. An RwLock
244 /// is poisoned whenever a writer panics while holding an exclusive lock.
245 /// An error will be returned when the lock is acquired.
249 /// This function might panic when called if the lock is already held by the current thread.
254 /// use std::sync::RwLock;
256 /// let lock = RwLock::new(1);
258 /// let mut n = lock.write().unwrap();
261 /// assert!(lock.try_read().is_err());
264 #[stable(feature = "rust1", since = "1.0.0")]
265 pub fn write(&self) -> LockResult<RwLockWriteGuard<'_, T>> {
// NOTE(review): the `unsafe` block and the blocking `self.inner.write()`
// call are elided from this excerpt; only the guard construction (which
// checks the poison flag) is visible.
268 RwLockWriteGuard::new(self)
272 /// Attempts to lock this rwlock with exclusive write access.
274 /// If the lock could not be acquired at this time, then `Err` is returned.
275 /// Otherwise, an RAII guard is returned which will release the lock when
278 /// This function does not block.
280 /// This function does not provide any guarantees with respect to the ordering
281 /// of whether contentious readers or writers will acquire the lock first.
285 /// This function will return an error if the RwLock is poisoned. An RwLock
286 /// is poisoned whenever a writer panics while holding an exclusive lock. An
287 /// error will only be returned if the lock would have otherwise been
293 /// use std::sync::RwLock;
295 /// let lock = RwLock::new(1);
297 /// let n = lock.read().unwrap();
298 /// assert_eq!(*n, 1);
300 /// assert!(lock.try_write().is_err());
303 #[stable(feature = "rust1", since = "1.0.0")]
304 pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
// Non-blocking exclusive acquire; mirrors `try_read` above.
// NOTE(review): the surrounding `unsafe` block and the `} else {` line are
// elided from this excerpt.
306 if self.inner.try_write() {
307 Ok(RwLockWriteGuard::new(self)?)
309 Err(TryLockError::WouldBlock)
314 /// Determines whether the lock is poisoned.
316 /// If another thread is active, the lock can still become poisoned at any
317 /// time. You should not trust a `false` value for program correctness
318 /// without additional synchronization.
323 /// use std::sync::{Arc, RwLock};
326 /// let lock = Arc::new(RwLock::new(0));
327 /// let c_lock = lock.clone();
329 /// let _ = thread::spawn(move || {
330 ///     let _lock = c_lock.write().unwrap();
331 ///     panic!(); // the lock gets poisoned
333 /// assert_eq!(lock.is_poisoned(), true);
336 #[stable(feature = "sync_poison", since = "1.2.0")]
337 pub fn is_poisoned(&self) -> bool {
// NOTE(review): the body (a read of the poison flag, e.g.
// `self.poison.get()`) is elided from this excerpt — confirm against the
// full source.
341 /// Consumes this `RwLock`, returning the underlying data.
345 /// This function will return an error if the RwLock is poisoned. An RwLock
346 /// is poisoned whenever a writer panics while holding an exclusive lock. An
347 /// error will only be returned if the lock would have otherwise been
353 /// use std::sync::RwLock;
355 /// let lock = RwLock::new(String::new());
357 /// let mut s = lock.write().unwrap();
358 /// *s = "modified".to_owned();
360 /// assert_eq!(lock.into_inner().unwrap(), "modified");
362 #[stable(feature = "rwlock_into_inner", since = "1.6.0")]
363 pub fn into_inner(self) -> LockResult<T> where T: Sized {
364 // We know statically that there are no outstanding references to
365 // `self` so there's no need to lock the inner lock.
367 // To get the inner value, we'd like to call `data.into_inner()`,
368 // but because `RwLock` impl-s `Drop`, we can't move out of it, so
369 // we'll have to destructure it manually instead.
371 // Like `let RwLock { inner, poison, data } = self`.
372 let (inner, poison, data) = {
373 let RwLock { ref inner, ref poison, ref data } = self;
374 (ptr::read(inner), ptr::read(poison), ptr::read(data))
// NOTE(review): this excerpt elides the surrounding `unsafe` block, the
// closing of this inner scope, and — critically — a `mem::forget(self)`
// that must follow the `ptr::read`s to stop `Drop` from destroying the sys
// lock a second time. Without it this code would be unsound; confirm
// against the full source. (`ptr`/`mem` imports are also elided above.)
377 inner.destroy(); // Keep in sync with the `Drop` impl.
// Surface a poison error if a writer panicked; the inner `T` is still
// recoverable from the error via `PoisonError::into_inner`.
380 poison::map_result(poison.borrow(), |_| data.into_inner())
384 /// Returns a mutable reference to the underlying data.
386 /// Since this call borrows the `RwLock` mutably, no actual locking needs to
387 /// take place -- the mutable borrow statically guarantees no locks exist.
391 /// This function will return an error if the RwLock is poisoned. An RwLock
392 /// is poisoned whenever a writer panics while holding an exclusive lock. An
393 /// error will only be returned if the lock would have otherwise been
399 /// use std::sync::RwLock;
401 /// let mut lock = RwLock::new(0);
402 /// *lock.get_mut().unwrap() = 10;
403 /// assert_eq!(*lock.read().unwrap(), 10);
405 #[stable(feature = "rwlock_get_mut", since = "1.6.0")]
406 pub fn get_mut(&mut self) -> LockResult<&mut T> {
407 // We know statically that there are no other references to `self`, so
408 // there's no need to lock the inner lock.
// SAFETY: `&mut self` guarantees exclusive access, so reaching into the
// `UnsafeCell` without taking the lock cannot race.
409 let data = unsafe { &mut *self.data.get() };
410 poison::map_result(self.poison.borrow(), |_| data)
414 #[stable(feature = "rust1", since = "1.0.0")]
// `#[may_dangle]` relaxes dropck so the lock can be dropped even when `T`
// contains borrows that have expired — the drop glue here never touches the
// data, only the sys lock. (Reviewer note: rationale inferred from the
// attribute; confirm against dropck documentation.)
415 unsafe impl<#[may_dangle] T: ?Sized> Drop for RwLock<T> {
// NOTE(review): the `fn drop(&mut self) {` line is elided from this excerpt.
417 // IMPORTANT: This code needs to be kept in sync with `RwLock::into_inner`.
418 unsafe { self.inner.destroy() }
422 #[stable(feature = "rust1", since = "1.0.0")]
// Debug-formats the protected value without blocking: uses `try_read`, shows
// the data even when poisoned, and prints "<locked>" when a writer holds it.
423 impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
424 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
425 match self.try_read() {
426 Ok(guard) => f.debug_struct("RwLock").field("data", &&*guard).finish(),
427 Err(TryLockError::Poisoned(err)) => {
// Poisoned is not "unreadable": the guard inside the error still grants
// access to the data.
428 f.debug_struct("RwLock").field("data", &&**err.get_ref()).finish()
430 Err(TryLockError::WouldBlock) => {
// Local placeholder type so the field renders as the bare token <locked>.
431 struct LockedPlaceholder;
432 impl fmt::Debug for LockedPlaceholder {
433 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
434 f.write_str("<locked>")
438 f.debug_struct("RwLock").field("data", &LockedPlaceholder).finish()
444 #[stable(feature = "rw_lock_default", since = "1.10.0")]
445 impl<T: Default> Default for RwLock<T> {
446 /// Creates a new `RwLock<T>`, with the `Default` value for T.
447 fn default() -> RwLock<T> {
448 RwLock::new(Default::default())
452 #[stable(feature = "rw_lock_from", since = "1.24.0")]
453 impl<T> From<T> for RwLock<T> {
454 /// Creates a new instance of an `RwLock<T>` which is unlocked.
455 /// This is equivalent to [`RwLock::new`].
456 fn from(t: T) -> Self {
// NOTE(review): the body (`RwLock::new(t)`) is elided from this excerpt.
461 impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
// Internal constructor: the caller must already hold the shared lock
// (hence `unsafe`). Converts the poison flag into a `LockResult`.
462 unsafe fn new(lock: &'rwlock RwLock<T>)
463 -> LockResult<RwLockReadGuard<'rwlock, T>> {
464 poison::map_result(lock.poison.borrow(), |_| {
// NOTE(review): the guard struct-literal (`RwLockReadGuard { __lock: lock }`)
// and closing braces are elided from this excerpt.
472 impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
// Internal constructor: the caller must already hold the exclusive lock
// (hence `unsafe`). Unlike the read guard, this keeps the poison `guard`
// snapshot so `Drop` can record a panic that happens while locked.
473 unsafe fn new(lock: &'rwlock RwLock<T>)
474 -> LockResult<RwLockWriteGuard<'rwlock, T>> {
475 poison::map_result(lock.poison.borrow(), |guard| {
// NOTE(review): the guard struct-literal and closing braces are elided from
// this excerpt.
484 #[stable(feature = "std_debug", since = "1.16.0")]
485 impl<T: fmt::Debug> fmt::Debug for RwLockReadGuard<'_, T> {
486 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
487 f.debug_struct("RwLockReadGuard")
488 .field("lock", &self.__lock)
// NOTE(review): the `.finish()` call and closing braces are elided.
493 #[stable(feature = "std_guard_impls", since = "1.20.0")]
// Forwards `Display` to the protected value.
494 impl<T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'_, T> {
495 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// NOTE(review): the body (delegation like `(**self).fmt(f)`) is elided.
500 #[stable(feature = "std_debug", since = "1.16.0")]
501 impl<T: fmt::Debug> fmt::Debug for RwLockWriteGuard<'_, T> {
502 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
503 f.debug_struct("RwLockWriteGuard")
504 .field("lock", &self.__lock)
// NOTE(review): the `.finish()` call and closing braces are elided.
509 #[stable(feature = "std_guard_impls", since = "1.20.0")]
// Forwards `Display` to the protected value.
510 impl<T: ?Sized + fmt::Display> fmt::Display for RwLockWriteGuard<'_, T> {
511 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// NOTE(review): the body (delegation like `(**self).fmt(f)`) is elided.
516 #[stable(feature = "rust1", since = "1.0.0")]
517 impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
520 fn deref(&self) -> &T {
// SAFETY: a live read guard proves the shared lock is held, so handing out
// `&T` from the `UnsafeCell` cannot race with a writer.
521 unsafe { &*self.__lock.data.get() }
525 #[stable(feature = "rust1", since = "1.0.0")]
526 impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
529 fn deref(&self) -> &T {
// SAFETY: a live write guard proves exclusive access, so a shared view of
// the data is sound.
530 unsafe { &*self.__lock.data.get() }
534 #[stable(feature = "rust1", since = "1.0.0")]
535 impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
536 fn deref_mut(&mut self) -> &mut T {
// SAFETY: exclusive lock is held for the guard's lifetime and `&mut self`
// prevents aliasing through the guard itself.
537 unsafe { &mut *self.__lock.data.get() }
541 #[stable(feature = "rust1", since = "1.0.0")]
// Releases the shared lock when the guard goes out of scope.
542 impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> {
// NOTE(review): the `fn drop(&mut self) {` line is elided from this excerpt.
// SAFETY: the guard's existence proves this thread holds a read lock.
544 unsafe { self.__lock.inner.read_unlock(); }
548 #[stable(feature = "rust1", since = "1.0.0")]
// Releases the exclusive lock; first records whether a panic occurred while
// the guard was held so the lock becomes poisoned.
549 impl<T: ?Sized> Drop for RwLockWriteGuard<'_, T> {
// NOTE(review): the `fn drop(&mut self) {` line is elided from this excerpt.
551 self.__lock.poison.done(&self.__poison);
// SAFETY: the guard's existence proves this thread holds the write lock.
552 unsafe { self.__lock.inner.write_unlock(); }
556 #[cfg(all(test, not(target_os = "emscripten")))]
// NOTE(review): throughout this excerpt of the test module the `mod tests {`
// header, `#[test]` attributes, and several `fn` headers / closing braces
// are elided.
558 use rand::{self, Rng};
559 use crate::sync::mpsc::channel;
561 use crate::sync::{Arc, RwLock, TryLockError};
562 use crate::sync::atomic::{AtomicUsize, Ordering};
// Non-`Copy` payload (definition elided) used to check move semantics of
// `into_inner`/`get_mut` below.
564 #[derive(Eq, PartialEq, Debug)]
// Smoke test: read, write, two simultaneous reads, write again — all on the
// same lock, in sequence.
569 let l = RwLock::new(());
570 drop(l.read().unwrap());
571 drop(l.write().unwrap());
572 drop((l.read().unwrap(), l.read().unwrap()));
573 drop(l.write().unwrap());
579 const M: usize = 1000;
// Stress test: N threads each perform M random lock operations, mostly reads
// with an occasional write (probability 1/N per iteration).
581 let r = Arc::new(RwLock::new(()));
// Channel used only as a completion signal; senders drop when threads finish.
583 let (tx, rx) = channel::<()>();
587 thread::spawn(move || {
588 let mut rng = rand::thread_rng();
590 if rng.gen_bool(1.0 / (N as f64)) {
591 drop(r.write().unwrap());
593 drop(r.read().unwrap());
604 fn test_rw_arc_poison_wr() {
// A panic while a WRITE guard is held must poison the lock: a later read
// fails.
605 let arc = Arc::new(RwLock::new(1));
606 let arc2 = arc.clone();
607 let _: Result<(), _> = thread::spawn(move || {
608 let _lock = arc2.write().unwrap();
// NOTE(review): the panic inside the closure and the `.join()` lines are
// elided from this excerpt.
611 assert!(arc.read().is_err());
615 fn test_rw_arc_poison_ww() {
// Same as above, but the later WRITE must also fail, and `is_poisoned`
// flips from false to true.
616 let arc = Arc::new(RwLock::new(1));
617 assert!(!arc.is_poisoned());
618 let arc2 = arc.clone();
619 let _: Result<(), _> = thread::spawn(move || {
620 let _lock = arc2.write().unwrap();
623 assert!(arc.write().is_err());
624 assert!(arc.is_poisoned());
628 fn test_rw_arc_no_poison_rr() {
// A panic while only a READ guard is held must NOT poison the lock.
629 let arc = Arc::new(RwLock::new(1));
630 let arc2 = arc.clone();
631 let _: Result<(), _> = thread::spawn(move || {
632 let _lock = arc2.read().unwrap();
635 let lock = arc.read().unwrap();
636 assert_eq!(*lock, 1);
639 fn test_rw_arc_no_poison_rw() {
// Panicking reader must not poison; a subsequent write still succeeds.
640 let arc = Arc::new(RwLock::new(1));
641 let arc2 = arc.clone();
642 let _: Result<(), _> = thread::spawn(move || {
643 let _lock = arc2.read().unwrap();
646 let lock = arc.write().unwrap();
647 assert_eq!(*lock, 1);
652 let arc = Arc::new(RwLock::new(0));
// Writer thread increments the counter (to 10, per the final assert) while
// reader threads concurrently observe it; readers must never see a torn or
// decreasing value. (Their asserts are elided from this excerpt.)
653 let arc2 = arc.clone();
654 let (tx, rx) = channel();
656 thread::spawn(move || {
657 let mut lock = arc2.write().unwrap();
// NOTE(review): the increment loop inside the writer is elided.
664 tx.send(()).unwrap();
667 // Readers try to catch the writer in the act
668 let mut children = Vec::new();
670 let arc3 = arc.clone();
671 children.push(thread::spawn(move || {
672 let lock = arc3.read().unwrap();
677 // Wait for children to pass their asserts
679 assert!(r.join().is_ok());
682 // Wait for writer to finish
684 let lock = arc.read().unwrap();
685 assert_eq!(*lock, 10);
689 fn test_rw_arc_access_in_unwind() {
// The lock must remain usable from a `Drop` impl that runs during unwinding:
// `Unwinder::drop` takes the write lock while its thread is panicking.
690 let arc = Arc::new(RwLock::new(1));
691 let arc2 = arc.clone();
692 let _ = thread::spawn(move || -> () {
694 i: Arc<RwLock<isize>>,
696 impl Drop for Unwinder {
698 let mut lock = self.i.write().unwrap();
// NOTE(review): the increment (`*lock += 1`, given the final assert of 2)
// and the `panic!()` that triggers unwinding are elided from this excerpt.
702 let _u = Unwinder { i: arc2 };
705 let lock = arc.read().unwrap();
706 assert_eq!(*lock, 2);
710 fn test_rwlock_unsized() {
// `RwLock<[i32]>` (unsized payload) must still support read/write access.
711 let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
713 let b = &mut *rw.write().unwrap();
// NOTE(review): the element mutations that turn [1,2,3] into [4,2,5] are
// elided from this excerpt.
717 let comp: &[i32] = &[4, 2, 5];
718 assert_eq!(&*rw.read().unwrap(), comp);
722 fn test_rwlock_try_write() {
// While a read guard is alive, `try_write` must fail with `WouldBlock`
// (not succeed and not report poison).
723 let lock = RwLock::new(0isize);
724 let read_guard = lock.read().unwrap();
726 let write_result = lock.try_write();
728 Err(TryLockError::WouldBlock) => (),
729 Ok(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
730 Err(_) => assert!(false, "unexpected error"),
737 fn test_into_inner() {
// `into_inner` moves the value out of an unpoisoned lock.
738 let m = RwLock::new(NonCopy(10));
739 assert_eq!(m.into_inner().unwrap(), NonCopy(10));
743 fn test_into_inner_drop() {
// `into_inner` must not drop the payload itself: the drop count stays 0
// until the extracted value goes out of scope, then becomes exactly 1.
744 struct Foo(Arc<AtomicUsize>);
747 self.0.fetch_add(1, Ordering::SeqCst);
750 let num_drops = Arc::new(AtomicUsize::new(0));
751 let m = RwLock::new(Foo(num_drops.clone()));
752 assert_eq!(num_drops.load(Ordering::SeqCst), 0);
754 let _inner = m.into_inner().unwrap();
755 assert_eq!(num_drops.load(Ordering::SeqCst), 0);
757 assert_eq!(num_drops.load(Ordering::SeqCst), 1);
761 fn test_into_inner_poison() {
// On a poisoned lock, `into_inner` returns Err, but the data is still
// recoverable from the poison error.
762 let m = Arc::new(RwLock::new(NonCopy(10)));
764 let _ = thread::spawn(move || {
765 let _lock = m2.write().unwrap();
766 panic!("test panic in inner thread to poison RwLock");
769 assert!(m.is_poisoned());
770 match Arc::try_unwrap(m).unwrap().into_inner() {
771 Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
772 Ok(x) => panic!("into_inner of poisoned RwLock is Ok: {:?}", x),
// `get_mut` gives lock-free mutable access through `&mut self`.
// NOTE(review): this test's `fn test_get_mut()` header is elided.
778 let mut m = RwLock::new(NonCopy(10));
779 *m.get_mut().unwrap() = NonCopy(20);
780 assert_eq!(m.into_inner().unwrap(), NonCopy(20));
784 fn test_get_mut_poison() {
// Like `into_inner_poison`: `get_mut` on a poisoned lock returns Err, with
// the data still reachable through the poison error.
785 let m = Arc::new(RwLock::new(NonCopy(10)));
787 let _ = thread::spawn(move || {
788 let _lock = m2.write().unwrap();
789 panic!("test panic in inner thread to poison RwLock");
792 assert!(m.is_poisoned());
793 match Arc::try_unwrap(m).unwrap().get_mut() {
794 Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
795 Ok(x) => panic!("get_mut of poisoned RwLock is Ok: {:?}", x),