use crate::cell::UnsafeCell;
use crate::fmt;
use crate::mem;
use crate::ops::{Deref, DerefMut};
use crate::ptr;
use crate::sys_common::poison::{self, LockResult, TryLockError, TryLockResult};
use crate::sys_common::rwlock as sys;
9 /// A reader-writer lock
11 /// This type of lock allows a number of readers or at most one writer at any
12 /// point in time. The write portion of this lock typically allows modification
13 /// of the underlying data (exclusive access) and the read portion of this lock
14 /// typically allows for read-only access (shared access).
16 /// In comparison, a [`Mutex`] does not distinguish between readers or writers
17 /// that acquire the lock, therefore blocking any threads waiting for the lock to
18 /// become available. An `RwLock` will allow any number of readers to acquire the
19 /// lock as long as a writer is not holding the lock.
21 /// The priority policy of the lock is dependent on the underlying operating
22 /// system's implementation, and this type does not guarantee that any
23 /// particular policy will be used.
25 /// The type parameter `T` represents the data that this lock protects. It is
26 /// required that `T` satisfies [`Send`] to be shared across threads and
27 /// [`Sync`] to allow concurrent access through readers. The RAII guards
28 /// returned from the locking methods implement [`Deref`] (and [`DerefMut`]
29 /// for the `write` methods) to allow access to the content of the lock.
33 /// An `RwLock`, like [`Mutex`], will become poisoned on a panic. Note, however,
34 /// that an `RwLock` may only be poisoned if a panic occurs while it is locked
35 /// exclusively (write mode). If a panic occurs in any reader, then the lock
36 /// will not be poisoned.
41 /// use std::sync::RwLock;
43 /// let lock = RwLock::new(5);
45 /// // many reader locks can be held at once
47 /// let r1 = lock.read().unwrap();
48 /// let r2 = lock.read().unwrap();
49 /// assert_eq!(*r1, 5);
50 /// assert_eq!(*r2, 5);
51 /// } // read locks are dropped at this point
53 /// // only one write lock may be held, however
55 /// let mut w = lock.write().unwrap();
57 /// assert_eq!(*w, 6);
58 /// } // write lock is dropped here
61 /// [`Deref`]: ../../std/ops/trait.Deref.html
62 /// [`DerefMut`]: ../../std/ops/trait.DerefMut.html
63 /// [`Send`]: ../../std/marker/trait.Send.html
64 /// [`Sync`]: ../../std/marker/trait.Sync.html
65 /// [`Mutex`]: struct.Mutex.html
66 #[stable(feature = "rust1", since = "1.0.0")]
67 pub struct RwLock<T: ?Sized> {
68 inner: Box<sys::RWLock>,
// SAFETY: sending the `RwLock` moves the contained `T` with it, so `T: Send`
// is sufficient; no state is shared by a move.
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
// SAFETY: sharing `&RwLock<T>` allows other threads both to take the value's
// mutable/owned forms through the lock (needs `T: Send`) and to read it
// concurrently through read guards (needs `T: Sync`).
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
78 /// RAII structure used to release the shared read access of a lock when
81 /// This structure is created by the [`read`] and [`try_read`] methods on
84 /// [`read`]: struct.RwLock.html#method.read
85 /// [`try_read`]: struct.RwLock.html#method.try_read
86 /// [`RwLock`]: struct.RwLock.html
87 #[must_use = "if unused the RwLock will immediately unlock"]
88 #[stable(feature = "rust1", since = "1.0.0")]
89 pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
93 #[stable(feature = "rust1", since = "1.0.0")]
94 impl<T: ?Sized> !Send for RwLockReadGuard<'_, T> {}
96 #[stable(feature = "rwlock_guard_sync", since = "1.23.0")]
97 unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {}
99 /// RAII structure used to release the exclusive write access of a lock when
102 /// This structure is created by the [`write`] and [`try_write`] methods
105 /// [`write`]: struct.RwLock.html#method.write
106 /// [`try_write`]: struct.RwLock.html#method.try_write
107 /// [`RwLock`]: struct.RwLock.html
108 #[must_use = "if unused the RwLock will immediately unlock"]
109 #[stable(feature = "rust1", since = "1.0.0")]
110 pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
112 poison: poison::Guard,
115 #[stable(feature = "rust1", since = "1.0.0")]
116 impl<T: ?Sized> !Send for RwLockWriteGuard<'_, T> {}
118 #[stable(feature = "rwlock_guard_sync", since = "1.23.0")]
119 unsafe impl<T: ?Sized + Sync> Sync for RwLockWriteGuard<'_, T> {}
122 /// Creates a new instance of an `RwLock<T>` which is unlocked.
127 /// use std::sync::RwLock;
129 /// let lock = RwLock::new(5);
131 #[stable(feature = "rust1", since = "1.0.0")]
132 pub fn new(t: T) -> RwLock<T> {
134 inner: box sys::RWLock::new(),
135 poison: poison::Flag::new(),
136 data: UnsafeCell::new(t),
141 impl<T: ?Sized> RwLock<T> {
142 /// Locks this rwlock with shared read access, blocking the current thread
143 /// until it can be acquired.
145 /// The calling thread will be blocked until there are no more writers which
146 /// hold the lock. There may be other readers currently inside the lock when
147 /// this method returns. This method does not provide any guarantees with
148 /// respect to the ordering of whether contentious readers or writers will
149 /// acquire the lock first.
151 /// Returns an RAII guard which will release this thread's shared access
152 /// once it is dropped.
156 /// This function will return an error if the RwLock is poisoned. An RwLock
157 /// is poisoned whenever a writer panics while holding an exclusive lock.
158 /// The failure will occur immediately after the lock has been acquired.
162 /// This function might panic when called if the lock is already held by the current thread.
167 /// use std::sync::{Arc, RwLock};
170 /// let lock = Arc::new(RwLock::new(1));
171 /// let c_lock = lock.clone();
173 /// let n = lock.read().unwrap();
174 /// assert_eq!(*n, 1);
176 /// thread::spawn(move || {
177 /// let r = c_lock.read();
178 /// assert!(r.is_ok());
179 /// }).join().unwrap();
182 #[stable(feature = "rust1", since = "1.0.0")]
183 pub fn read(&self) -> LockResult<RwLockReadGuard<'_, T>> {
186 RwLockReadGuard::new(self)
190 /// Attempts to acquire this rwlock with shared read access.
192 /// If the access could not be granted at this time, then `Err` is returned.
193 /// Otherwise, an RAII guard is returned which will release the shared access
194 /// when it is dropped.
196 /// This function does not block.
198 /// This function does not provide any guarantees with respect to the ordering
199 /// of whether contentious readers or writers will acquire the lock first.
203 /// This function will return an error if the RwLock is poisoned. An RwLock
204 /// is poisoned whenever a writer panics while holding an exclusive lock. An
205 /// error will only be returned if the lock would have otherwise been
211 /// use std::sync::RwLock;
213 /// let lock = RwLock::new(1);
215 /// match lock.try_read() {
216 /// Ok(n) => assert_eq!(*n, 1),
217 /// Err(_) => unreachable!(),
221 #[stable(feature = "rust1", since = "1.0.0")]
222 pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<'_, T>> {
224 if self.inner.try_read() {
225 Ok(RwLockReadGuard::new(self)?)
227 Err(TryLockError::WouldBlock)
232 /// Locks this rwlock with exclusive write access, blocking the current
233 /// thread until it can be acquired.
235 /// This function will not return while other writers or other readers
236 /// currently have access to the lock.
238 /// Returns an RAII guard which will drop the write access of this rwlock
243 /// This function will return an error if the RwLock is poisoned. An RwLock
244 /// is poisoned whenever a writer panics while holding an exclusive lock.
245 /// An error will be returned when the lock is acquired.
249 /// This function might panic when called if the lock is already held by the current thread.
254 /// use std::sync::RwLock;
256 /// let lock = RwLock::new(1);
258 /// let mut n = lock.write().unwrap();
261 /// assert!(lock.try_read().is_err());
264 #[stable(feature = "rust1", since = "1.0.0")]
265 pub fn write(&self) -> LockResult<RwLockWriteGuard<'_, T>> {
268 RwLockWriteGuard::new(self)
272 /// Attempts to lock this rwlock with exclusive write access.
274 /// If the lock could not be acquired at this time, then `Err` is returned.
275 /// Otherwise, an RAII guard is returned which will release the lock when
278 /// This function does not block.
280 /// This function does not provide any guarantees with respect to the ordering
281 /// of whether contentious readers or writers will acquire the lock first.
285 /// This function will return an error if the RwLock is poisoned. An RwLock
286 /// is poisoned whenever a writer panics while holding an exclusive lock. An
287 /// error will only be returned if the lock would have otherwise been
293 /// use std::sync::RwLock;
295 /// let lock = RwLock::new(1);
297 /// let n = lock.read().unwrap();
298 /// assert_eq!(*n, 1);
300 /// assert!(lock.try_write().is_err());
303 #[stable(feature = "rust1", since = "1.0.0")]
304 pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
306 if self.inner.try_write() {
307 Ok(RwLockWriteGuard::new(self)?)
309 Err(TryLockError::WouldBlock)
314 /// Determines whether the lock is poisoned.
316 /// If another thread is active, the lock can still become poisoned at any
317 /// time. You should not trust a `false` value for program correctness
318 /// without additional synchronization.
323 /// use std::sync::{Arc, RwLock};
326 /// let lock = Arc::new(RwLock::new(0));
327 /// let c_lock = lock.clone();
329 /// let _ = thread::spawn(move || {
330 /// let _lock = c_lock.write().unwrap();
331 /// panic!(); // the lock gets poisoned
333 /// assert_eq!(lock.is_poisoned(), true);
336 #[stable(feature = "sync_poison", since = "1.2.0")]
337 pub fn is_poisoned(&self) -> bool {
341 /// Consumes this `RwLock`, returning the underlying data.
345 /// This function will return an error if the RwLock is poisoned. An RwLock
346 /// is poisoned whenever a writer panics while holding an exclusive lock. An
347 /// error will only be returned if the lock would have otherwise been
353 /// use std::sync::RwLock;
355 /// let lock = RwLock::new(String::new());
357 /// let mut s = lock.write().unwrap();
358 /// *s = "modified".to_owned();
360 /// assert_eq!(lock.into_inner().unwrap(), "modified");
362 #[stable(feature = "rwlock_into_inner", since = "1.6.0")]
363 pub fn into_inner(self) -> LockResult<T>
367 // We know statically that there are no outstanding references to
368 // `self` so there's no need to lock the inner lock.
370 // To get the inner value, we'd like to call `data.into_inner()`,
371 // but because `RwLock` impl-s `Drop`, we can't move out of it, so
372 // we'll have to destructure it manually instead.
374 // Like `let RwLock { inner, poison, data } = self`.
375 let (inner, poison, data) = {
376 let RwLock { ref inner, ref poison, ref data } = self;
377 (ptr::read(inner), ptr::read(poison), ptr::read(data))
380 inner.destroy(); // Keep in sync with the `Drop` impl.
383 poison::map_result(poison.borrow(), |_| data.into_inner())
387 /// Returns a mutable reference to the underlying data.
389 /// Since this call borrows the `RwLock` mutably, no actual locking needs to
390 /// take place -- the mutable borrow statically guarantees no locks exist.
394 /// This function will return an error if the RwLock is poisoned. An RwLock
395 /// is poisoned whenever a writer panics while holding an exclusive lock. An
396 /// error will only be returned if the lock would have otherwise been
402 /// use std::sync::RwLock;
404 /// let mut lock = RwLock::new(0);
405 /// *lock.get_mut().unwrap() = 10;
406 /// assert_eq!(*lock.read().unwrap(), 10);
408 #[stable(feature = "rwlock_get_mut", since = "1.6.0")]
409 pub fn get_mut(&mut self) -> LockResult<&mut T> {
410 // We know statically that there are no other references to `self`, so
411 // there's no need to lock the inner lock.
412 let data = unsafe { &mut *self.data.get() };
413 poison::map_result(self.poison.borrow(), |_| data)
417 #[stable(feature = "rust1", since = "1.0.0")]
418 unsafe impl<#[may_dangle] T: ?Sized> Drop for RwLock<T> {
420 // IMPORTANT: This code needs to be kept in sync with `RwLock::into_inner`.
421 unsafe { self.inner.destroy() }
425 #[stable(feature = "rust1", since = "1.0.0")]
426 impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
427 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
428 match self.try_read() {
429 Ok(guard) => f.debug_struct("RwLock").field("data", &&*guard).finish(),
430 Err(TryLockError::Poisoned(err)) => {
431 f.debug_struct("RwLock").field("data", &&**err.get_ref()).finish()
433 Err(TryLockError::WouldBlock) => {
434 struct LockedPlaceholder;
435 impl fmt::Debug for LockedPlaceholder {
436 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
437 f.write_str("<locked>")
441 f.debug_struct("RwLock").field("data", &LockedPlaceholder).finish()
447 #[stable(feature = "rw_lock_default", since = "1.10.0")]
448 impl<T: Default> Default for RwLock<T> {
449 /// Creates a new `RwLock<T>`, with the `Default` value for T.
450 fn default() -> RwLock<T> {
451 RwLock::new(Default::default())
455 #[stable(feature = "rw_lock_from", since = "1.24.0")]
456 impl<T> From<T> for RwLock<T> {
457 /// Creates a new instance of an `RwLock<T>` which is unlocked.
458 /// This is equivalent to [`RwLock::new`].
460 /// [`RwLock::new`]: ../../std/sync/struct.RwLock.html#method.new
461 fn from(t: T) -> Self {
466 impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
467 unsafe fn new(lock: &'rwlock RwLock<T>) -> LockResult<RwLockReadGuard<'rwlock, T>> {
468 poison::map_result(lock.poison.borrow(), |_| RwLockReadGuard { lock: lock })
472 impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
473 unsafe fn new(lock: &'rwlock RwLock<T>) -> LockResult<RwLockWriteGuard<'rwlock, T>> {
474 poison::map_result(lock.poison.borrow(), |guard| RwLockWriteGuard {
481 #[stable(feature = "std_debug", since = "1.16.0")]
482 impl<T: fmt::Debug> fmt::Debug for RwLockReadGuard<'_, T> {
483 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
484 f.debug_struct("RwLockReadGuard").field("lock", &self.lock).finish()
488 #[stable(feature = "std_guard_impls", since = "1.20.0")]
489 impl<T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'_, T> {
490 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
495 #[stable(feature = "std_debug", since = "1.16.0")]
496 impl<T: fmt::Debug> fmt::Debug for RwLockWriteGuard<'_, T> {
497 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
498 f.debug_struct("RwLockWriteGuard").field("lock", &self.lock).finish()
502 #[stable(feature = "std_guard_impls", since = "1.20.0")]
503 impl<T: ?Sized + fmt::Display> fmt::Display for RwLockWriteGuard<'_, T> {
504 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
509 #[stable(feature = "rust1", since = "1.0.0")]
510 impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
513 fn deref(&self) -> &T {
514 unsafe { &*self.lock.data.get() }
518 #[stable(feature = "rust1", since = "1.0.0")]
519 impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
522 fn deref(&self) -> &T {
523 unsafe { &*self.lock.data.get() }
527 #[stable(feature = "rust1", since = "1.0.0")]
528 impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
529 fn deref_mut(&mut self) -> &mut T {
530 unsafe { &mut *self.lock.data.get() }
534 #[stable(feature = "rust1", since = "1.0.0")]
535 impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> {
538 self.lock.inner.read_unlock();
543 #[stable(feature = "rust1", since = "1.0.0")]
544 impl<T: ?Sized> Drop for RwLockWriteGuard<'_, T> {
546 self.lock.poison.done(&self.poison);
548 self.lock.inner.write_unlock();
// NOTE(review): the `tests` module is truncated in this view — the
// `mod tests { ... }` wrapper, the `#[test]` attributes, several `fn`
// headers/bodies and many closing braces are not visible, so the fragments
// below are preserved byte-for-byte rather than rewritten.
#[cfg(all(test, not(target_os = "emscripten")))]
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sync::mpsc::channel;
use crate::sync::{Arc, RwLock, TryLockError};
use rand::{self, Rng};
// Payload type for tests needing comparable, non-Copy data (body elided here).
#[derive(Eq, PartialEq, Debug)]
// Smoke test: repeated read/write locking on a single thread.
let l = RwLock::new(());
drop(l.read().unwrap());
drop(l.write().unwrap());
drop((l.read().unwrap(), l.read().unwrap()));
drop(l.write().unwrap());
// Stress test (frob): threads each do M random read/write lock cycles.
const M: usize = 1000;
let r = Arc::new(RwLock::new(()));
let (tx, rx) = channel::<()>();
thread::spawn(move || {
let mut rng = rand::thread_rng();
// Roughly one write lock per N iterations; reads otherwise.
if rng.gen_bool(1.0 / (N as f64)) {
drop(r.write().unwrap());
drop(r.read().unwrap());
// A panic while write-locked poisons subsequent reads.
fn test_rw_arc_poison_wr() {
let arc = Arc::new(RwLock::new(1));
let arc2 = arc.clone();
let _: Result<(), _> = thread::spawn(move || {
let _lock = arc2.write().unwrap();
assert!(arc.read().is_err());
// A panic while write-locked poisons subsequent writes too.
fn test_rw_arc_poison_ww() {
let arc = Arc::new(RwLock::new(1));
assert!(!arc.is_poisoned());
let arc2 = arc.clone();
let _: Result<(), _> = thread::spawn(move || {
let _lock = arc2.write().unwrap();
assert!(arc.write().is_err());
assert!(arc.is_poisoned());
// A panic while only read-locked does NOT poison later reads.
fn test_rw_arc_no_poison_rr() {
let arc = Arc::new(RwLock::new(1));
let arc2 = arc.clone();
let _: Result<(), _> = thread::spawn(move || {
let _lock = arc2.read().unwrap();
let lock = arc.read().unwrap();
assert_eq!(*lock, 1);
// A panic while read-locked does NOT poison later writes.
fn test_rw_arc_no_poison_rw() {
let arc = Arc::new(RwLock::new(1));
let arc2 = arc.clone();
let _: Result<(), _> = thread::spawn(move || {
let _lock = arc2.read().unwrap();
let lock = arc.write().unwrap();
assert_eq!(*lock, 1);
// Writer increments under the lock while many readers observe invariants.
let arc = Arc::new(RwLock::new(0));
let arc2 = arc.clone();
let (tx, rx) = channel();
thread::spawn(move || {
let mut lock = arc2.write().unwrap();
tx.send(()).unwrap();
// Readers try to catch the writer in the act
let mut children = Vec::new();
let arc3 = arc.clone();
children.push(thread::spawn(move || {
let lock = arc3.read().unwrap();
// Wait for children to pass their asserts
assert!(r.join().is_ok());
// Wait for writer to finish
let lock = arc.read().unwrap();
assert_eq!(*lock, 10);
// The lock stays usable when accessed from a Drop impl during unwinding.
fn test_rw_arc_access_in_unwind() {
let arc = Arc::new(RwLock::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move || -> () {
i: Arc<RwLock<isize>>,
impl Drop for Unwinder {
let mut lock = self.i.write().unwrap();
let _u = Unwinder { i: arc2 };
let lock = arc.read().unwrap();
assert_eq!(*lock, 2);
// RwLock works with unsized contents (slices) through coercion.
fn test_rwlock_unsized() {
let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
let b = &mut *rw.write().unwrap();
let comp: &[i32] = &[4, 2, 5];
assert_eq!(&*rw.read().unwrap(), comp);
// try_write must fail with WouldBlock while a read guard is live.
fn test_rwlock_try_write() {
let lock = RwLock::new(0isize);
let read_guard = lock.read().unwrap();
let write_result = lock.try_write();
Err(TryLockError::WouldBlock) => (),
Ok(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
Err(_) => assert!(false, "unexpected error"),
// into_inner returns the stored value...
fn test_into_inner() {
let m = RwLock::new(NonCopy(10));
assert_eq!(m.into_inner().unwrap(), NonCopy(10));
// ...and drops the contents exactly once.
fn test_into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
self.0.fetch_add(1, Ordering::SeqCst);
let num_drops = Arc::new(AtomicUsize::new(0));
let m = RwLock::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
let _inner = m.into_inner().unwrap();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
// into_inner on a poisoned lock yields Err carrying the value.
fn test_into_inner_poison() {
let m = Arc::new(RwLock::new(NonCopy(10)));
let _ = thread::spawn(move || {
let _lock = m2.write().unwrap();
panic!("test panic in inner thread to poison RwLock");
assert!(m.is_poisoned());
match Arc::try_unwrap(m).unwrap().into_inner() {
Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
Ok(x) => panic!("into_inner of poisoned RwLock is Ok: {:?}", x),
// get_mut gives lock-free mutable access through &mut self.
let mut m = RwLock::new(NonCopy(10));
*m.get_mut().unwrap() = NonCopy(20);
assert_eq!(m.into_inner().unwrap(), NonCopy(20));
// get_mut on a poisoned lock yields Err carrying the reference.
fn test_get_mut_poison() {
let m = Arc::new(RwLock::new(NonCopy(10)));
let _ = thread::spawn(move || {
let _lock = m2.write().unwrap();
panic!("test panic in inner thread to poison RwLock");
assert!(m.is_poisoned());
match Arc::try_unwrap(m).unwrap().get_mut() {
Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
Ok(x) => panic!("get_mut of poisoned RwLock is Ok: {:?}", x),