1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
17 use ops::{Deref, DerefMut};
19 use sys_common::poison::{self, LockResult, TryLockError, TryLockResult};
20 use sys_common::rwlock as sys;
22 /// A reader-writer lock
24 /// This type of lock allows a number of readers or at most one writer at any
25 /// point in time. The write portion of this lock typically allows modification
26 /// of the underlying data (exclusive access) and the read portion of this lock
27 /// typically allows for read-only access (shared access).
29 /// The priority policy of the lock is dependent on the underlying operating
30 /// system's implementation, and this type does not guarantee that any
31 /// particular policy will be used.
33 /// The type parameter `T` represents the data that this lock protects. It is
34 /// required that `T` satisfies `Send` to be shared across threads and `Sync` to
35 /// allow concurrent access through readers. The RAII guards returned from the
36 /// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
37 /// to allow access to the content of the lock.
41 /// An `RwLock`, like `Mutex`, will become poisoned on a panic. Note, however,
42 /// that an `RwLock` may only be poisoned if a panic occurs while it is locked
43 /// exclusively (write mode). If a panic occurs in any reader, then the lock
44 /// will not be poisoned.
49 /// use std::sync::RwLock;
51 /// let lock = RwLock::new(5);
53 /// // many reader locks can be held at once
55 /// let r1 = lock.read().unwrap();
56 /// let r2 = lock.read().unwrap();
57 /// assert_eq!(*r1, 5);
58 /// assert_eq!(*r2, 5);
59 /// } // read locks are dropped at this point
61 /// // only one write lock may be held, however
63 /// let mut w = lock.write().unwrap();
65 /// assert_eq!(*w, 6);
66 /// } // write lock is dropped here
68 #[stable(feature = "rust1", since = "1.0.0")]
69 pub struct RwLock<T: ?Sized> {
// OS-level reader-writer lock. Boxed, presumably so the sys lock keeps a
// stable address even when the `RwLock` itself is moved — TODO confirm.
70 inner: Box<sys::RWLock>,
// NOTE(review): the `poison` flag and `data: UnsafeCell<T>` fields (plus the
// closing brace) appear to be missing from this excerpt — `new()` below
// initializes all three of `inner`, `poison`, and `data`.
75 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY-style note: the lock may move to another thread only when the
// protected value can itself be sent (`Send`) and shared between concurrent
// readers (`Sync`).
76 unsafe impl<T: ?Sized + Send + Sync> Send for RwLock<T> {}
77 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY-style note: `&RwLock<T>` hands out `&T` to many readers at once and
// `&mut T` to a single writer, so `T` must be both `Send` and `Sync` for the
// lock to be shared across threads.
78 unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
80 /// RAII structure used to release the shared read access of a lock when
83 #[stable(feature = "rust1", since = "1.0.0")]
84 pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
// Back-reference to the lock; used by `Deref` to reach the data and by
// `Drop` to release the shared lock.
85 __lock: &'a RwLock<T>,
// NOTE(review): the struct's closing brace appears to be missing from this
// excerpt.
88 #[stable(feature = "rust1", since = "1.0.0")]
// Guards must not migrate between threads: the underlying sys lock is
// presumably required to be released on the thread that acquired it —
// TODO confirm against `sys_common::rwlock`.
89 impl<'a, T: ?Sized> !marker::Send for RwLockReadGuard<'a, T> {}
91 /// RAII structure used to release the exclusive write access of a lock when
94 #[stable(feature = "rust1", since = "1.0.0")]
95 pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
// Back-reference to the lock; used by `Deref`/`DerefMut` and by `Drop`.
96 __lock: &'a RwLock<T>,
// Poison bookkeeping: `Drop` passes this to `poison.done()` so a panic while
// the write lock is held marks the lock as poisoned.
97 __poison: poison::Guard,
// NOTE(review): the struct's closing brace appears to be missing from this
// excerpt.
100 #[stable(feature = "rust1", since = "1.0.0")]
// Same reasoning as the read guard: the guard must stay on the acquiring
// thread.
101 impl<'a, T: ?Sized> !marker::Send for RwLockWriteGuard<'a, T> {}
104 /// Creates a new instance of an `RwLock<T>` which is unlocked.
109 /// use std::sync::RwLock;
111 /// let lock = RwLock::new(5);
113 #[stable(feature = "rust1", since = "1.0.0")]
114 pub fn new(t: T) -> RwLock<T> {
// NOTE(review): the enclosing `impl<T> RwLock<T>` header and the `RwLock {`
// struct-literal line appear to be missing from this excerpt; what remains
// is the field initializer list: a fresh sys lock, an unpoisoned flag, and
// the value wrapped in an `UnsafeCell` for interior mutability.
116 inner: box sys::RWLock::new(),
117 poison: poison::Flag::new(),
118 data: UnsafeCell::new(t),
123 impl<T: ?Sized> RwLock<T> {
124 /// Locks this rwlock with shared read access, blocking the current thread
125 /// until it can be acquired.
127 /// The calling thread will be blocked until there are no more writers which
128 /// hold the lock. There may be other readers currently inside the lock when
129 /// this method returns. This method does not provide any guarantees with
130 /// respect to the ordering of whether contentious readers or writers will
131 /// acquire the lock first.
133 /// Returns an RAII guard which will release this thread's shared access
134 /// once it is dropped.
138 /// This function will return an error if the RwLock is poisoned. An RwLock
139 /// is poisoned whenever a writer panics while holding an exclusive lock.
140 /// The failure will occur immediately after the lock has been acquired.
142 #[stable(feature = "rust1", since = "1.0.0")]
143 pub fn read(&self) -> LockResult<RwLockReadGuard<T>> {
// NOTE(review): the call that actually acquires the underlying sys read
// lock (e.g. `unsafe { self.inner.read() }`) appears to be missing from
// this excerpt; only the poison-checking guard construction is visible.
146 RwLockReadGuard::new(self)
150 /// Attempts to acquire this rwlock with shared read access.
152 /// If the access could not be granted at this time, then `Err` is returned.
153 /// Otherwise, an RAII guard is returned which will release the shared access
154 /// when it is dropped.
156 /// This function does not block.
158 /// This function does not provide any guarantees with respect to the ordering
159 /// of whether contentious readers or writers will acquire the lock first.
163 /// This function will return an error if the RwLock is poisoned. An RwLock
164 /// is poisoned whenever a writer panics while holding an exclusive lock. An
165 /// error will only be returned if the lock would have otherwise been
168 #[stable(feature = "rust1", since = "1.0.0")]
169 pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<T>> {
// Non-blocking probe of the sys lock; on success the guard construction
// below still reports poisoning via `?`.
171 if self.inner.try_read() {
172 Ok(RwLockReadGuard::new(self)?)
// NOTE(review): the `} else {` line between the two branches appears to be
// missing from this excerpt.
174 Err(TryLockError::WouldBlock)
179 /// Locks this rwlock with exclusive write access, blocking the current
180 /// thread until it can be acquired.
182 /// This function will not return while other writers or other readers
183 /// currently have access to the lock.
185 /// Returns an RAII guard which will drop the write access of this rwlock
190 /// This function will return an error if the RwLock is poisoned. An RwLock
191 /// is poisoned whenever a writer panics while holding an exclusive lock.
192 /// An error will be returned when the lock is acquired.
194 #[stable(feature = "rust1", since = "1.0.0")]
195 pub fn write(&self) -> LockResult<RwLockWriteGuard<T>> {
// NOTE(review): the call that actually acquires the underlying sys write
// lock (e.g. `unsafe { self.inner.write() }`) appears to be missing from
// this excerpt; only the poison-checking guard construction is visible.
198 RwLockWriteGuard::new(self)
202 /// Attempts to lock this rwlock with exclusive write access.
204 /// If the lock could not be acquired at this time, then `Err` is returned.
205 /// Otherwise, an RAII guard is returned which will release the lock when
208 /// This function does not block.
210 /// This function does not provide any guarantees with respect to the ordering
211 /// of whether contentious readers or writers will acquire the lock first.
215 /// This function will return an error if the RwLock is poisoned. An RwLock
216 /// is poisoned whenever a writer panics while holding an exclusive lock. An
217 /// error will only be returned if the lock would have otherwise been
220 #[stable(feature = "rust1", since = "1.0.0")]
221 pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<T>> {
// Non-blocking probe; mirrors `try_read` above, with poisoning surfaced
// through the `?` on guard construction.
223 if self.inner.try_write() {
224 Ok(RwLockWriteGuard::new(self)?)
// NOTE(review): the `} else {` line between the two branches appears to be
// missing from this excerpt.
226 Err(TryLockError::WouldBlock)
231 /// Determines whether the lock is poisoned.
233 /// If another thread is active, the lock can still become poisoned at any
234 /// time. You should not trust a `false` value for program correctness
235 /// without additional synchronization.
237 #[stable(feature = "sync_poison", since = "1.2.0")]
238 pub fn is_poisoned(&self) -> bool {
// NOTE(review): the function body appears to be missing from this excerpt —
// presumably it reads the poison flag (e.g. `self.poison.get()`); confirm
// against the full source.
242 /// Consumes this `RwLock`, returning the underlying data.
246 /// This function will return an error if the RwLock is poisoned. An RwLock
247 /// is poisoned whenever a writer panics while holding an exclusive lock. An
248 /// error will only be returned if the lock would have otherwise been
250 #[stable(feature = "rwlock_into_inner", since = "1.6.0")]
251 pub fn into_inner(self) -> LockResult<T> where T: Sized {
252 // We know statically that there are no outstanding references to
253 // `self` so there's no need to lock the inner lock.
255 // To get the inner value, we'd like to call `data.into_inner()`,
256 // but because `RwLock` impl-s `Drop`, we can't move out of it, so
257 // we'll have to destructure it manually instead.
259 // Like `let RwLock { inner, poison, data } = self`.
260 let (inner, poison, data) = {
261 let RwLock { ref inner, ref poison, ref data } = self;
// `ptr::read` moves each field out by bitwise copy without running `self`'s
// destructor prematurely; `self` must then be leaked (not dropped) or the
// fields would be destroyed twice.
262 (ptr::read(inner), ptr::read(poison), ptr::read(data))
// NOTE(review): the closing `};`, the `mem::forget(self);` that prevents the
// double-drop, and the enclosing `unsafe` block appear to be missing from
// this excerpt — confirm against the full source.
265 inner.destroy(); // Keep in sync with the `Drop` impl.
// Report poison status to the caller while still yielding the data either way.
268 poison::map_result(poison.borrow(), |_| data.into_inner())
272 /// Returns a mutable reference to the underlying data.
274 /// Since this call borrows the `RwLock` mutably, no actual locking needs to
275 /// take place---the mutable borrow statically guarantees no locks exist.
279 /// This function will return an error if the RwLock is poisoned. An RwLock
280 /// is poisoned whenever a writer panics while holding an exclusive lock. An
281 /// error will only be returned if the lock would have otherwise been
283 #[stable(feature = "rwlock_get_mut", since = "1.6.0")]
284 pub fn get_mut(&mut self) -> LockResult<&mut T> {
285 // We know statically that there are no other references to `self`, so
286 // there's no need to lock the inner lock.
// SAFETY rationale: `&mut self` guarantees exclusive access, so dereferencing
// the `UnsafeCell` contents cannot alias any guard.
287 let data = unsafe { &mut *self.data.get() }
// Still surface poisoning: the reference is returned inside the LockResult.
288 poison::map_result(self.poison.borrow(), |_| data)
292 #[stable(feature = "rust1", since = "1.0.0")]
293 impl<T: ?Sized> Drop for RwLock<T> {
294 #[unsafe_destructor_blind_to_params]
// NOTE(review): the `fn drop(&mut self) {` header line appears to be missing
// from this excerpt.
296 // IMPORTANT: This code needs to be kept in sync with `RwLock::into_inner`.
297 unsafe { self.inner.destroy() }
301 #[stable(feature = "rust1", since = "1.0.0")]
302 impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
303 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Use `try_read` so formatting never blocks: print the data when the shared
// lock is free, the poisoned data when a writer panicked, or a `<locked>`
// placeholder when a writer currently holds the lock.
304 match self.try_read() {
305 Ok(guard) => write!(f, "RwLock {{ data: {:?} }}", &*guard),
306 Err(TryLockError::Poisoned(err)) => {
307 write!(f, "RwLock {{ data: Poisoned({:?}) }}", &**err.get_ref())
// NOTE(review): the closing brace of the `Poisoned` arm appears to be
// missing from this excerpt.
309 Err(TryLockError::WouldBlock) => write!(f, "RwLock {{ <locked> }}")
314 #[stable(feature = "rw_lock_default", since = "1.9.0")]
// Creates an unlocked `RwLock` wrapping `T::default()`.
315 impl<T: Default> Default for RwLock<T> {
316 fn default() -> RwLock<T> {
317 RwLock::new(Default::default())
321 impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
// Internal constructor: caller must already hold the shared sys lock (hence
// `unsafe`). `map_result` turns a poisoned flag into a `PoisonError` that
// still carries the guard.
322 unsafe fn new(lock: &'rwlock RwLock<T>)
323 -> LockResult<RwLockReadGuard<'rwlock, T>> {
324 poison::map_result(lock.poison.borrow(), |_| {
// NOTE(review): the closure body constructing `RwLockReadGuard { __lock:
// lock }` and the closing braces appear to be missing from this excerpt.
332 impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
// Internal constructor: caller must already hold the exclusive sys lock.
// Unlike the read guard, the closure receives the poison `guard`, which is
// stored in the write guard so `Drop` can record a panic as poisoning.
333 unsafe fn new(lock: &'rwlock RwLock<T>)
334 -> LockResult<RwLockWriteGuard<'rwlock, T>> {
335 poison::map_result(lock.poison.borrow(), |guard| {
// NOTE(review): the closure body constructing the guard and the closing
// braces appear to be missing from this excerpt.
344 #[stable(feature = "rust1", since = "1.0.0")]
345 impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
// NOTE(review): the `type Target = T;` associated-type line appears to be
// missing from this excerpt.
348 fn deref(&self) -> &T {
// SAFETY rationale: the guard's existence proves the shared lock is held, so
// a shared reference into the UnsafeCell cannot race a writer.
349 unsafe { &*self.__lock.data.get() }
353 #[stable(feature = "rust1", since = "1.0.0")]
354 impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
// NOTE(review): the `type Target = T;` associated-type line appears to be
// missing from this excerpt.
357 fn deref(&self) -> &T {
// SAFETY rationale: the guard holds the exclusive lock, so even a shared
// view of the data cannot be racing any other access.
358 unsafe { &*self.__lock.data.get() }
362 #[stable(feature = "rust1", since = "1.0.0")]
363 impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
364 fn deref_mut(&mut self) -> &mut T {
// SAFETY rationale: exclusive lock held (guard exists) plus `&mut self`
// means this mutable reference is unique.
365 unsafe { &mut *self.__lock.data.get() }
369 #[stable(feature = "rust1", since = "1.0.0")]
370 impl<'a, T: ?Sized> Drop for RwLockReadGuard<'a, T> {
// NOTE(review): the `fn drop(&mut self) {` header line appears to be missing
// from this excerpt. Dropping the guard releases the shared sys lock.
372 unsafe { self.__lock.inner.read_unlock(); }
376 #[stable(feature = "rust1", since = "1.0.0")]
377 impl<'a, T: ?Sized> Drop for RwLockWriteGuard<'a, T> {
// NOTE(review): the `fn drop(&mut self) {` header line appears to be missing
// from this excerpt.
// Record poison state first (presumably `done` marks the lock poisoned when
// the thread is unwinding — confirm against `sys_common::poison`), then
// release the exclusive sys lock.
379 self.__lock.poison.done(&self.__poison);
380 unsafe { self.__lock.inner.write_unlock(); }
386 #![allow(deprecated)] // rand
390 use rand::{self, Rng};
391 use sync::mpsc::channel;
393 use sync::{Arc, RwLock, TryLockError};
394 use sync::atomic::{AtomicUsize, Ordering};
396 #[derive(Eq, PartialEq, Debug)]
// NOTE(review): the `struct NonCopy(i32);` definition this derive attaches to
// and the `#[test] fn smoke()` header appear to be missing from this excerpt.
// Smoke test: single-threaded sequences of read and write locks all succeed,
// including two simultaneous read guards.
401 let l = RwLock::new(());
402 drop(l.read().unwrap());
403 drop(l.write().unwrap());
404 drop((l.read().unwrap(), l.read().unwrap()));
405 drop(l.write().unwrap());
// Stress test: spawned threads each perform M random lock operations, taking
// the write lock with probability 1/N and the read lock otherwise.
// NOTE(review): the `#[test] fn frob()` header, the `const N` definition, and
// the surrounding loop/channel plumbing appear to be partially missing from
// this excerpt.
411 const M: usize = 1000;
413 let r = Arc::new(RwLock::new(()));
415 let (tx, rx) = channel::<()>();
419 thread::spawn(move || {
420 let mut rng = rand::thread_rng();
422 if rng.gen_weighted_bool(N) {
423 drop(r.write().unwrap());
425 drop(r.read().unwrap());
// A panic while the write lock is held poisons the lock: a later read() errs.
436 fn test_rw_arc_poison_wr() {
437 let arc = Arc::new(RwLock::new(1));
438 let arc2 = arc.clone();
439 let _: Result<(), _> = thread::spawn(move|| {
440 let _lock = arc2.write().unwrap();
// NOTE(review): the panic inside the spawned closure and the `.join()` call
// appear to be missing from this excerpt.
443 assert!(arc.read().is_err());
// A panic while the write lock is held poisons the lock: a later write() errs
// and is_poisoned() reports true.
447 fn test_rw_arc_poison_ww() {
448 let arc = Arc::new(RwLock::new(1));
449 assert!(!arc.is_poisoned());
450 let arc2 = arc.clone();
451 let _: Result<(), _> = thread::spawn(move|| {
452 let _lock = arc2.write().unwrap();
// NOTE(review): the panic inside the spawned closure and the `.join()` call
// appear to be missing from this excerpt.
455 assert!(arc.write().is_err());
456 assert!(arc.is_poisoned());
// A panic while only a READ lock is held does NOT poison: a later read() works.
460 fn test_rw_arc_no_poison_rr() {
461 let arc = Arc::new(RwLock::new(1));
462 let arc2 = arc.clone();
463 let _: Result<(), _> = thread::spawn(move|| {
464 let _lock = arc2.read().unwrap();
// NOTE(review): the panic inside the spawned closure and the `.join()` call
// appear to be missing from this excerpt.
467 let lock = arc.read().unwrap();
468 assert_eq!(*lock, 1);
// A panic while only a READ lock is held does NOT poison: a later write() works.
471 fn test_rw_arc_no_poison_rw() {
472 let arc = Arc::new(RwLock::new(1));
473 let arc2 = arc.clone();
474 let _: Result<(), _> = thread::spawn(move|| {
475 let _lock = arc2.read().unwrap();
// NOTE(review): the panic inside the spawned closure and the `.join()` call
// appear to be missing from this excerpt.
478 let lock = arc.write().unwrap();
479 assert_eq!(*lock, 1);
// Concurrency test: one writer increments the shared counter while many
// reader threads try to observe it mid-update; the final value must be 10.
// NOTE(review): the `#[test] fn test_rw_arc()` header, the writer's increment
// loop, the reader spawn loop bounds, and the reader-side asserts appear to
// be missing from this excerpt.
484 let arc = Arc::new(RwLock::new(0));
485 let arc2 = arc.clone();
486 let (tx, rx) = channel();
488 thread::spawn(move|| {
489 let mut lock = arc2.write().unwrap();
496 tx.send(()).unwrap();
499 // Readers try to catch the writer in the act
500 let mut children = Vec::new();
502 let arc3 = arc.clone();
503 children.push(thread::spawn(move|| {
504 let lock = arc3.read().unwrap();
509 // Wait for children to pass their asserts
511 assert!(r.join().is_ok());
514 // Wait for writer to finish
516 let lock = arc.read().unwrap();
517 assert_eq!(*lock, 10);
// The lock must remain usable from a destructor that runs during unwinding:
// `Unwinder::drop` takes the write lock while its thread is panicking.
521 fn test_rw_arc_access_in_unwind() {
522 let arc = Arc::new(RwLock::new(1));
523 let arc2 = arc.clone();
524 let _ = thread::spawn(move|| -> () {
// NOTE(review): the `struct Unwinder {` header and the panic that triggers
// the unwind appear to be missing from this excerpt.
526 i: Arc<RwLock<isize>>,
528 impl Drop for Unwinder {
530 let mut lock = self.i.write().unwrap();
534 let _u = Unwinder { i: arc2 };
// After the thread unwinds, the Drop impl presumably incremented the value
// to 2 — TODO confirm the missing `*lock += 1;` line in the full source.
537 let lock = arc.read().unwrap();
538 assert_eq!(*lock, 2);
// `RwLock<T: ?Sized>` works with unsized contents such as slices.
542 fn test_rwlock_unsized() {
543 let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
545 let b = &mut *rw.write().unwrap();
// NOTE(review): the element mutations between obtaining `b` and the final
// comparison appear to be missing from this excerpt.
549 let comp: &[i32] = &[4, 2, 5];
550 assert_eq!(&*rw.read().unwrap(), comp);
// While a read guard is alive, try_write() must fail with WouldBlock
// (not succeed, and not report any other error).
554 fn test_rwlock_try_write() {
557 let lock = RwLock::new(0isize);
558 let read_guard = lock.read().unwrap();
560 let write_result = lock.try_write();
// NOTE(review): the `match write_result {` line appears to be missing from
// this excerpt.
562 Err(TryLockError::WouldBlock) => (),
563 Ok(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
564 Err(_) => assert!(false, "unexpected error"),
// into_inner() on an unpoisoned lock yields the wrapped value.
571 fn test_into_inner() {
572 let m = RwLock::new(NonCopy(10));
573 assert_eq!(m.into_inner().unwrap(), NonCopy(10));
// into_inner() must move the value out exactly once: the drop counter stays 0
// until the extracted value itself is dropped, then becomes 1.
577 fn test_into_inner_drop() {
578 struct Foo(Arc<AtomicUsize>);
// NOTE(review): the `impl Drop for Foo` header around this counter increment
// appears to be missing from this excerpt.
581 self.0.fetch_add(1, Ordering::SeqCst);
584 let num_drops = Arc::new(AtomicUsize::new(0));
585 let m = RwLock::new(Foo(num_drops.clone()));
586 assert_eq!(num_drops.load(Ordering::SeqCst), 0);
588 let _inner = m.into_inner().unwrap();
589 assert_eq!(num_drops.load(Ordering::SeqCst), 0);
// `_inner` presumably goes out of scope here (closing brace missing from
// this excerpt), triggering the single drop observed below.
591 assert_eq!(num_drops.load(Ordering::SeqCst), 1);
// into_inner() on a poisoned lock returns Err, but the PoisonError still
// yields the protected value via its own into_inner().
595 fn test_into_inner_poison() {
596 let m = Arc::new(RwLock::new(NonCopy(10)));
// NOTE(review): the `let m2 = m.clone();` line and the `.join()` on the
// spawned thread appear to be missing from this excerpt.
598 let _ = thread::spawn(move || {
599 let _lock = m2.write().unwrap();
600 panic!("test panic in inner thread to poison RwLock");
603 assert!(m.is_poisoned());
604 match Arc::try_unwrap(m).unwrap().into_inner() {
605 Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
606 Ok(x) => panic!("into_inner of poisoned RwLock is Ok: {:?}", x),
// get_mut() gives direct mutable access without locking; the write is
// visible to a subsequent into_inner().
// NOTE(review): the `#[test] fn test_get_mut()` header appears to be missing
// from this excerpt.
612 let mut m = RwLock::new(NonCopy(10));
613 *m.get_mut().unwrap() = NonCopy(20);
614 assert_eq!(m.into_inner().unwrap(), NonCopy(20));
// get_mut() on a poisoned lock returns Err, but the PoisonError still exposes
// a mutable reference to the protected value.
618 fn test_get_mut_poison() {
619 let m = Arc::new(RwLock::new(NonCopy(10)));
// NOTE(review): the `let m2 = m.clone();` line and the `.join()` on the
// spawned thread appear to be missing from this excerpt.
621 let _ = thread::spawn(move || {
622 let _lock = m2.write().unwrap();
623 panic!("test panic in inner thread to poison RwLock");
626 assert!(m.is_poisoned());
627 match Arc::try_unwrap(m).unwrap().get_mut() {
628 Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
629 Ok(x) => panic!("get_mut of poisoned RwLock is Ok: {:?}", x),