1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
use prelude::v1::*;

use cell::UnsafeCell;
use fmt;
use marker;
use mem;
use ops::{Deref, DerefMut};
use ptr;
use sys_common::poison::{self, LockResult, TryLockError, TryLockResult};
use sys_common::rwlock as sys;
22 /// A reader-writer lock
24 /// This type of lock allows a number of readers or at most one writer at any
25 /// point in time. The write portion of this lock typically allows modification
26 /// of the underlying data (exclusive access) and the read portion of this lock
27 /// typically allows for read-only access (shared access).
29 /// The priority policy of the lock is dependent on the underlying operating
30 /// system's implementation, and this type does not guarantee that any
31 /// particular policy will be used.
33 /// The type parameter `T` represents the data that this lock protects. It is
34 /// required that `T` satisfies `Send` to be shared across threads and `Sync` to
35 /// allow concurrent access through readers. The RAII guards returned from the
36 /// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
37 /// to allow access to the contained of the lock.
41 /// RwLocks, like Mutexes, will become poisoned on panics. Note, however, that
42 /// an RwLock may only be poisoned if a panic occurs while it is locked
43 /// exclusively (write mode). If a panic occurs in any reader, then the lock
44 /// will not be poisoned.
49 /// use std::sync::RwLock;
51 /// let lock = RwLock::new(5);
53 /// // many reader locks can be held at once
55 /// let r1 = lock.read().unwrap();
56 /// let r2 = lock.read().unwrap();
57 /// assert_eq!(*r1, 5);
58 /// assert_eq!(*r2, 5);
59 /// } // read locks are dropped at this point
61 /// // only one write lock may be held, however
63 /// let mut w = lock.write().unwrap();
65 /// assert_eq!(*w, 6);
66 /// } // write lock is dropped here
68 #[stable(feature = "rust1", since = "1.0.0")]
69 pub struct RwLock<T: ?Sized> {
70 inner: Box<StaticRwLock>,
74 #[stable(feature = "rust1", since = "1.0.0")]
75 unsafe impl<T: ?Sized + Send + Sync> Send for RwLock<T> {}
76 #[stable(feature = "rust1", since = "1.0.0")]
77 unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
79 /// Structure representing a statically allocated RwLock.
81 /// This structure is intended to be used inside of a `static` and will provide
82 /// automatic global access as well as lazy initialization. The internal
83 /// resources of this RwLock, however, must be manually deallocated.
88 /// #![feature(static_rwlock)]
90 /// use std::sync::{StaticRwLock, RW_LOCK_INIT};
92 /// static LOCK: StaticRwLock = RW_LOCK_INIT;
95 /// let _g = LOCK.read().unwrap();
96 /// // ... shared read access
99 /// let _g = LOCK.write().unwrap();
100 /// // ... exclusive write access
102 /// unsafe { LOCK.destroy() } // free all resources
104 #[unstable(feature = "static_rwlock",
105 reason = "may be merged with RwLock in the future",
107 pub struct StaticRwLock {
109 poison: poison::Flag,
112 /// Constant initialization for a statically-initialized rwlock.
113 #[unstable(feature = "static_rwlock",
114 reason = "may be merged with RwLock in the future",
116 pub const RW_LOCK_INIT: StaticRwLock = StaticRwLock::new();
118 /// RAII structure used to release the shared read access of a lock when
121 #[stable(feature = "rust1", since = "1.0.0")]
122 pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
123 __lock: &'a StaticRwLock,
124 __data: &'a UnsafeCell<T>,
127 #[stable(feature = "rust1", since = "1.0.0")]
128 impl<'a, T: ?Sized> !marker::Send for RwLockReadGuard<'a, T> {}
130 /// RAII structure used to release the exclusive write access of a lock when
133 #[stable(feature = "rust1", since = "1.0.0")]
134 pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
135 __lock: &'a StaticRwLock,
136 __data: &'a UnsafeCell<T>,
137 __poison: poison::Guard,
140 #[stable(feature = "rust1", since = "1.0.0")]
141 impl<'a, T: ?Sized> !marker::Send for RwLockWriteGuard<'a, T> {}
144 /// Creates a new instance of an `RwLock<T>` which is unlocked.
149 /// use std::sync::RwLock;
151 /// let lock = RwLock::new(5);
153 #[stable(feature = "rust1", since = "1.0.0")]
154 pub fn new(t: T) -> RwLock<T> {
155 RwLock { inner: box StaticRwLock::new(), data: UnsafeCell::new(t) }
159 impl<T: ?Sized> RwLock<T> {
160 /// Locks this rwlock with shared read access, blocking the current thread
161 /// until it can be acquired.
163 /// The calling thread will be blocked until there are no more writers which
164 /// hold the lock. There may be other readers currently inside the lock when
165 /// this method returns. This method does not provide any guarantees with
166 /// respect to the ordering of whether contentious readers or writers will
167 /// acquire the lock first.
169 /// Returns an RAII guard which will release this thread's shared access
170 /// once it is dropped.
174 /// This function will return an error if the RwLock is poisoned. An RwLock
175 /// is poisoned whenever a writer panics while holding an exclusive lock.
176 /// The failure will occur immediately after the lock has been acquired.
178 #[stable(feature = "rust1", since = "1.0.0")]
179 pub fn read(&self) -> LockResult<RwLockReadGuard<T>> {
180 unsafe { self.inner.lock.read() }
181 RwLockReadGuard::new(&*self.inner, &self.data)
184 /// Attempts to acquire this rwlock with shared read access.
186 /// If the access could not be granted at this time, then `Err` is returned.
187 /// Otherwise, an RAII guard is returned which will release the shared access
188 /// when it is dropped.
190 /// This function does not block.
192 /// This function does not provide any guarantees with respect to the ordering
193 /// of whether contentious readers or writers will acquire the lock first.
197 /// This function will return an error if the RwLock is poisoned. An RwLock
198 /// is poisoned whenever a writer panics while holding an exclusive lock. An
199 /// error will only be returned if the lock would have otherwise been
202 #[stable(feature = "rust1", since = "1.0.0")]
203 pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<T>> {
204 if unsafe { self.inner.lock.try_read() } {
205 Ok(try!(RwLockReadGuard::new(&*self.inner, &self.data)))
207 Err(TryLockError::WouldBlock)
211 /// Locks this rwlock with exclusive write access, blocking the current
212 /// thread until it can be acquired.
214 /// This function will not return while other writers or other readers
215 /// currently have access to the lock.
217 /// Returns an RAII guard which will drop the write access of this rwlock
222 /// This function will return an error if the RwLock is poisoned. An RwLock
223 /// is poisoned whenever a writer panics while holding an exclusive lock.
224 /// An error will be returned when the lock is acquired.
226 #[stable(feature = "rust1", since = "1.0.0")]
227 pub fn write(&self) -> LockResult<RwLockWriteGuard<T>> {
228 unsafe { self.inner.lock.write() }
229 RwLockWriteGuard::new(&*self.inner, &self.data)
232 /// Attempts to lock this rwlock with exclusive write access.
234 /// If the lock could not be acquired at this time, then `Err` is returned.
235 /// Otherwise, an RAII guard is returned which will release the lock when
238 /// This function does not block.
240 /// This function does not provide any guarantees with respect to the ordering
241 /// of whether contentious readers or writers will acquire the lock first.
245 /// This function will return an error if the RwLock is poisoned. An RwLock
246 /// is poisoned whenever a writer panics while holding an exclusive lock. An
247 /// error will only be returned if the lock would have otherwise been
250 #[stable(feature = "rust1", since = "1.0.0")]
251 pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<T>> {
252 if unsafe { self.inner.lock.try_write() } {
253 Ok(try!(RwLockWriteGuard::new(&*self.inner, &self.data)))
255 Err(TryLockError::WouldBlock)
259 /// Determines whether the lock is poisoned.
261 /// If another thread is active, the lock can still become poisoned at any
262 /// time. You should not trust a `false` value for program correctness
263 /// without additional synchronization.
265 #[stable(feature = "sync_poison", since = "1.2.0")]
266 pub fn is_poisoned(&self) -> bool {
267 self.inner.poison.get()
270 /// Consumes this `RwLock`, returning the underlying data.
274 /// This function will return an error if the RwLock is poisoned. An RwLock
275 /// is poisoned whenever a writer panics while holding an exclusive lock. An
276 /// error will only be returned if the lock would have otherwise been
278 #[stable(feature = "rwlock_into_inner", since = "1.6.0")]
279 pub fn into_inner(self) -> LockResult<T> where T: Sized {
280 // We know statically that there are no outstanding references to
281 // `self` so there's no need to lock the inner StaticRwLock.
283 // To get the inner value, we'd like to call `data.into_inner()`,
284 // but because `RwLock` impl-s `Drop`, we can't move out of it, so
285 // we'll have to destructure it manually instead.
287 // Like `let RwLock { inner, data } = self`.
288 let (inner, data) = {
289 let RwLock { ref inner, ref data } = self;
290 (ptr::read(inner), ptr::read(data))
293 inner.lock.destroy(); // Keep in sync with the `Drop` impl.
295 poison::map_result(inner.poison.borrow(), |_| data.into_inner())
299 /// Returns a mutable reference to the underlying data.
301 /// Since this call borrows the `RwLock` mutably, no actual locking needs to
302 /// take place---the mutable borrow statically guarantees no locks exist.
306 /// This function will return an error if the RwLock is poisoned. An RwLock
307 /// is poisoned whenever a writer panics while holding an exclusive lock. An
308 /// error will only be returned if the lock would have otherwise been
310 #[stable(feature = "rwlock_get_mut", since = "1.6.0")]
311 pub fn get_mut(&mut self) -> LockResult<&mut T> {
312 // We know statically that there are no other references to `self`, so
313 // there's no need to lock the inner StaticRwLock.
314 let data = unsafe { &mut *self.data.get() };
315 poison::map_result(self.inner.poison.borrow(), |_| data )
319 #[stable(feature = "rust1", since = "1.0.0")]
320 impl<T: ?Sized> Drop for RwLock<T> {
321 #[unsafe_destructor_blind_to_params]
323 // IMPORTANT: This code needs to be kept in sync with `RwLock::into_inner`.
324 unsafe { self.inner.lock.destroy() }
328 #[stable(feature = "rust1", since = "1.0.0")]
329 impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
330 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
331 match self.try_read() {
332 Ok(guard) => write!(f, "RwLock {{ data: {:?} }}", &*guard),
333 Err(TryLockError::Poisoned(err)) => {
334 write!(f, "RwLock {{ data: Poisoned({:?}) }}", &**err.get_ref())
336 Err(TryLockError::WouldBlock) => write!(f, "RwLock {{ <locked> }}")
341 struct Dummy(UnsafeCell<()>);
342 unsafe impl Sync for Dummy {}
343 static DUMMY: Dummy = Dummy(UnsafeCell::new(()));
345 #[unstable(feature = "static_rwlock",
346 reason = "may be merged with RwLock in the future",
349 /// Creates a new rwlock.
350 pub const fn new() -> StaticRwLock {
352 lock: sys::RWLock::new(),
353 poison: poison::Flag::new(),
357 /// Locks this rwlock with shared read access, blocking the current thread
358 /// until it can be acquired.
360 /// See `RwLock::read`.
362 pub fn read(&'static self) -> LockResult<RwLockReadGuard<'static, ()>> {
363 unsafe { self.lock.read() }
364 RwLockReadGuard::new(self, &DUMMY.0)
367 /// Attempts to acquire this lock with shared read access.
369 /// See `RwLock::try_read`.
371 pub fn try_read(&'static self)
372 -> TryLockResult<RwLockReadGuard<'static, ()>> {
373 if unsafe { self.lock.try_read() } {
374 Ok(try!(RwLockReadGuard::new(self, &DUMMY.0)))
376 Err(TryLockError::WouldBlock)
380 /// Locks this rwlock with exclusive write access, blocking the current
381 /// thread until it can be acquired.
383 /// See `RwLock::write`.
385 pub fn write(&'static self) -> LockResult<RwLockWriteGuard<'static, ()>> {
386 unsafe { self.lock.write() }
387 RwLockWriteGuard::new(self, &DUMMY.0)
390 /// Attempts to lock this rwlock with exclusive write access.
392 /// See `RwLock::try_write`.
394 pub fn try_write(&'static self)
395 -> TryLockResult<RwLockWriteGuard<'static, ()>> {
396 if unsafe { self.lock.try_write() } {
397 Ok(try!(RwLockWriteGuard::new(self, &DUMMY.0)))
399 Err(TryLockError::WouldBlock)
403 /// Deallocates all resources associated with this static lock.
405 /// This method is unsafe to call as there is no guarantee that there are no
406 /// active users of the lock, and this also doesn't prevent any future users
407 /// of this lock. This method is required to be called to not leak memory on
409 pub unsafe fn destroy(&'static self) {
414 impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
415 fn new(lock: &'rwlock StaticRwLock, data: &'rwlock UnsafeCell<T>)
416 -> LockResult<RwLockReadGuard<'rwlock, T>> {
417 poison::map_result(lock.poison.borrow(), |_| {
426 impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
427 fn new(lock: &'rwlock StaticRwLock, data: &'rwlock UnsafeCell<T>)
428 -> LockResult<RwLockWriteGuard<'rwlock, T>> {
429 poison::map_result(lock.poison.borrow(), |guard| {
439 #[stable(feature = "rust1", since = "1.0.0")]
440 impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
443 fn deref(&self) -> &T { unsafe { &*self.__data.get() } }
446 #[stable(feature = "rust1", since = "1.0.0")]
447 impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
450 fn deref(&self) -> &T { unsafe { &*self.__data.get() } }
453 #[stable(feature = "rust1", since = "1.0.0")]
454 impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
455 fn deref_mut(&mut self) -> &mut T {
456 unsafe { &mut *self.__data.get() }
460 #[stable(feature = "rust1", since = "1.0.0")]
461 impl<'a, T: ?Sized> Drop for RwLockReadGuard<'a, T> {
463 unsafe { self.__lock.lock.read_unlock(); }
467 #[stable(feature = "rust1", since = "1.0.0")]
468 impl<'a, T: ?Sized> Drop for RwLockWriteGuard<'a, T> {
470 self.__lock.poison.done(&self.__poison);
471 unsafe { self.__lock.lock.write_unlock(); }
477 #![allow(deprecated)] // rand
481 use rand::{self, Rng};
482 use sync::mpsc::channel;
484 use sync::{Arc, RwLock, StaticRwLock, TryLockError};
485 use sync::atomic::{AtomicUsize, Ordering};
// Non-`Copy` payload used to exercise move-out paths like `into_inner`.
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);

// Basic lock/unlock sequencing on a single thread.
#[test]
fn smoke() {
    let l = RwLock::new(());
    drop(l.read().unwrap());
    drop(l.write().unwrap());
    drop((l.read().unwrap(), l.read().unwrap()));
    drop(l.write().unwrap());
}
// Same sequencing as `smoke`, but through the static-lock API.
#[test]
fn static_smoke() {
    static R: StaticRwLock = StaticRwLock::new();
    drop(R.read().unwrap());
    drop(R.write().unwrap());
    drop((R.read().unwrap(), R.read().unwrap()));
    drop(R.write().unwrap());
    // Static locks require manual resource cleanup.
    unsafe { R.destroy(); }
}
// Stress test: N threads each perform M randomly-chosen read/write lock
// operations concurrently on one static lock.
#[test]
fn frob() {
    static R: StaticRwLock = StaticRwLock::new();
    const N: usize = 10;
    const M: usize = 1000;

    let (tx, rx) = channel::<()>();
    for _ in 0..N {
        let tx = tx.clone();
        thread::spawn(move|| {
            let mut rng = rand::thread_rng();
            for _ in 0..M {
                if rng.gen_weighted_bool(N as u32) {
                    drop(R.write().unwrap());
                } else {
                    drop(R.read().unwrap());
                }
            }
            drop(tx);
        });
    }
    drop(tx);
    // Blocks until every worker has dropped its sender.
    let _ = rx.recv();
    unsafe { R.destroy(); }
}
// A panic while holding the write lock must poison subsequent reads.
#[test]
fn test_rw_arc_poison_wr() {
    let arc = Arc::new(RwLock::new(1));
    let arc2 = arc.clone();
    let _: Result<(), _> = thread::spawn(move|| {
        let _lock = arc2.write().unwrap();
        panic!();
    }).join();
    assert!(arc.read().is_err());
}
// A panic while holding the write lock must poison subsequent writes and
// set the `is_poisoned` flag.
#[test]
fn test_rw_arc_poison_ww() {
    let arc = Arc::new(RwLock::new(1));
    assert!(!arc.is_poisoned());
    let arc2 = arc.clone();
    let _: Result<(), _> = thread::spawn(move|| {
        let _lock = arc2.write().unwrap();
        panic!();
    }).join();
    assert!(arc.write().is_err());
    assert!(arc.is_poisoned());
}
// A panic while holding only a read lock must NOT poison later reads.
#[test]
fn test_rw_arc_no_poison_rr() {
    let arc = Arc::new(RwLock::new(1));
    let arc2 = arc.clone();
    let _: Result<(), _> = thread::spawn(move|| {
        let _lock = arc2.read().unwrap();
        panic!();
    }).join();
    let lock = arc.read().unwrap();
    assert_eq!(*lock, 1);
}
// A panic while holding only a read lock must NOT poison later writes.
#[test]
fn test_rw_arc_no_poison_rw() {
    let arc = Arc::new(RwLock::new(1));
    let arc2 = arc.clone();
    let _: Result<(), _> = thread::spawn(move|| {
        let _lock = arc2.read().unwrap();
        panic!()
    }).join();
    let lock = arc.write().unwrap();
    assert_eq!(*lock, 1);
}
// Writer repeatedly leaves the counter in a temporarily-invalid state (-1)
// while concurrent readers assert they never observe it.
#[test]
fn test_rw_arc() {
    let arc = Arc::new(RwLock::new(0));
    let arc2 = arc.clone();
    let (tx, rx) = channel();

    thread::spawn(move|| {
        let mut lock = arc2.write().unwrap();
        for _ in 0..10 {
            let tmp = *lock;
            *lock = -1;
            thread::yield_now();
            *lock = tmp + 1;
        }
        tx.send(()).unwrap();
    });

    // Readers try to catch the writer in the act
    let mut children = Vec::new();
    for _ in 0..5 {
        let arc3 = arc.clone();
        children.push(thread::spawn(move|| {
            let lock = arc3.read().unwrap();
            assert!(*lock >= 0);
        }));
    }

    // Wait for children to pass their asserts
    for r in children {
        assert!(r.join().is_ok());
    }

    // Wait for writer to finish
    rx.recv().unwrap();
    let lock = arc.read().unwrap();
    assert_eq!(*lock, 10);
}
// The lock must remain usable from a destructor that runs during unwinding.
#[test]
fn test_rw_arc_access_in_unwind() {
    let arc = Arc::new(RwLock::new(1));
    let arc2 = arc.clone();
    let _ = thread::spawn(move|| -> () {
        struct Unwinder {
            i: Arc<RwLock<isize>>,
        }
        impl Drop for Unwinder {
            fn drop(&mut self) {
                // Runs while the spawned thread is unwinding.
                let mut lock = self.i.write().unwrap();
                *lock += 1;
            }
        }
        let _u = Unwinder { i: arc2 };
        panic!();
    }).join();
    let lock = arc.read().unwrap();
    assert_eq!(*lock, 2);
}
// `RwLock<T>` supports unsized `T` (here a slice) via unsize coercion.
#[test]
fn test_rwlock_unsized() {
    let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
    {
        let b = &mut *rw.write().unwrap();
        b[0] = 4;
        b[2] = 5;
    }
    let comp: &[i32] = &[4, 2, 5];
    assert_eq!(&*rw.read().unwrap(), comp);
}
// `try_write` must fail with `WouldBlock` while a read guard is live.
#[test]
fn test_rwlock_try_write() {
    let lock = RwLock::new(0isize);
    let read_guard = lock.read().unwrap();

    let write_result = lock.try_write();
    match write_result {
        Err(TryLockError::WouldBlock) => (),
        Ok(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
        Err(_) => assert!(false, "unexpected error"),
    }

    drop(read_guard);
}
// `into_inner` returns the protected value by move.
#[test]
fn test_into_inner() {
    let m = RwLock::new(NonCopy(10));
    assert_eq!(m.into_inner().unwrap(), NonCopy(10));
}
// `into_inner` must move the value out exactly once: the inner value's
// destructor runs when the extracted value drops, not when the lock does.
#[test]
fn test_into_inner_drop() {
    struct Foo(Arc<AtomicUsize>);
    impl Drop for Foo {
        fn drop(&mut self) {
            self.0.fetch_add(1, Ordering::SeqCst);
        }
    }
    let num_drops = Arc::new(AtomicUsize::new(0));
    let m = RwLock::new(Foo(num_drops.clone()));
    assert_eq!(num_drops.load(Ordering::SeqCst), 0);
    {
        let _inner = m.into_inner().unwrap();
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
    }
    assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
// `into_inner` on a poisoned lock returns Err, but the value is still
// recoverable from the poison error.
#[test]
fn test_into_inner_poison() {
    let m = Arc::new(RwLock::new(NonCopy(10)));
    let m2 = m.clone();
    let _ = thread::spawn(move || {
        let _lock = m2.write().unwrap();
        panic!("test panic in inner thread to poison RwLock");
    }).join();

    assert!(m.is_poisoned());
    match Arc::try_unwrap(m).unwrap().into_inner() {
        Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
        Ok(x) => panic!("into_inner of poisoned RwLock is Ok: {:?}", x),
    }
}
// `get_mut` gives lock-free mutable access through a unique borrow.
#[test]
fn test_get_mut() {
    let mut m = RwLock::new(NonCopy(10));
    *m.get_mut().unwrap() = NonCopy(20);
    assert_eq!(m.into_inner().unwrap(), NonCopy(20));
}
// `get_mut` on a poisoned lock returns Err, but the reference is still
// recoverable from the poison error.
#[test]
fn test_get_mut_poison() {
    let m = Arc::new(RwLock::new(NonCopy(10)));
    let m2 = m.clone();
    let _ = thread::spawn(move || {
        let _lock = m2.write().unwrap();
        panic!("test panic in inner thread to poison RwLock");
    }).join();

    assert!(m.is_poisoned());
    match Arc::try_unwrap(m).unwrap().get_mut() {
        Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
        Ok(x) => panic!("get_mut of poisoned RwLock is Ok: {:?}", x),
    }
}