1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
15 use ops::{Deref, DerefMut};
16 use sync::poison::{mod, LockResult, TryLockError, TryLockResult};
17 use sys_common::rwlock as sys;
19 /// A reader-writer lock
21 /// This type of lock allows a number of readers or at most one writer at any
22 /// point in time. The write portion of this lock typically allows modification
23 /// of the underlying data (exclusive access) and the read portion of this lock
24 /// typically allows for read-only access (shared access).
26 /// The type parameter `T` represents the data that this lock protects. It is
27 /// required that `T` satisfies `Send` to be shared across tasks and `Sync` to
28 /// allow concurrent access through readers. The RAII guards returned from the
29 /// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
30 /// to allow access to the contents of the lock.
34 /// RWLocks, like Mutexes, will become poisoned on panics. Note, however, that
35 /// an RWLock may only be poisoned if a panic occurs while it is locked
36 /// exclusively (write mode). If a panic occurs in any reader, then the lock
37 /// will not be poisoned.
42 /// use std::sync::RWLock;
44 /// let lock = RWLock::new(5i);
46 /// // many reader locks can be held at once
48 /// let r1 = lock.read().unwrap();
49 /// let r2 = lock.read().unwrap();
50 /// assert_eq!(*r1, 5);
51 /// assert_eq!(*r2, 5);
52 /// } // read locks are dropped at this point
54 /// // only one write lock may be held, however
56 /// let mut w = lock.write().unwrap();
58 /// assert_eq!(*w, 6);
59 /// } // write lock is dropped here
62 pub struct RWLock<T> {
// Heap-boxed so the inner lock presumably keeps a stable address even if
// the RWLock value is moved — TODO confirm against sys_common::rwlock docs.
63 inner: Box<StaticRWLock>,
// NOTE(review): a `data: UnsafeCell<T>` field is referenced by `new`,
// `read`, `write`, etc. below, but its declaration is elided from this
// excerpt — confirm it exists in the full file.
// Marker impls: `Send` requires `T: 'static + Send`, but `Sync` below is
// implemented for *any* `T`.
// NOTE(review): an unconstrained `Sync` impl looks suspicious — shared
// access through readers hands out `&T` across threads, which normally
// requires `T: Sync`. Verify this is intentional for this Rust vintage.
67 unsafe impl<T:'static+Send> Send for RWLock<T> {}
68 unsafe impl<T> Sync for RWLock<T> {}
70 /// Structure representing a statically allocated RWLock.
72 /// This structure is intended to be used inside of a `static` and will provide
73 /// automatic global access as well as lazy initialization. The internal
74 /// resources of this RWLock, however, must be manually deallocated.
79 /// use std::sync::{StaticRWLock, RWLOCK_INIT};
81 /// static LOCK: StaticRWLock = RWLOCK_INIT;
84 /// let _g = LOCK.read().unwrap();
85 /// // ... shared read access
88 /// let _g = LOCK.write().unwrap();
89 /// // ... exclusive write access
91 /// unsafe { LOCK.destroy() } // free all resources
93 #[unstable = "may be merged with RWLock in the future"]
94 pub struct StaticRWLock {
// NOTE(review): field declarations are elided from this excerpt; the
// `RWLOCK_INIT` initializer below shows two fields: `lock` (the raw OS
// rwlock) and `poison` (the poison flag).
99 unsafe impl Send for StaticRWLock {}
100 unsafe impl Sync for StaticRWLock {}
102 /// Constant initialization for a statically-initialized rwlock.
103 #[unstable = "may be merged with RWLock in the future"]
104 pub const RWLOCK_INIT: StaticRWLock = StaticRWLock {
105 lock: sys::RWLOCK_INIT,
106 poison: poison::FLAG_INIT,
109 /// RAII structure used to release the shared read access of a lock when
113 pub struct RWLockReadGuard<'a, T: 'a> {
// The lock to unlock (via `read_unlock`) when this guard is dropped.
114 __lock: &'a StaticRWLock,
// The protected data; `Deref` goes through this cell.
115 __data: &'a UnsafeCell<T>,
// Marker field named `NoSend` — presumably opts the guard out of `Send`
// so the lock is released on the acquiring thread; TODO confirm.
116 __marker: marker::NoSend,
119 /// RAII structure used to release the exclusive write access of a lock when
123 pub struct RWLockWriteGuard<'a, T: 'a> {
124 __lock: &'a StaticRWLock,
125 __data: &'a UnsafeCell<T>,
// Poison guard: `Drop` reports via `poison.done(..)` whether the write
// section panicked, poisoning the lock if so.
126 __poison: poison::Guard,
127 __marker: marker::NoSend,
130 impl<T: Send + Sync> RWLock<T> {
131 /// Creates a new instance of an RWLock which is unlocked and ready to go.
133 pub fn new(t: T) -> RWLock<T> {
134 RWLock { inner: box RWLOCK_INIT, data: UnsafeCell::new(t) }
137 /// Locks this rwlock with shared read access, blocking the current thread
138 /// until it can be acquired.
140 /// The calling thread will be blocked until there are no more writers which
141 /// hold the lock. There may be other readers currently inside the lock when
142 /// this method returns. This method does not provide any guarantees with
143 /// respect to the ordering of whether contentious readers or writers will
144 /// acquire the lock first.
146 /// Returns an RAII guard which will release this thread's shared access
147 /// once it is dropped.
151 /// This function will return an error if the RWLock is poisoned. An RWLock
152 /// is poisoned whenever a writer panics while holding an exclusive lock.
153 /// The failure will occur immediately after the lock has been acquired.
156 pub fn read(&self) -> LockResult<RWLockReadGuard<T>> {
// Acquire the raw OS lock first; the guard constructor then checks the
// poison flag (via `poison.borrow()`), so a poison error is reported
// only after the lock is held.
157 unsafe { self.inner.lock.read() }
158 RWLockReadGuard::new(&*self.inner, &self.data)
161 /// Attempt to acquire this lock with shared read access.
163 /// This function will never block and will return immediately if `read`
164 /// would otherwise succeed. Returns `Some` of an RAII guard which will
165 /// release the shared access of this thread when dropped, or `None` if the
166 /// access could not be granted. This method does not provide any
167 /// guarantees with respect to the ordering of whether contentious readers
168 /// or writers will acquire the lock first.
172 /// This function will return an error if the RWLock is poisoned. An RWLock
173 /// is poisoned whenever a writer panics while holding an exclusive lock. An
174 /// error will only be returned if the lock would have otherwise been
178 pub fn try_read(&self) -> TryLockResult<RWLockReadGuard<T>> {
179 if unsafe { self.inner.lock.try_read() } {
// `try!` maps a poisoned-but-acquired lock into the error return.
180 Ok(try!(RWLockReadGuard::new(&*self.inner, &self.data)))
// (else branch — the `} else {` line is elided from this excerpt)
182 Err(TryLockError::WouldBlock)
186 /// Lock this rwlock with exclusive write access, blocking the current
187 /// thread until it can be acquired.
189 /// This function will not return while other writers or other readers
190 /// currently have access to the lock.
192 /// Returns an RAII guard which will drop the write access of this rwlock
197 /// This function will return an error if the RWLock is poisoned. An RWLock
198 /// is poisoned whenever a writer panics while holding an exclusive lock.
199 /// An error will be returned when the lock is acquired.
202 pub fn write(&self) -> LockResult<RWLockWriteGuard<T>> {
// Same shape as `read`: raw acquire, then poison check in the guard ctor.
203 unsafe { self.inner.lock.write() }
204 RWLockWriteGuard::new(&*self.inner, &self.data)
207 /// Attempt to lock this rwlock with exclusive write access.
209 /// This function does not ever block, and it will return `None` if a call
210 /// to `write` would otherwise block. If successful, an RAII guard is
215 /// This function will return an error if the RWLock is poisoned. An RWLock
216 /// is poisoned whenever a writer panics while holding an exclusive lock. An
217 /// error will only be returned if the lock would have otherwise been
221 pub fn try_write(&self) -> TryLockResult<RWLockWriteGuard<T>> {
222 if unsafe { self.inner.lock.try_read() } {
223 Ok(try!(RWLockWriteGuard::new(&*self.inner, &self.data)))
225 Err(TryLockError::WouldBlock)
// Tear down the OS-level lock resources when the owning RWLock goes away.
// (The `fn drop(&mut self)` header line is elided from this excerpt.)
231 impl<T> Drop for RWLock<T> {
233 unsafe { self.inner.lock.destroy() }
// Placeholder data for the static-lock API: `StaticRWLock` methods below
// build guards over this shared `UnsafeCell<()>` since a static lock
// protects no data of its own.
237 struct Dummy(UnsafeCell<()>);
// Required so the `static DUMMY` below is shareable across threads.
238 unsafe impl Sync for Dummy {}
239 static DUMMY: Dummy = Dummy(UnsafeCell { value: () });
// NOTE(review): the enclosing `impl StaticRWLock {` header is elided from
// this excerpt; the `&'static self` receivers below imply these are
// StaticRWLock methods.
242 /// Locks this rwlock with shared read access, blocking the current thread
243 /// until it can be acquired.
245 /// See `RWLock::read`.
247 #[unstable = "may be merged with RWLock in the future"]
248 pub fn read(&'static self) -> LockResult<RWLockReadGuard<'static, ()>> {
249 unsafe { self.lock.read() }
250 RWLockReadGuard::new(self, &DUMMY.0)
253 /// Attempt to acquire this lock with shared read access.
255 /// See `RWLock::try_read`.
257 #[unstable = "may be merged with RWLock in the future"]
258 pub fn try_read(&'static self)
259 -> TryLockResult<RWLockReadGuard<'static, ()>> {
260 if unsafe { self.lock.try_read() } {
261 Ok(try!(RWLockReadGuard::new(self, &DUMMY.0)))
263 Err(TryLockError::WouldBlock)
267 /// Lock this rwlock with exclusive write access, blocking the current
268 /// thread until it can be acquired.
270 /// See `RWLock::write`.
272 #[unstable = "may be merged with RWLock in the future"]
273 pub fn write(&'static self) -> LockResult<RWLockWriteGuard<'static, ()>> {
274 unsafe { self.lock.write() }
275 RWLockWriteGuard::new(self, &DUMMY.0)
278 /// Attempt to lock this rwlock with exclusive write access.
280 /// See `RWLock::try_write`.
282 #[unstable = "may be merged with RWLock in the future"]
283 pub fn try_write(&'static self)
284 -> TryLockResult<RWLockWriteGuard<'static, ()>> {
// Correctly pairs `try_write` with a write guard (matching the
// read/try_read pattern above).
285 if unsafe { self.lock.try_write() } {
286 Ok(try!(RWLockWriteGuard::new(self, &DUMMY.0)))
288 Err(TryLockError::WouldBlock)
292 /// Deallocate all resources associated with this static lock.
294 /// This method is unsafe to call as there is no guarantee that there are no
295 /// active users of the lock, and this also doesn't prevent any future users
296 /// of this lock. This method is required to be called to not leak memory on
298 #[unstable = "may be merged with RWLock in the future"]
299 pub unsafe fn destroy(&'static self) {
304 impl<'rwlock, T> RWLockReadGuard<'rwlock, T> {
// Constructor: consults the lock's poison flag via `poison.borrow()` and
// wraps the guard in a `LockResult` accordingly. Callers acquire the raw
// lock *before* calling this. (Struct-literal body lines are elided from
// this excerpt.)
305 fn new(lock: &'rwlock StaticRWLock, data: &'rwlock UnsafeCell<T>)
306 -> LockResult<RWLockReadGuard<'rwlock, T>> {
// Read guards ignore the poison closure argument — readers cannot
// poison the lock, only observe an existing poison state.
307 poison::map_result(lock.poison.borrow(), |_| {
311 __marker: marker::NoSend,
316 impl<'rwlock, T> RWLockWriteGuard<'rwlock, T> {
317 fn new(lock: &'rwlock StaticRWLock, data: &'rwlock UnsafeCell<T>)
318 -> LockResult<RWLockWriteGuard<'rwlock, T>> {
// Write guards keep the poison `guard` token so Drop can report
// whether a panic occurred while the write lock was held.
319 poison::map_result(lock.poison.borrow(), |guard| {
324 __marker: marker::NoSend,
// Deref impls give guard holders access to the protected data. Only the
// write guard implements DerefMut (exclusive access).
330 impl<'rwlock, T> Deref for RWLockReadGuard<'rwlock, T> {
333 fn deref(&self) -> &T { unsafe { &*self.__data.get() } }
335 impl<'rwlock, T> Deref for RWLockWriteGuard<'rwlock, T> {
338 fn deref(&self) -> &T { unsafe { &*self.__data.get() } }
340 impl<'rwlock, T> DerefMut for RWLockWriteGuard<'rwlock, T> {
341 fn deref_mut(&mut self) -> &mut T {
342 unsafe { &mut *self.__data.get() }
// Dropping a guard releases the corresponding side of the lock.
// (The `fn drop(&mut self)` header lines are elided from this excerpt.)
347 impl<'a, T> Drop for RWLockReadGuard<'a, T> {
349 unsafe { self.__lock.lock.read_unlock(); }
354 impl<'a, T> Drop for RWLockWriteGuard<'a, T> {
// Record whether this write section panicked (poisons the lock if so)
// before releasing exclusive access.
356 self.__lock.poison.done(&self.__poison);
357 unsafe { self.__lock.lock.write_unlock(); }
// Test module body. NOTE(review): the `mod tests` header, `#[test]`
// attributes, test-fn headers for the first tests, closing braces, and
// several body lines are elided from this excerpt.
365 use rand::{mod, Rng};
366 use sync::mpsc::channel;
368 use sync::{Arc, RWLock, StaticRWLock, RWLOCK_INIT};
// Smoke test: repeated shared/exclusive acquire-release on a local lock.
372 let l = RWLock::new(());
373 drop(l.read().unwrap());
374 drop(l.write().unwrap());
375 drop((l.read().unwrap(), l.read().unwrap()));
376 drop(l.write().unwrap());
// Same smoke test against the static-lock API, including manual destroy.
381 static R: StaticRWLock = RWLOCK_INIT;
382 drop(R.read().unwrap());
383 drop(R.write().unwrap());
384 drop((R.read().unwrap(), R.read().unwrap()));
385 drop(R.write().unwrap());
386 unsafe { R.destroy(); }
// Stress test: N threads each do M randomized read/write acquisitions.
391 static R: StaticRWLock = RWLOCK_INIT;
393 static M: uint = 1000;
395 let (tx, rx) = channel::<()>();
396 for _ in range(0, N) {
398 Thread::spawn(move|| {
399 let mut rng = rand::thread_rng();
400 for _ in range(0, M) {
401 if rng.gen_weighted_bool(N) {
402 drop(R.write().unwrap());
404 drop(R.read().unwrap());
412 unsafe { R.destroy(); }
// A panic while write-locked poisons the lock: subsequent read errors.
416 fn test_rw_arc_poison_wr() {
417 let arc = Arc::new(RWLock::new(1i));
418 let arc2 = arc.clone();
419 let _: Result<uint, _> = Thread::spawn(move|| {
420 let _lock = arc2.write().unwrap();
423 assert!(arc.read().is_err());
// A panic while write-locked poisons the lock: subsequent write errors.
427 fn test_rw_arc_poison_ww() {
428 let arc = Arc::new(RWLock::new(1i));
429 let arc2 = arc.clone();
430 let _: Result<uint, _> = Thread::spawn(move|| {
431 let _lock = arc2.write().unwrap();
434 assert!(arc.write().is_err());
// A panic while only read-locked does NOT poison: read still succeeds.
438 fn test_rw_arc_no_poison_rr() {
439 let arc = Arc::new(RWLock::new(1i));
440 let arc2 = arc.clone();
441 let _: Result<uint, _> = Thread::spawn(move|| {
442 let _lock = arc2.read().unwrap();
445 let lock = arc.read().unwrap();
446 assert_eq!(*lock, 1);
// A panic while only read-locked does NOT poison: write still succeeds.
449 fn test_rw_arc_no_poison_rw() {
450 let arc = Arc::new(RWLock::new(1i));
451 let arc2 = arc.clone();
452 let _: Result<uint, _> = Thread::spawn(move|| {
453 let _lock = arc2.read().unwrap();
456 let lock = arc.write().unwrap();
457 assert_eq!(*lock, 1);
// Writer increments to 10 while concurrent readers observe the value;
// readers must never see a torn/intermediate state.
462 let arc = Arc::new(RWLock::new(0i));
463 let arc2 = arc.clone();
464 let (tx, rx) = channel();
466 Thread::spawn(move|| {
467 let mut lock = arc2.write().unwrap();
468 for _ in range(0u, 10) {
474 tx.send(()).unwrap();
477 // Readers try to catch the writer in the act
478 let mut children = Vec::new();
479 for _ in range(0u, 5) {
480 let arc3 = arc.clone();
481 children.push(Thread::spawn(move|| {
482 let lock = arc3.read().unwrap();
487 // Wait for children to pass their asserts
488 for r in children.into_iter() {
489 assert!(r.join().is_ok());
492 // Wait for writer to finish
494 let lock = arc.read().unwrap();
495 assert_eq!(*lock, 10);
// A write acquired from a Drop impl during unwinding still lands: the
// final read observes the Unwinder's increment (1 -> 2).
499 fn test_rw_arc_access_in_unwind() {
500 let arc = Arc::new(RWLock::new(1i));
501 let arc2 = arc.clone();
502 let _ = Thread::spawn(move|| -> () {
506 impl Drop for Unwinder {
508 let mut lock = self.i.write().unwrap();
512 let _u = Unwinder { i: arc2 };
515 let lock = arc.read().unwrap();
516 assert_eq!(*lock, 2);