// library/std/src/sys/unix/locks/pthread_rwlock.rs
use crate::cell::UnsafeCell;
use crate::mem::forget;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sys_common::lazy_box::{LazyBox, LazyInit};

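// The raw pthread rwlock is wrapped with extra bookkeeping so that attempts
// by one thread to acquire the lock twice are detected and turned into a
// panic, instead of deadlocking or (on old glibc) silently succeeding as a
// recursive lock. `write_locked` is only accessed while `inner` is held,
// and `num_readers` can use Relaxed ordering because it is only consulted
// to detect such deadlocks; the pthread lock itself provides the
// synchronization that the protected data relies on.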
pub struct RwLock {
    inner: UnsafeCell<libc::pthread_rwlock_t>,
    write_locked: UnsafeCell<bool>, // guarded by the `inner` RwLock
    num_readers: AtomicUsize,
}

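// A pthread rwlock must not be moved once it has been used, so the lock is
// kept behind a lazily allocated box (`LazyBox`) to give it a stable address.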
pub(crate) type MovableRwLock = LazyBox<RwLock>;

unsafe impl Send for RwLock {}
unsafe impl Sync for RwLock {}

impl LazyInit for RwLock {
    fn init() -> Box<Self> {
        Box::new(Self::new())
    }

    fn destroy(mut rwlock: Box<Self>) {
        // We're not allowed to pthread_rwlock_destroy a locked rwlock,
        // so check first if it's unlocked.
        if *rwlock.write_locked.get_mut() || *rwlock.num_readers.get_mut() != 0 {
            // The rwlock is locked. This happens if a RwLock{Read,Write}Guard is leaked.
            // In this case, we just leak the RwLock too.
            forget(rwlock);
        }
    }

    fn cancel_init(_: Box<Self>) {
        // In this case, we can just drop it without any checks,
        // since it cannot have been locked yet.
    }
}

impl RwLock {
    pub const fn new() -> RwLock {
        RwLock {
            inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
            write_locked: UnsafeCell::new(false),
            num_readers: AtomicUsize::new(0),
        }
    }
    #[inline]
    pub unsafe fn read(&self) {
        let r = libc::pthread_rwlock_rdlock(self.inner.get());

        // According to POSIX, when a thread tries to acquire this read lock
        // while it already holds the write lock
        // (or vice versa, or tries to acquire the write lock twice),
        // "the call shall either deadlock or return [EDEADLK]"
        // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html,
        // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_rdlock.html).
        // So, in principle, all we have to do here is check `r == 0` to be sure we properly
        // got the lock.
        //
        // However, (at least) glibc before version 2.25 does not conform to this spec,
        // and can return `r == 0` even when this thread already holds the write lock.
        // We thus check for this situation ourselves and panic when detecting that a thread
        // got the write lock more than once, or got a read and a write lock.
        if r == libc::EAGAIN {
            panic!("rwlock maximum reader count exceeded");
        } else if r == libc::EDEADLK || (r == 0 && *self.write_locked.get()) {
            // Above, we make sure to only access `write_locked` when `r == 0` to avoid
            // data races.
            if r == 0 {
                // `pthread_rwlock_rdlock` succeeded when it should not have.
                self.raw_unlock();
            }
            panic!("rwlock read lock would result in deadlock");
        } else {
            // POSIX does not make guarantees about all the errors that may be returned.
            // See issue #94705 for more details.
            assert_eq!(r, 0, "unexpected error during rwlock read lock: {:?}", r);
            self.num_readers.fetch_add(1, Ordering::Relaxed);
        }
    }
    #[inline]
    pub unsafe fn try_read(&self) -> bool {
        let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
        if r == 0 {
            if *self.write_locked.get() {
                // `pthread_rwlock_tryrdlock` succeeded when it should not have.
                self.raw_unlock();
                false
            } else {
                self.num_readers.fetch_add(1, Ordering::Relaxed);
                true
            }
        } else {
            false
        }
    }
    #[inline]
    pub unsafe fn write(&self) {
        let r = libc::pthread_rwlock_wrlock(self.inner.get());
        // See comments above for why we check for EDEADLK and write_locked. For the same reason,
        // we also need to check that there are no readers (tracked in `num_readers`).
        if r == libc::EDEADLK
            || (r == 0 && *self.write_locked.get())
            || self.num_readers.load(Ordering::Relaxed) != 0
        {
            // Above, we make sure to only access `write_locked` when `r == 0` to avoid
            // data races.
            if r == 0 {
                // `pthread_rwlock_wrlock` succeeded when it should not have.
                self.raw_unlock();
            }
            panic!("rwlock write lock would result in deadlock");
        } else {
            // According to POSIX, for a properly initialized rwlock this can only
            // return EDEADLK or 0. We rely on that.
            debug_assert_eq!(r, 0);
        }
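        // Reached only on a successful acquisition: every error path above
        // panics (unlocking first where necessary).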
        *self.write_locked.get() = true;
    }
    #[inline]
    pub unsafe fn try_write(&self) -> bool {
        let r = libc::pthread_rwlock_trywrlock(self.inner.get());
        if r == 0 {
            if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
                // `pthread_rwlock_trywrlock` succeeded when it should not have.
                self.raw_unlock();
                false
            } else {
                *self.write_locked.get() = true;
                true
            }
        } else {
            false
        }
    }
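    // `pthread_rwlock_unlock` releases whichever kind of lock (read or write)
    // the calling thread currently holds, so one raw helper serves both the
    // read and write unlock paths.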
    #[inline]
    unsafe fn raw_unlock(&self) {
        let r = libc::pthread_rwlock_unlock(self.inner.get());
        debug_assert_eq!(r, 0);
    }
    #[inline]
    pub unsafe fn read_unlock(&self) {
        debug_assert!(!*self.write_locked.get());
        self.num_readers.fetch_sub(1, Ordering::Relaxed);
        self.raw_unlock();
    }
    #[inline]
    pub unsafe fn write_unlock(&self) {
        debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
        debug_assert!(*self.write_locked.get());
        *self.write_locked.get() = false;
        self.raw_unlock();
    }
    #[inline]
    unsafe fn destroy(&mut self) {
        let r = libc::pthread_rwlock_destroy(self.inner.get());
        // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
        // rwlock that was just initialized with
        // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
        // or pthread_rwlock_init() is called, this behaviour no longer occurs.
        if cfg!(target_os = "dragonfly") {
            debug_assert!(r == 0 || r == libc::EINVAL);
        } else {
            debug_assert_eq!(r, 0);
        }
    }
}

impl Drop for RwLock {
    #[inline]
    fn drop(&mut self) {
        unsafe { self.destroy() };
    }
}
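
// A minimal usage sketch, not part of the upstream file: it shows how a
// caller is expected to pair each lock call with its matching unlock. The
// `tests` module name and its placement here are illustrative assumptions.
#[cfg(test)]
mod tests {
    use super::RwLock;

    #[test]
    fn lock_unlock_pairs() {
        let lock = RwLock::new();
        unsafe {
            // A thread may hold several read locks at once; each `read`
            // needs a matching `read_unlock`.
            lock.read();
            lock.read();
            assert!(!lock.try_write()); // readers exclude writers
            lock.read_unlock();
            lock.read_unlock();

            // Once all readers are gone, a write lock can be taken.
            assert!(lock.try_write());
            lock.write_unlock();
        }
    }
}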