// library/std/src/sys/unix/rwlock.rs
use crate::cell::UnsafeCell;
use crate::sync::atomic::{AtomicUsize, Ordering};

pub struct RWLock {
    inner: UnsafeCell<libc::pthread_rwlock_t>,
    write_locked: UnsafeCell<bool>, // guarded by the `inner` RwLock
    num_readers: AtomicUsize,
}

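// SAFETY: the raw pthread rwlock is designed to be locked and unlocked from any
// thread, and `write_locked` is only accessed while `inner` is held (see the
// comments in `read` and `write` below), so sharing and sending `RWLock`
// across threads is sound.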
unsafe impl Send for RWLock {}
unsafe impl Sync for RWLock {}

impl RWLock {
    pub const fn new() -> RWLock {
        RWLock {
            inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
            write_locked: UnsafeCell::new(false),
            num_readers: AtomicUsize::new(0),
        }
    }
    #[inline]
    pub unsafe fn read(&self) {
        let r = libc::pthread_rwlock_rdlock(self.inner.get());

        // According to POSIX, when a thread tries to acquire this read lock
        // while it already holds the write lock
        // (or vice versa, or tries to acquire the write lock twice),
        // "the call shall either deadlock or return [EDEADLK]"
        // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html,
        // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_rdlock.html).
        // So, in principle, all we have to do here is check `r == 0` to be sure we properly
        // got the lock.
        //
        // However, (at least) glibc before version 2.25 does not conform to this spec,
        // and can return `r == 0` even when this thread already holds the write lock.
        // We thus check for this situation ourselves and panic when detecting that a thread
        // got the write lock more than once, or got a read and a write lock.
        if r == libc::EAGAIN {
            panic!("rwlock maximum reader count exceeded");
        } else if r == libc::EDEADLK || (r == 0 && *self.write_locked.get()) {
            // Above, we make sure to only access `write_locked` when `r == 0` to avoid
            // data races.
            if r == 0 {
                // `pthread_rwlock_rdlock` succeeded when it should not have.
                self.raw_unlock();
            }
            panic!("rwlock read lock would result in deadlock");
        } else {
            // According to POSIX, for a properly initialized rwlock this can only
            // return EAGAIN or EDEADLK or 0. We rely on that.
            debug_assert_eq!(r, 0);
            self.num_readers.fetch_add(1, Ordering::Relaxed);
        }
    }
    #[inline]
    pub unsafe fn try_read(&self) -> bool {
        let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
        if r == 0 {
            if *self.write_locked.get() {
                // `pthread_rwlock_tryrdlock` succeeded when it should not have.
                self.raw_unlock();
                false
            } else {
                self.num_readers.fetch_add(1, Ordering::Relaxed);
                true
            }
        } else {
            false
        }
    }
    #[inline]
    pub unsafe fn write(&self) {
        let r = libc::pthread_rwlock_wrlock(self.inner.get());
        // See comments above for why we check for EDEADLK and write_locked. For the same reason,
        // we also need to check that there are no readers (tracked in `num_readers`).
        if r == libc::EDEADLK
            || (r == 0 && *self.write_locked.get())
            || self.num_readers.load(Ordering::Relaxed) != 0
        {
            // Above, we make sure to only access `write_locked` when `r == 0` to avoid
            // data races.
            if r == 0 {
                // `pthread_rwlock_wrlock` succeeded when it should not have.
                self.raw_unlock();
            }
            panic!("rwlock write lock would result in deadlock");
        } else {
            // According to POSIX, for a properly initialized rwlock this can only
            // return EDEADLK or 0. We rely on that.
            debug_assert_eq!(r, 0);
        }
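        // If we reach this point without panicking, the `else` branch above ran
        // and we hold the write lock, so this plain (non-atomic) store to
        // `write_locked` cannot race with other threads.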
        *self.write_locked.get() = true;
    }
    #[inline]
    pub unsafe fn try_write(&self) -> bool {
        let r = libc::pthread_rwlock_trywrlock(self.inner.get());
        if r == 0 {
            if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
                // `pthread_rwlock_trywrlock` succeeded when it should not have.
                self.raw_unlock();
                false
            } else {
                *self.write_locked.get() = true;
                true
            }
        } else {
            false
        }
    }
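    // Unlocks the raw pthread rwlock without updating `write_locked` or
    // `num_readers`; callers are responsible for keeping that bookkeeping
    // consistent themselves.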
    #[inline]
    unsafe fn raw_unlock(&self) {
        let r = libc::pthread_rwlock_unlock(self.inner.get());
        debug_assert_eq!(r, 0);
    }
    #[inline]
    pub unsafe fn read_unlock(&self) {
        debug_assert!(!*self.write_locked.get());
        self.num_readers.fetch_sub(1, Ordering::Relaxed);
        self.raw_unlock();
    }
    #[inline]
    pub unsafe fn write_unlock(&self) {
        debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
        debug_assert!(*self.write_locked.get());
        *self.write_locked.get() = false;
        self.raw_unlock();
    }
    #[inline]
    pub unsafe fn destroy(&self) {
        let r = libc::pthread_rwlock_destroy(self.inner.get());
        // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
        // rwlock that was just initialized with
        // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
        // or pthread_rwlock_init() is called, this behaviour no longer occurs.
        if cfg!(target_os = "dragonfly") {
            debug_assert!(r == 0 || r == libc::EINVAL);
        } else {
            debug_assert_eq!(r, 0);
        }
    }
}

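// A wrapper for `RWLock`s stored in `static`s. Because the methods below take
// `&'static self`, the lock can never be moved while in use (mirroring the
// SAFETY comments inside them), and the returned guards unlock on drop.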
pub struct StaticRWLock(RWLock);

impl StaticRWLock {
    pub const fn new() -> StaticRWLock {
        StaticRWLock(RWLock::new())
    }

    /// Acquires shared access to the underlying lock, blocking the current
    /// thread to do so.
    ///
    /// The lock is automatically unlocked when the returned guard is dropped.
    #[inline]
    pub fn read_with_guard(&'static self) -> RWLockReadGuard {
        // SAFETY: All methods require static references, therefore self
        // cannot be moved between invocations.
        unsafe {
            self.0.read();
        }
        RWLockReadGuard(&self.0)
    }

    /// Acquires write access to the underlying lock, blocking the current thread
    /// to do so.
    ///
    /// The lock is automatically unlocked when the returned guard is dropped.
    #[inline]
    pub fn write_with_guard(&'static self) -> RWLockWriteGuard {
        // SAFETY: All methods require static references, therefore self
        // cannot be moved between invocations.
        unsafe {
            self.0.write();
        }
        RWLockWriteGuard(&self.0)
    }
}

pub struct RWLockReadGuard(&'static RWLock);

impl Drop for RWLockReadGuard {
    fn drop(&mut self) {
        unsafe { self.0.read_unlock() }
    }
}

pub struct RWLockWriteGuard(&'static RWLock);

impl Drop for RWLockWriteGuard {
    fn drop(&mut self) {
        unsafe { self.0.write_unlock() }
    }
}
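
// Illustrative sketch, not part of the original file: how a caller might use
// `StaticRWLock` through its RAII guards. `DATA_LOCK` and `read_data` are
// hypothetical names, for illustration only.
//
//     static DATA_LOCK: StaticRWLock = StaticRWLock::new();
//
//     fn read_data() {
//         let _guard = DATA_LOCK.read_with_guard(); // takes the shared lock
//         // ... access the protected data here ...
//     } // `_guard` is dropped here, which calls `read_unlock`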