]> git.lizzy.rs Git - rust.git/blob - library/std/src/sys/unix/locks/pthread_rwlock.rs
Rollup merge of #104356 - RalfJung:interpret-check-mplace, r=oli-obk
[rust.git] / library / std / src / sys / unix / locks / pthread_rwlock.rs
1 use crate::cell::UnsafeCell;
2 use crate::mem::forget;
3 use crate::sync::atomic::{AtomicUsize, Ordering};
4 use crate::sys_common::lazy_box::{LazyBox, LazyInit};
5
// The actual pthread rwlock plus the bookkeeping needed to detect
// misbehaving (non-POSIX-conforming) pthread implementations; see the
// comments in `RwLock::read` below.
struct AllocatedRwLock {
    // The OS-level rwlock. `UnsafeCell` because pthread mutates it through
    // a raw pointer even when we only hold a shared reference to it.
    inner: UnsafeCell<libc::pthread_rwlock_t>,
    // Whether a writer currently holds the lock. A plain `UnsafeCell<bool>`
    // (not an atomic) because it is only accessed while `inner` is held.
    write_locked: UnsafeCell<bool>, // guarded by the `inner` RwLock
    // Number of readers currently holding the lock. Atomic because multiple
    // readers update it concurrently while holding `inner` in shared mode.
    num_readers: AtomicUsize,
}
11
// SAFETY: a pthread rwlock is made to be shared and used across threads, and
// the extra bookkeeping fields are either atomic (`num_readers`) or only
// accessed while the lock is held (`write_locked`), as noted on the struct.
unsafe impl Send for AllocatedRwLock {}
unsafe impl Sync for AllocatedRwLock {}
14
/// A reader-writer lock backed by `pthread_rwlock_t`.
///
/// The pthread lock is lazily heap-allocated through `LazyBox` so that
/// `RwLock::new` can be `const` and so the `pthread_rwlock_t` has a stable
/// address for its entire lifetime, which pthread requires.
pub struct RwLock {
    inner: LazyBox<AllocatedRwLock>,
}
18
impl LazyInit for AllocatedRwLock {
    // Allocates a fresh rwlock in the unlocked state, using the static
    // initializer so no pthread_rwlock_init call (which can fail) is needed.
    fn init() -> Box<Self> {
        Box::new(AllocatedRwLock {
            inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
            write_locked: UnsafeCell::new(false),
            num_readers: AtomicUsize::new(0),
        })
    }

    // Called when the `RwLock` wrapper is dropped while the lazy box is
    // initialized. `&mut` access (via `get_mut`) means no other thread can
    // hold the lock concurrently, so these reads are race-free.
    fn destroy(mut rwlock: Box<Self>) {
        // We're not allowed to pthread_rwlock_destroy a locked rwlock,
        // so check first if it's unlocked.
        if *rwlock.write_locked.get_mut() || *rwlock.num_readers.get_mut() != 0 {
            // The rwlock is locked. This happens if a RwLock{Read,Write}Guard is leaked.
            // In this case, we just leak the RwLock too.
            forget(rwlock);
        }
        // Otherwise the Box drops normally, running `Drop for
        // AllocatedRwLock`, which calls pthread_rwlock_destroy.
    }

    // Called when this thread lost the race to initialize the lazy box;
    // the freshly created lock is simply discarded.
    fn cancel_init(_: Box<Self>) {
        // In this case, we can just drop it without any checks,
        // since it cannot have been locked yet.
    }
}
43
impl AllocatedRwLock {
    /// Releases the pthread lock (whichever mode it is held in) without
    /// touching the `write_locked`/`num_readers` bookkeeping.
    ///
    /// # Safety
    /// The current thread must hold this lock (in read or write mode);
    /// pthread_rwlock_unlock on an unheld lock is undefined behavior.
    #[inline]
    unsafe fn raw_unlock(&self) {
        let r = libc::pthread_rwlock_unlock(self.inner.get());
        debug_assert_eq!(r, 0);
    }
}
51
impl Drop for AllocatedRwLock {
    // By the time this runs, `LazyInit::destroy` has already verified the
    // lock is unlocked (or leaked it instead), so destroying here is valid.
    fn drop(&mut self) {
        let r = unsafe { libc::pthread_rwlock_destroy(self.inner.get()) };
        // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
        // rwlock that was just initialized with
        // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
        // or pthread_rwlock_init() is called, this behaviour no longer occurs.
        if cfg!(target_os = "dragonfly") {
            debug_assert!(r == 0 || r == libc::EINVAL);
        } else {
            debug_assert_eq!(r, 0);
        }
    }
}
66
impl RwLock {
    /// Creates a new, unlocked rwlock. The underlying pthread lock is not
    /// allocated until first use (`LazyBox` initializes on first deref).
    #[inline]
    pub const fn new() -> RwLock {
        RwLock { inner: LazyBox::new() }
    }

    /// Acquires the lock in shared (read) mode, blocking until available.
    ///
    /// Panics instead of deadlocking when the calling thread already holds
    /// the write lock, and panics if the platform's maximum reader count
    /// is exceeded.
    #[inline]
    pub fn read(&self) {
        let lock = &*self.inner;
        let r = unsafe { libc::pthread_rwlock_rdlock(lock.inner.get()) };

        // According to POSIX, when a thread tries to acquire this read lock
        // while it already holds the write lock
        // (or vice versa, or tries to acquire the write lock twice),
        // "the call shall either deadlock or return [EDEADLK]"
        // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html,
        // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_rdlock.html).
        // So, in principle, all we have to do here is check `r == 0` to be sure we properly
        // got the lock.
        //
        // However, (at least) glibc before version 2.25 does not conform to this spec,
        // and can return `r == 0` even when this thread already holds the write lock.
        // We thus check for this situation ourselves and panic when detecting that a thread
        // got the write lock more than once, or got a read and a write lock.
        if r == libc::EAGAIN {
            panic!("rwlock maximum reader count exceeded");
        } else if r == libc::EDEADLK || (r == 0 && unsafe { *lock.write_locked.get() }) {
            // Above, we make sure to only access `write_locked` when `r == 0` to avoid
            // data races.
            if r == 0 {
                // `pthread_rwlock_rdlock` succeeded when it should not have.
                // Release the lock we wrongly acquired before panicking, so
                // the lock is not left held by a dead frame.
                unsafe {
                    lock.raw_unlock();
                }
            }
            panic!("rwlock read lock would result in deadlock");
        } else {
            // POSIX does not make guarantees about all the errors that may be returned.
            // See issue #94705 for more details.
            assert_eq!(r, 0, "unexpected error during rwlock read lock: {:?}", r);
            // Count ourselves as a reader only after the lock is actually held.
            lock.num_readers.fetch_add(1, Ordering::Relaxed);
        }
    }

    /// Attempts to acquire the lock in shared (read) mode without blocking.
    /// Returns `true` on success. Also returns `false` when a non-conforming
    /// pthread implementation wrongly granted a read lock to the thread that
    /// already holds the write lock (see `read` above).
    #[inline]
    pub fn try_read(&self) -> bool {
        let lock = &*self.inner;
        let r = unsafe { libc::pthread_rwlock_tryrdlock(lock.inner.get()) };
        if r == 0 {
            // `write_locked` may only be read while the lock is held (r == 0).
            if unsafe { *lock.write_locked.get() } {
                // `pthread_rwlock_tryrdlock` succeeded when it should not have.
                unsafe {
                    lock.raw_unlock();
                }
                false
            } else {
                lock.num_readers.fetch_add(1, Ordering::Relaxed);
                true
            }
        } else {
            false
        }
    }

    /// Acquires the lock in exclusive (write) mode, blocking until available.
    ///
    /// Panics instead of deadlocking when the calling thread already holds
    /// this lock (in either mode) on non-conforming pthread implementations.
    #[inline]
    pub fn write(&self) {
        let lock = &*self.inner;
        let r = unsafe { libc::pthread_rwlock_wrlock(lock.inner.get()) };
        // See comments above for why we check for EDEADLK and write_locked. For the same reason,
        // we also need to check that there are no readers (tracked in `num_readers`).
        if r == libc::EDEADLK
            || (r == 0 && unsafe { *lock.write_locked.get() })
            || lock.num_readers.load(Ordering::Relaxed) != 0
        {
            // Above, we make sure to only access `write_locked` when `r == 0` to avoid
            // data races.
            if r == 0 {
                // `pthread_rwlock_wrlock` succeeded when it should not have.
                unsafe {
                    lock.raw_unlock();
                }
            }
            panic!("rwlock write lock would result in deadlock");
        } else {
            // According to POSIX, for a properly initialized rwlock this can only
            // return EDEADLK or 0. We rely on that.
            debug_assert_eq!(r, 0);
        }

        // Record that a writer holds the lock; safe to write `write_locked`
        // because we now hold the lock exclusively.
        unsafe {
            *lock.write_locked.get() = true;
        }
    }

    /// Attempts to acquire the lock in exclusive (write) mode without
    /// blocking. Returns `true` on success.
    ///
    /// # Safety
    /// NOTE(review): marked `unsafe` presumably for consistency with the
    /// sys lock API surface; the body upholds the same invariants as
    /// `write` — confirm against the platform-independent callers.
    #[inline]
    pub unsafe fn try_write(&self) -> bool {
        let lock = &*self.inner;
        let r = libc::pthread_rwlock_trywrlock(lock.inner.get());
        if r == 0 {
            // Guard against non-conforming implementations granting a write
            // lock while this thread already holds it or holds a read lock.
            if *lock.write_locked.get() || lock.num_readers.load(Ordering::Relaxed) != 0 {
                // `pthread_rwlock_trywrlock` succeeded when it should not have.
                lock.raw_unlock();
                false
            } else {
                *lock.write_locked.get() = true;
                true
            }
        } else {
            false
        }
    }

    /// Releases a shared (read) lock.
    ///
    /// # Safety
    /// The current thread must hold this lock in read mode.
    #[inline]
    pub unsafe fn read_unlock(&self) {
        let lock = &*self.inner;
        debug_assert!(!*lock.write_locked.get());
        // Decrement the reader count before releasing, while we still hold
        // the lock in shared mode.
        lock.num_readers.fetch_sub(1, Ordering::Relaxed);
        lock.raw_unlock();
    }

    /// Releases an exclusive (write) lock.
    ///
    /// # Safety
    /// The current thread must hold this lock in write mode.
    #[inline]
    pub unsafe fn write_unlock(&self) {
        let lock = &*self.inner;
        debug_assert_eq!(lock.num_readers.load(Ordering::Relaxed), 0);
        debug_assert!(*lock.write_locked.get());
        // Clear the writer flag before unlocking; after `raw_unlock` we no
        // longer have the exclusive access required to touch `write_locked`.
        *lock.write_locked.get() = false;
        lock.raw_unlock();
    }
}
195 }