]> git.lizzy.rs Git - rust.git/blob - library/std/src/sys/cloudabi/rwlock.rs
Rollup merge of #78295 - Alexendoo:ice-regression-tests, r=nagisa
[rust.git] / library / std / src / sys / cloudabi / rwlock.rs
1 use crate::mem;
2 use crate::mem::MaybeUninit;
3 use crate::sync::atomic::{AtomicU32, Ordering};
4 use crate::sys::cloudabi::abi;
5
extern "C" {
    // Kernel thread id of the current thread (presumably provided by the
    // C runtime — note the pthread naming; confirm against libc). It is
    // ORed into the lock word to record which thread owns a write lock.
    #[thread_local]
    static __pthread_thread_id: abi::tid;
}
10
// Number of read locks currently held by this thread, summed over all
// RWLock instances (it is a single file-level thread-local). Incremented
// on every successful read acquisition and decremented in read_unlock();
// try_read() consults it to decide whether jumping the kernel's wait
// queue is justified to avoid a self-deadlock.
#[thread_local]
static mut RDLOCKS_ACQUIRED: u32 = 0;
13
/// A reader-writer lock whose entire state lives in one atomic word.
///
/// The word doubles as a CloudABI `abi::lock`: on contention it is handed
/// to the kernel (see the `poll`/`lock_unlock` calls below), which parks
/// and wakes threads while userspace keeps the fast paths lock-free.
pub struct RWLock {
    lock: AtomicU32,
}
17
18 pub unsafe fn raw(r: &RWLock) -> &AtomicU32 {
19     &r.lock
20 }
21
// SAFETY: all state is the single AtomicU32, which is only ever accessed
// through atomic operations (or handed to the kernel by pointer), so the
// type may be sent to and shared between threads.
unsafe impl Send for RWLock {}
unsafe impl Sync for RWLock {}
24
impl RWLock {
    /// Creates a new, unlocked reader-writer lock.
    pub const fn new() -> RWLock {
        RWLock { lock: AtomicU32::new(abi::LOCK_UNLOCKED.0) }
    }

    /// Attempts to take a read lock entirely in userspace.
    ///
    /// Returns `true` on success. Returns `false` (without blocking) when
    /// a writer holds the lock, or when the kernel is managing the lock's
    /// wait queue and this thread holds no read locks yet — in that case
    /// the caller is expected to queue through the kernel (see `read`).
    pub unsafe fn try_read(&self) -> bool {
        let mut old = abi::LOCK_UNLOCKED.0;
        // CAS loop: optimistically bump the reader count (low bits of the
        // lock word) by one, restarting with the freshly observed value on
        // failure.
        while let Err(cur) =
            self.lock.compare_exchange_weak(old, old + 1, Ordering::Acquire, Ordering::Relaxed)
        {
            if (cur & abi::LOCK_WRLOCKED.0) != 0 {
                // Another thread already has a write lock.
                //
                // NOTE(review): this recursion check inspects `old` — the
                // stale CAS expectation — rather than `cur`, the value just
                // observed. On the first iteration `old` is LOCK_UNLOCKED,
                // so a thread taking a read lock while holding its own
                // write lock slips past the assert and merely gets `false`;
                // confirm whether `cur` was intended here.
                assert_ne!(
                    old & !abi::LOCK_KERNEL_MANAGED.0,
                    __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
                    "Attempted to acquire a read lock while holding a write lock"
                );
                return false;
            } else if (old & abi::LOCK_KERNEL_MANAGED.0) != 0 && RDLOCKS_ACQUIRED == 0 {
                // Lock has threads waiting for the lock. Only acquire
                // the lock if we have already acquired read locks. In
                // that case, it is justified to acquire this lock to
                // prevent a deadlock.
                //
                // NOTE(review): like the assert above, this tests `old`
                // rather than `cur`, so the kernel-managed bit is only
                // honored one CAS iteration late — verify intent.
                return false;
            }
            old = cur;
        }

        // Record per-thread ownership; consulted by the deadlock-avoidance
        // branch above and validated again in read_unlock().
        RDLOCKS_ACQUIRED += 1;
        true
    }

    /// Acquires a read lock, falling back to a blocking kernel wait when
    /// the userspace fast path fails.
    pub unsafe fn read(&self) {
        if !self.try_read() {
            // Call into the kernel to acquire a read lock.
            let subscription = abi::subscription {
                type_: abi::eventtype::LOCK_RDLOCK,
                union: abi::subscription_union {
                    lock: abi::subscription_lock {
                        // The kernel operates on the same word the
                        // userspace CAS loops use.
                        lock: &self.lock as *const AtomicU32 as *mut abi::lock,
                        lock_scope: abi::scope::PRIVATE,
                    },
                },
                ..mem::zeroed()
            };
            let mut event = MaybeUninit::<abi::event>::uninit();
            let mut nevents = MaybeUninit::<usize>::uninit();
            // poll() returns once the kernel has granted this thread the
            // read lock; both the call and the event payload are checked.
            let ret = abi::poll(&subscription, event.as_mut_ptr(), 1, nevents.as_mut_ptr());
            assert_eq!(ret, abi::errno::SUCCESS, "Failed to acquire read lock");
            let event = event.assume_init();
            assert_eq!(event.error, abi::errno::SUCCESS, "Failed to acquire read lock");

            RDLOCKS_ACQUIRED += 1;
        }
    }

    /// Releases a read lock previously taken by this thread.
    pub unsafe fn read_unlock(&self) {
        // Perform a read unlock. We can do this in userspace, except when
        // other threads are blocked and we are performing the last unlock.
        // In that case, call into the kernel.
        //
        // Other threads may attempt to increment the read lock count,
        // meaning that the call into the kernel could be spurious. To
        // prevent this from happening, upgrade to a write lock first. This
        // allows us to call into the kernel, having the guarantee that the
        // lock value will not change in the meantime.
        assert!(RDLOCKS_ACQUIRED > 0, "Bad lock count");
        // Start from the optimistic guess "exactly one reader, no kernel
        // involvement"; failed CASes below refresh it with the real value.
        let mut old = 1;
        loop {
            if old == 1 | abi::LOCK_KERNEL_MANAGED.0 {
                // Last read lock while threads are waiting. Attempt to upgrade
                // to a write lock before calling into the kernel to unlock.
                if let Err(cur) = self.lock.compare_exchange_weak(
                    old,
                    __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0 | abi::LOCK_KERNEL_MANAGED.0,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                ) {
                    old = cur;
                } else {
                    // Call into the kernel to unlock.
                    let ret = abi::lock_unlock(
                        &self.lock as *const AtomicU32 as *mut abi::lock,
                        abi::scope::PRIVATE,
                    );
                    assert_eq!(ret, abi::errno::SUCCESS, "Failed to write unlock a rwlock");
                    break;
                }
            } else {
                // No threads waiting or not the last read lock. Just decrement
                // the read lock count.
                assert_ne!(old & !abi::LOCK_KERNEL_MANAGED.0, 0, "This rwlock is not locked");
                assert_eq!(
                    old & abi::LOCK_WRLOCKED.0,
                    0,
                    "Attempted to read-unlock a write-locked rwlock"
                );
                // NOTE(review): an unlock path would conventionally use
                // Release ordering on success so reads in the critical
                // section cannot sink past it; Acquire is used here —
                // confirm against the CloudABI memory model.
                if let Err(cur) = self.lock.compare_exchange_weak(
                    old,
                    old - 1,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                ) {
                    old = cur;
                } else {
                    break;
                }
            }
        }

        RDLOCKS_ACQUIRED -= 1;
    }

    /// Attempts to take the write lock in userspace without blocking.
    ///
    /// On success the lock word records this thread's id plus the
    /// write-locked bit. Panics on recursive write acquisition.
    pub unsafe fn try_write(&self) -> bool {
        // Attempt to acquire the lock.
        if let Err(old) = self.lock.compare_exchange(
            abi::LOCK_UNLOCKED.0,
            __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
            Ordering::Acquire,
            Ordering::Relaxed,
        ) {
            // Failure. Crash upon recursive acquisition.
            assert_ne!(
                old & !abi::LOCK_KERNEL_MANAGED.0,
                __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
                "Attempted to recursive write-lock a rwlock",
            );
            false
        } else {
            // Success.
            true
        }
    }

    /// Acquires the write lock, blocking in the kernel when the
    /// userspace fast path fails.
    pub unsafe fn write(&self) {
        if !self.try_write() {
            // Call into the kernel to acquire a write lock.
            let subscription = abi::subscription {
                type_: abi::eventtype::LOCK_WRLOCK,
                union: abi::subscription_union {
                    lock: abi::subscription_lock {
                        lock: &self.lock as *const AtomicU32 as *mut abi::lock,
                        lock_scope: abi::scope::PRIVATE,
                    },
                },
                ..mem::zeroed()
            };
            let mut event = MaybeUninit::<abi::event>::uninit();
            let mut nevents = MaybeUninit::<usize>::uninit();
            // Blocks until the kernel hands this thread the write lock.
            let ret = abi::poll(&subscription, event.as_mut_ptr(), 1, nevents.as_mut_ptr());
            assert_eq!(ret, abi::errno::SUCCESS, "Failed to acquire write lock");
            let event = event.assume_init();
            assert_eq!(event.error, abi::errno::SUCCESS, "Failed to acquire write lock");
        }
    }

    /// Releases the write lock held by this thread. Panics if this
    /// thread is not the recorded owner.
    pub unsafe fn write_unlock(&self) {
        // The owner check ignores the kernel-managed bit, which may be set
        // while waiters are queued.
        assert_eq!(
            self.lock.load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
            __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
            "This rwlock is not write-locked by this thread"
        );

        // Fast path: no waiters, so the expected value (owner id + wrlocked
        // bit, kernel bit clear) can simply be swapped back to unlocked.
        if !self
            .lock
            .compare_exchange(
                __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
                abi::LOCK_UNLOCKED.0,
                Ordering::Release,
                Ordering::Relaxed,
            )
            .is_ok()
        {
            // Lock is managed by kernelspace. Call into the kernel
            // to unblock waiting threads.
            let ret = abi::lock_unlock(
                &self.lock as *const AtomicU32 as *mut abi::lock,
                abi::scope::PRIVATE,
            );
            assert_eq!(ret, abi::errno::SUCCESS, "Failed to write unlock a rwlock");
        }
    }

    /// Checks that the lock is fully unlocked before destruction; there
    /// is no kernel resource to free, so this only asserts.
    pub unsafe fn destroy(&self) {
        assert_eq!(
            self.lock.load(Ordering::Relaxed),
            abi::LOCK_UNLOCKED.0,
            "Attempted to destroy locked rwlock"
        );
    }
}