use rustc_middle::ty::{layout::TyAndLayout, TyKind, TypeAndMut};
use rustc_target::abi::{LayoutOf, Size};

use crate::stacked_borrows::Tag;
use crate::thread::BlockSetId;
use crate::*;
/// Check that the pointee of the given raw-pointer operand is at least `min_size` bytes large,
/// so that the offset-based reads and writes below stay within bounds.
fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    operand: OpTy<'tcx, Tag>,
    min_size: u64,
) -> InterpResult<'tcx, ()> {
    let target_ty = match operand.layout.ty.kind {
        TyKind::RawPtr(TypeAndMut { ty, mutbl: _ }) => ty,
        _ => panic!("Argument to pthread function was not a raw pointer"),
    };
    let target_layout = ecx.layout_of(target_ty)?;
    assert!(target_layout.size.bytes() >= min_size);
    Ok(())
}
fn get_at_offset<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    op: OpTy<'tcx, Tag>,
    offset: u64,
    layout: TyAndLayout<'tcx>,
    min_size: u64,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    // Ensure that the following read at an offset to the given pointer is within bounds.
    assert_ptr_target_min_size(ecx, op, min_size)?;
    let op_place = ecx.deref_operand(op)?;
    let value_place = op_place.offset(Size::from_bytes(offset), MemPlaceMeta::None, layout, ecx)?;
    ecx.read_scalar(value_place.into())
}
fn set_at_offset<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    op: OpTy<'tcx, Tag>,
    offset: u64,
    value: impl Into<ScalarMaybeUninit<Tag>>,
    layout: TyAndLayout<'tcx>,
    min_size: u64,
) -> InterpResult<'tcx, ()> {
    // Ensure that the following write at an offset to the given pointer is within bounds.
    assert_ptr_target_min_size(ecx, op, min_size)?;
    let op_place = ecx.deref_operand(op)?;
    let value_place = op_place.offset(Size::from_bytes(offset), MemPlaceMeta::None, layout, ecx)?;
    ecx.write_scalar(value.into(), value_place.into())
}
// pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.
//
// Our chosen memory layout for emulation (does not have to match the platform layout!):
// store an i32 in the first four bytes equal to the corresponding libc mutex kind constant
// (e.g. PTHREAD_MUTEX_NORMAL).

const PTHREAD_MUTEXATTR_T_MIN_SIZE: u64 = 4;
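
// Illustration only: a hypothetical `#[repr(C)]` view of the emulated attribute described above.
// The shim never defines or instantiates such a type; it accesses the bytes through the
// offset-based helpers below.
#[allow(dead_code)]
#[repr(C)]
struct EmulatedMutexAttrLayout {
    kind: i32, // bytes 0-3: a libc mutex kind constant such as PTHREAD_MUTEX_NORMAL
}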
fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    attr_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, attr_op, 0, ecx.machine.layouts.i32, PTHREAD_MUTEXATTR_T_MIN_SIZE)
}
fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    attr_op: OpTy<'tcx, Tag>,
    kind: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, attr_op, 0, kind, ecx.machine.layouts.i32, PTHREAD_MUTEXATTR_T_MIN_SIZE)
}
// pthread_mutex_t is between 24 and 48 bytes, depending on the platform.
//
// Our chosen memory layout for the emulated mutex (does not have to match the platform layout!):
// bytes 0-3: reserved for signature on macOS
// (need to avoid this because it is set by static initializer macros)
// bytes 4-7: count of how many times this mutex has been locked, as a u32
// bytes 8-11: when count > 0, id of the owner thread as a u32
// bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
// (the kind has to be at this offset for compatibility with the static initializer macros)
// bytes 20-23: when count > 0, id of the blockset in which the blocked threads
// are waiting, or 0 if the blockset is not yet assigned.

const PTHREAD_MUTEX_T_MIN_SIZE: u64 = 24;
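
// Illustration only: a hypothetical `#[repr(C)]` view of the emulated mutex layout described
// above, assuming a 64-bit target (on 32-bit targets the kind sits at bytes 12-15 instead).
// The shim never defines such a type; all accesses go through `get_at_offset`/`set_at_offset`.
#[allow(dead_code)]
#[repr(C)]
struct EmulatedMutexLayout {
    signature: u32,    // bytes 0-3: untouched, reserved for the macOS signature
    locked_count: u32, // bytes 4-7: how many times the mutex is currently locked
    owner: u32,        // bytes 8-11: id of the owning thread (meaningful when locked_count > 0)
    _reserved: u32,    // bytes 12-15: unused on 64-bit targets
    kind: i32,         // bytes 16-19: the libc mutex kind constant
    blockset: u32,     // bytes 20-23: blockset of blocked threads, or 0 if not yet assigned
}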
fn mutex_get_locked_count<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, mutex_op, 4, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
}
fn mutex_set_locked_count<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
    locked_count: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, mutex_op, 4, locked_count, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
}
fn mutex_get_owner<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, mutex_op, 8, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
}
fn mutex_set_owner<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
    owner: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, mutex_op, 8, owner, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
}
fn mutex_get_kind<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
    get_at_offset(ecx, mutex_op, offset, ecx.machine.layouts.i32, PTHREAD_MUTEX_T_MIN_SIZE)
}
fn mutex_set_kind<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
    kind: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
    set_at_offset(ecx, mutex_op, offset, kind, ecx.machine.layouts.i32, PTHREAD_MUTEX_T_MIN_SIZE)
}
fn mutex_get_blockset<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, mutex_op, 20, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
}
fn mutex_set_blockset<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
    blockset: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, mutex_op, 20, blockset, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
}
fn mutex_get_or_create_blockset<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, BlockSetId> {
    let blockset = mutex_get_blockset(ecx, mutex_op)?.to_u32()?;
    if blockset == 0 {
        // 0 is a default value and also not a valid blockset id. Need to
        // allocate a new blockset.
        let blockset = ecx.create_blockset()?;
        mutex_set_blockset(ecx, mutex_op, blockset.to_u32_scalar())?;
        Ok(blockset)
    } else {
        Ok(BlockSetId::new(blockset))
    }
}
// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
//
// Our chosen memory layout for the emulated rwlock (does not have to match the platform layout!):
// bytes 0-3: reserved for signature on macOS
// (need to avoid this because it is set by static initializer macros)
// bytes 4-7: reader count, as a u32
// bytes 8-11: writer count, as a u32
// bytes 12-15: when writer or reader count > 0, id of the blockset in which the
// blocked writers are waiting, or 0 if the blockset is not yet assigned.
// bytes 16-19: when writer count > 0, id of the blockset in which the blocked
// readers are waiting, or 0 if the blockset is not yet assigned.

const PTHREAD_RWLOCK_T_MIN_SIZE: u64 = 20;
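
// Illustration only: a hypothetical `#[repr(C)]` view of the emulated rwlock layout described
// above. The shim never defines such a type; all accesses go through the offset-based helpers.
#[allow(dead_code)]
#[repr(C)]
struct EmulatedRwLockLayout {
    signature: u32,       // bytes 0-3: untouched, reserved for the macOS signature
    readers: u32,         // bytes 4-7: number of readers currently holding the lock
    writers: u32,         // bytes 8-11: 1 while a writer holds the lock, 0 otherwise
    writer_blockset: u32, // bytes 12-15: blockset of blocked writers, or 0 if not yet assigned
    reader_blockset: u32, // bytes 16-19: blockset of blocked readers, or 0 if not yet assigned
}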
fn rwlock_get_readers<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, rwlock_op, 4, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
}
fn rwlock_set_readers<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
    readers: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, rwlock_op, 4, readers, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
}
fn rwlock_get_writers<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, rwlock_op, 8, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
}
fn rwlock_set_writers<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
    writers: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, rwlock_op, 8, writers, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
}
fn rwlock_get_writer_blockset<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, rwlock_op, 12, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
}
fn rwlock_set_writer_blockset<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
    blockset: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, rwlock_op, 12, blockset, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
}
fn rwlock_get_or_create_writer_blockset<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, BlockSetId> {
    let blockset = rwlock_get_writer_blockset(ecx, rwlock_op)?.to_u32()?;
    if blockset == 0 {
        // 0 is a default value and also not a valid blockset id. Need to
        // allocate a new blockset.
        let blockset = ecx.create_blockset()?;
        rwlock_set_writer_blockset(ecx, rwlock_op, blockset.to_u32_scalar())?;
        Ok(blockset)
    } else {
        Ok(BlockSetId::new(blockset))
    }
}
fn rwlock_get_reader_blockset<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, rwlock_op, 16, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
}
fn rwlock_set_reader_blockset<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
    blockset: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, rwlock_op, 16, blockset, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
}
fn rwlock_get_or_create_reader_blockset<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, BlockSetId> {
    let blockset = rwlock_get_reader_blockset(ecx, rwlock_op)?.to_u32()?;
    if blockset == 0 {
        // 0 is a default value and also not a valid blockset id. Need to
        // allocate a new blockset.
        let blockset = ecx.create_blockset()?;
        rwlock_set_reader_blockset(ecx, rwlock_op, blockset.to_u32_scalar())?;
        Ok(blockset)
    } else {
        Ok(BlockSetId::new(blockset))
    }
}
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?;
        mutexattr_set_kind(this, attr_op, default_kind)?;
        Ok(0)
    }
    fn pthread_mutexattr_settype(
        &mut self,
        attr_op: OpTy<'tcx, Tag>,
        kind_op: OpTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let kind = this.read_scalar(kind_op)?.not_undef()?;
        if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
            || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
            || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?
        {
            mutexattr_set_kind(this, attr_op, kind)?;
            Ok(0)
        } else {
            let einval = this.eval_libc_i32("EINVAL")?;
            Ok(einval)
        }
    }
    fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();
        mutexattr_set_kind(this, attr_op, ScalarMaybeUninit::Uninit)?;
        Ok(0)
    }
    fn pthread_mutex_init(
        &mut self,
        mutex_op: OpTy<'tcx, Tag>,
        attr_op: OpTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let attr = this.read_scalar(attr_op)?.not_undef()?;
        let kind = if this.is_null(attr)? {
            this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
        } else {
            mutexattr_get_kind(this, attr_op)?.not_undef()?
        };

        mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
        mutex_set_kind(this, mutex_op, kind)?;

        Ok(0)
    }
    fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
        let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
        let active_thread = this.get_active_thread()?;

        if locked_count == 0 {
            // The mutex is unlocked. Let's lock it.
            mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
            mutex_set_owner(this, mutex_op, active_thread.to_u32_scalar())?;
            Ok(0)
        } else {
            // The mutex is locked. Let's check by whom.
            let owner_thread: ThreadId = mutex_get_owner(this, mutex_op)?.to_u32()?.into();
            if owner_thread != active_thread {
                // Block the active thread.
                let blockset = mutex_get_or_create_blockset(this, mutex_op)?;
                this.block_active_thread(blockset)?;
                Ok(0)
            } else {
                // Trying to acquire the same mutex again.
                if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
                    throw_machine_stop!(TerminationInfo::Deadlock);
                } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
                    this.eval_libc_i32("EDEADLK")
                } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
                    match locked_count.checked_add(1) {
                        Some(new_count) => {
                            mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
                            Ok(0)
                        }
                        None => this.eval_libc_i32("EAGAIN"),
                    }
                } else {
                    throw_ub_format!("called pthread_mutex_lock on an unsupported type of mutex");
                }
            }
        }
    }
    fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
        let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
        let active_thread = this.get_active_thread()?;

        if locked_count == 0 {
            // The mutex is unlocked. Let's lock it.
            mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
            mutex_set_owner(this, mutex_op, active_thread.to_u32_scalar())?;
            Ok(0)
        } else {
            let owner_thread: ThreadId = mutex_get_owner(this, mutex_op)?.to_u32()?.into();
            if owner_thread != active_thread {
                this.eval_libc_i32("EBUSY")
            } else {
                if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
                    || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
                {
                    this.eval_libc_i32("EBUSY")
                } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
                    match locked_count.checked_add(1) {
                        Some(new_count) => {
                            mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
                            Ok(0)
                        }
                        None => this.eval_libc_i32("EAGAIN"),
                    }
                } else {
                    throw_ub_format!(
                        "called pthread_mutex_trylock on an unsupported type of mutex"
                    );
                }
            }
        }
    }
    fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
        let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
        let owner_thread: ThreadId = mutex_get_owner(this, mutex_op)?.to_u32()?.into();

        if owner_thread != this.get_active_thread()? {
            throw_ub_format!("called pthread_mutex_unlock on a mutex owned by another thread");
        } else if locked_count == 1 {
            let blockset = mutex_get_or_create_blockset(this, mutex_op)?;
            if let Some(new_owner) = this.unblock_some_thread(blockset)? {
                // We have at least one thread waiting on this mutex. Transfer
                // ownership to it.
                mutex_set_owner(this, mutex_op, new_owner.to_u32_scalar())?;
            } else {
                // No thread is waiting on this mutex.
                mutex_set_owner(this, mutex_op, Scalar::from_u32(0))?;
                mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
            }
            Ok(0)
        } else {
            if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
                throw_ub_format!("unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked");
            } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
                this.eval_libc_i32("EPERM")
            } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
                match locked_count.checked_sub(1) {
                    Some(new_count) => {
                        mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
                        Ok(0)
                    }
                    None => {
                        // locked_count was already zero
                        this.eval_libc_i32("EPERM")
                    }
                }
            } else {
                throw_ub_format!("called pthread_mutex_unlock on an unsupported type of mutex");
            }
        }
    }
    fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        if mutex_get_locked_count(this, mutex_op)?.to_u32()? != 0 {
            throw_ub_format!("destroyed a locked mutex");
        }

        mutex_set_kind(this, mutex_op, ScalarMaybeUninit::Uninit)?;
        mutex_set_locked_count(this, mutex_op, ScalarMaybeUninit::Uninit)?;
        mutex_set_blockset(this, mutex_op, ScalarMaybeUninit::Uninit)?;
        Ok(0)
    }
    fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
        let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
        if writers != 0 {
            // The lock is locked by a writer.
            assert_eq!(writers, 1);
            let reader_blockset = rwlock_get_or_create_reader_blockset(this, rwlock_op)?;
            this.block_active_thread(reader_blockset)?;
            Ok(0)
        } else {
            match readers.checked_add(1) {
                Some(new_readers) => {
                    rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
                    Ok(0)
                }
                None => this.eval_libc_i32("EAGAIN"),
            }
        }
    }
    fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();
        let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
        let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
        if writers != 0 {
            this.eval_libc_i32("EBUSY")
        } else {
            match readers.checked_add(1) {
                Some(new_readers) => {
                    rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
                    Ok(0)
                }
                None => this.eval_libc_i32("EAGAIN"),
            }
        }
    }
    fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
        let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
        let writer_blockset = rwlock_get_or_create_writer_blockset(this, rwlock_op)?;
        if readers != 0 || writers != 0 {
            this.block_active_thread(writer_blockset)?;
        } else {
            rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
        }
        Ok(0)
    }
    fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
        let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
        if readers != 0 || writers != 0 {
            this.eval_libc_i32("EBUSY")
        } else {
            rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
            Ok(0)
        }
    }
    // FIXME: We should check that this lock was locked by the active thread.
    fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
        let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
        let writer_blockset = rwlock_get_or_create_writer_blockset(this, rwlock_op)?;
        if let Some(new_readers) = readers.checked_sub(1) {
            assert_eq!(writers, 0);
            rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
            if new_readers == 0 {
                if let Some(_writer) = this.unblock_some_thread(writer_blockset)? {
                    rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
                }
            }
            Ok(0)
        } else if writers != 0 {
            let reader_blockset = rwlock_get_or_create_reader_blockset(this, rwlock_op)?;
            // We are prioritizing writers here over readers. As a result, not only
            // can readers starve writers, but writers can also starve readers.
            if let Some(_writer) = this.unblock_some_thread(writer_blockset)? {
                assert_eq!(writers, 1);
            } else {
                rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?;
                let mut readers = 0;
                while let Some(_reader) = this.unblock_some_thread(reader_blockset)? {
                    readers += 1;
                }
                rwlock_set_readers(this, rwlock_op, Scalar::from_u32(readers))?
            }
            Ok(0)
        } else {
            throw_ub_format!("unlocked an rwlock that was not locked");
        }
    }
    fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        if rwlock_get_readers(this, rwlock_op)?.to_u32()? != 0
            || rwlock_get_writers(this, rwlock_op)?.to_u32()? != 0
        {
            throw_ub_format!("destroyed a locked rwlock");
        }

        rwlock_set_readers(this, rwlock_op, ScalarMaybeUninit::Uninit)?;
        rwlock_set_writers(this, rwlock_op, ScalarMaybeUninit::Uninit)?;
        rwlock_set_reader_blockset(this, rwlock_op, ScalarMaybeUninit::Uninit)?;
        rwlock_set_writer_blockset(this, rwlock_op, ScalarMaybeUninit::Uninit)?;

        Ok(0)
    }
}