use std::convert::TryInto;
use std::time::{Duration, SystemTime};

use rustc_middle::ty::{layout::TyAndLayout, TyKind, TypeAndMut};
use rustc_target::abi::{LayoutOf, Size};

use crate::stacked_borrows::Tag;
use crate::thread::Time;
use crate::*;

fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    operand: OpTy<'tcx, Tag>,
    min_size: u64,
) -> InterpResult<'tcx, ()> {
    let target_ty = match operand.layout.ty.kind {
        TyKind::RawPtr(TypeAndMut { ty, mutbl: _ }) => ty,
        _ => panic!("Argument to pthread function was not a raw pointer"),
    };
    let target_layout = ecx.layout_of(target_ty)?;
    assert!(target_layout.size.bytes() >= min_size);
    Ok(())
}

fn get_at_offset<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    op: OpTy<'tcx, Tag>,
    offset: u64,
    layout: TyAndLayout<'tcx>,
    min_size: u64,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    // Ensure that the following read at an offset to the attr pointer is within bounds.
    assert_ptr_target_min_size(ecx, op, min_size)?;
    let op_place = ecx.deref_operand(op)?;
    let value_place = op_place.offset(Size::from_bytes(offset), MemPlaceMeta::None, layout, ecx)?;
    ecx.read_scalar(value_place.into())
}

fn set_at_offset<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    op: OpTy<'tcx, Tag>,
    offset: u64,
    value: impl Into<ScalarMaybeUninit<Tag>>,
    layout: TyAndLayout<'tcx>,
    min_size: u64,
) -> InterpResult<'tcx, ()> {
    // Ensure that the following write at an offset to the attr pointer is within bounds.
    assert_ptr_target_min_size(ecx, op, min_size)?;
    let op_place = ecx.deref_operand(op)?;
    let value_place = op_place.offset(Size::from_bytes(offset), MemPlaceMeta::None, layout, ecx)?;
    ecx.write_scalar(value.into(), value_place.into())
}

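// All of the `*_get_*`/`*_set_*` accessors below funnel through these two
// helpers. For example, `get_at_offset(ecx, attr_op, 0, ecx.machine.layouts.i32,
// PTHREAD_MUTEXATTR_T_MIN_SIZE)` reads the i32 stored in the first four bytes
// of the pointed-to attribute struct.
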
// pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.

// Our chosen memory layout for emulation (does not have to match the platform layout!):
// store an i32 in the first four bytes equal to the corresponding libc mutex kind constant
// (e.g. PTHREAD_MUTEX_NORMAL).

/// A flag that allows us to distinguish `PTHREAD_MUTEX_NORMAL` from
/// `PTHREAD_MUTEX_DEFAULT`. In `glibc` they have the same numeric value, but
/// different behaviour, so we need a way to tell them apart. We do this by
/// setting this bit flag on `PTHREAD_MUTEX_NORMAL` mutexes. See the comment in
/// the `pthread_mutexattr_settype` function.
const PTHREAD_MUTEX_NORMAL_FLAG: i32 = 0x8000000;

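// For example, with glibc's values, `PTHREAD_MUTEX_NORMAL` and
// `PTHREAD_MUTEX_DEFAULT` are both 0, so an explicitly "normal" kind is stored
// as `0 | 0x8000000`, which is distinct from every unflagged kind constant.
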
const PTHREAD_MUTEXATTR_T_MIN_SIZE: u64 = 4;

fn is_mutex_kind_default<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    kind: Scalar<Tag>,
) -> InterpResult<'tcx, bool> {
    Ok(kind == ecx.eval_libc("PTHREAD_MUTEX_DEFAULT")?)
}

fn is_mutex_kind_normal<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    kind: Scalar<Tag>,
) -> InterpResult<'tcx, bool> {
    let kind = kind.to_i32()?;
    let mutex_normal_kind = ecx.eval_libc("PTHREAD_MUTEX_NORMAL")?.to_i32()?;
    Ok(kind == (mutex_normal_kind | PTHREAD_MUTEX_NORMAL_FLAG))
}

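// Note that this matches only the flagged value written by
// `pthread_mutexattr_settype`; a plain `PTHREAD_MUTEX_NORMAL` value (which on
// glibc is numerically equal to `PTHREAD_MUTEX_DEFAULT`) is treated as a
// default mutex.
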
fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    attr_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, attr_op, 0, ecx.machine.layouts.i32, PTHREAD_MUTEXATTR_T_MIN_SIZE)
}

fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    attr_op: OpTy<'tcx, Tag>,
    kind: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, attr_op, 0, kind, ecx.machine.layouts.i32, PTHREAD_MUTEXATTR_T_MIN_SIZE)
}

// pthread_mutex_t is between 24 and 48 bytes, depending on the platform.

// Our chosen memory layout for the emulated mutex (does not have to match the platform layout!):
// bytes 0-3: reserved for signature on macOS
// (need to avoid this because it is set by static initializer macros)
// bytes 4-7: mutex id as u32 or 0 if id is not assigned yet.
// bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
// (the kind has to be at this offset for compatibility with static initializer macros)

const PTHREAD_MUTEX_T_MIN_SIZE: u64 = 24;

fn mutex_get_kind<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
    get_at_offset(ecx, mutex_op, offset, ecx.machine.layouts.i32, PTHREAD_MUTEX_T_MIN_SIZE)
}

fn mutex_set_kind<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
    kind: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
    set_at_offset(ecx, mutex_op, offset, kind, ecx.machine.layouts.i32, PTHREAD_MUTEX_T_MIN_SIZE)
}

fn mutex_get_id<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, mutex_op, 4, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
}

fn mutex_set_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
    id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, mutex_op, 4, id, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
}

fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, MutexId> {
    let id = mutex_get_id(ecx, mutex_op)?.to_u32()?;
    if id == 0 {
        // 0 is a default value and also not a valid mutex id. Need to allocate
        // a new mutex.
        let id = ecx.mutex_create();
        mutex_set_id(ecx, mutex_op, id.to_u32_scalar())?;
        Ok(id)
    } else {
        Ok(MutexId::from_u32(id))
    }
}

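// The same lazy-initialization pattern is used for rwlocks and condvars below:
// a zero id (the value written by the static initializer macros) means "not
// created yet", and the first use allocates a fresh id and stores it back.
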
// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.

// Our chosen memory layout for the emulated rwlock (does not have to match the platform layout!):
// bytes 0-3: reserved for signature on macOS
// (need to avoid this because it is set by static initializer macros)
// bytes 4-7: rwlock id as u32 or 0 if id is not assigned yet.

const PTHREAD_RWLOCK_T_MIN_SIZE: u64 = 32;

fn rwlock_get_id<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, rwlock_op, 4, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
}

fn rwlock_set_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
    id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, rwlock_op, 4, id, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
}

fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, RwLockId> {
    let id = rwlock_get_id(ecx, rwlock_op)?.to_u32()?;
    if id == 0 {
        // 0 is a default value and also not a valid rwlock id. Need to allocate
        // a new read-write lock.
        let id = ecx.rwlock_create();
        rwlock_set_id(ecx, rwlock_op, id.to_u32_scalar())?;
        Ok(id)
    } else {
        Ok(RwLockId::from_u32(id))
    }
}

// pthread_condattr_t

// Our chosen memory layout for emulation (does not have to match the platform layout!):
// store an i32 in the first four bytes equal to the corresponding libc clock id constant
// (e.g. CLOCK_REALTIME).

const PTHREAD_CONDATTR_T_MIN_SIZE: u64 = 4;

fn condattr_get_clock_id<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    attr_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, attr_op, 0, ecx.machine.layouts.i32, PTHREAD_CONDATTR_T_MIN_SIZE)
}

fn condattr_set_clock_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    attr_op: OpTy<'tcx, Tag>,
    clock_id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, attr_op, 0, clock_id, ecx.machine.layouts.i32, PTHREAD_CONDATTR_T_MIN_SIZE)
}

// pthread_cond_t

// Our chosen memory layout for the emulated condition variable (does not have
// to match the platform layout!):

// bytes 0-3: reserved for signature on macOS
// bytes 4-7: the condition variable id as u32 or 0 if id is not assigned yet.
// bytes 8-11: the clock id constant as i32

const PTHREAD_COND_T_MIN_SIZE: u64 = 12;

fn cond_get_id<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    cond_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, cond_op, 4, ecx.machine.layouts.u32, PTHREAD_COND_T_MIN_SIZE)
}

fn cond_set_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    cond_op: OpTy<'tcx, Tag>,
    id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, cond_op, 4, id, ecx.machine.layouts.u32, PTHREAD_COND_T_MIN_SIZE)
}

fn cond_get_or_create_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    cond_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, CondvarId> {
    let id = cond_get_id(ecx, cond_op)?.to_u32()?;
    if id == 0 {
        // 0 is a default value and also not a valid condition variable id.
        // Need to allocate a new id.
        let id = ecx.condvar_create();
        cond_set_id(ecx, cond_op, id.to_u32_scalar())?;
        Ok(id)
    } else {
        Ok(CondvarId::from_u32(id))
    }
}

fn cond_get_clock_id<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    cond_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, cond_op, 8, ecx.machine.layouts.i32, PTHREAD_COND_T_MIN_SIZE)
}

fn cond_set_clock_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    cond_op: OpTy<'tcx, Tag>,
    clock_id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, cond_op, 8, clock_id, ecx.machine.layouts.i32, PTHREAD_COND_T_MIN_SIZE)
}

/// Try to reacquire the mutex associated with the condition variable after we
/// were signalled.
fn reacquire_cond_mutex<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    thread: ThreadId,
    mutex: MutexId,
) -> InterpResult<'tcx> {
    ecx.unblock_thread(thread);
    if ecx.mutex_is_locked(mutex) {
        ecx.mutex_enqueue_and_block(mutex, thread);
    } else {
        ecx.mutex_lock(mutex, thread);
    }
    Ok(())
}

/// After a thread waiting on a condvar was signalled:
/// reacquire the mutex associated with the condvar and remove the timeout
/// callback if any was registered.
fn post_cond_signal<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    thread: ThreadId,
    mutex: MutexId,
) -> InterpResult<'tcx> {
    reacquire_cond_mutex(ecx, thread, mutex)?;
    // Waiting for the mutex is not included in the waiting time, because we
    // need to acquire the mutex in any case, even if we got a timeout.
    ecx.unregister_timeout_callback_if_exists(thread);
    Ok(())
}

/// Release the mutex associated with the condition variable because we are
/// entering the waiting state.
fn release_cond_mutex_and_block<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    active_thread: ThreadId,
    mutex: MutexId,
) -> InterpResult<'tcx> {
    if let Some(old_locked_count) = ecx.mutex_unlock(mutex, active_thread)? {
        if old_locked_count != 1 {
            throw_unsup_format!("awaiting on a lock acquired multiple times is not supported");
        }
    } else {
        throw_ub_format!("awaiting on a mutex that is unlocked or owned by a different thread");
    }
    ecx.block_thread(active_thread);
    Ok(())
}

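// Together, these helpers implement the mutex half of condvar waiting:
// `release_cond_mutex_and_block` runs when a thread enters `pthread_cond_wait`
// or `pthread_cond_timedwait`, and `reacquire_cond_mutex` (via
// `post_cond_signal` or the timeout callback) runs when the wait ends.
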
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?;
        mutexattr_set_kind(this, attr_op, default_kind)?;

        Ok(0)
    }

    fn pthread_mutexattr_settype(
        &mut self,
        attr_op: OpTy<'tcx, Tag>,
        kind_op: OpTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let kind = this.read_scalar(kind_op)?.not_undef()?;
        if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
            // In the `glibc` implementation, the numeric values of
            // `PTHREAD_MUTEX_NORMAL` and `PTHREAD_MUTEX_DEFAULT` are equal.
            // However, a mutex created by explicitly passing the
            // `PTHREAD_MUTEX_NORMAL` type has in some cases different behaviour
            // from the default mutex for which the type was not explicitly
            // specified. For a more detailed discussion, please see
            // https://github.com/rust-lang/miri/issues/1419.
            //
            // To distinguish these two cases in already constructed mutexes, we
            // use the same trick as glibc: for the case when
            // `pthread_mutexattr_settype` is called explicitly, we set the
            // `PTHREAD_MUTEX_NORMAL_FLAG` flag.
            let normal_kind = kind.to_i32()? | PTHREAD_MUTEX_NORMAL_FLAG;
            // Check that after setting the flag, the kind is distinguishable
            // from all other kinds.
            assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_DEFAULT")?.to_i32()?);
            assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?.to_i32()?);
            assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?.to_i32()?);
            mutexattr_set_kind(this, attr_op, Scalar::from_i32(normal_kind))?;
        } else if kind == this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
            || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
            || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?
        {
            mutexattr_set_kind(this, attr_op, kind)?;
        } else {
            let einval = this.eval_libc_i32("EINVAL")?;
            return Ok(einval);
        }

        Ok(0)
    }

    fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        mutexattr_set_kind(this, attr_op, ScalarMaybeUninit::Uninit)?;

        Ok(0)
    }

    fn pthread_mutex_init(
        &mut self,
        mutex_op: OpTy<'tcx, Tag>,
        attr_op: OpTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let attr = this.read_scalar(attr_op)?.not_undef()?;
        let kind = if this.is_null(attr)? {
            this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
        } else {
            mutexattr_get_kind(this, attr_op)?.not_undef()?
        };

        // Write 0 to use the same code path as the static initializers.
        mutex_set_id(this, mutex_op, Scalar::from_i32(0))?;

        mutex_set_kind(this, mutex_op, kind)?;

        Ok(0)
    }

    fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
        let id = mutex_get_or_create_id(this, mutex_op)?;
        let active_thread = this.get_active_thread();

        if this.mutex_is_locked(id) {
            let owner_thread = this.mutex_get_owner(id);
            if owner_thread != active_thread {
                // Enqueue the active thread.
                this.mutex_enqueue_and_block(id, active_thread);
                Ok(0)
            } else {
                // Trying to acquire the same mutex again.
                if is_mutex_kind_default(this, kind)? {
                    throw_ub_format!("trying to acquire already locked default mutex");
                } else if is_mutex_kind_normal(this, kind)? {
                    throw_machine_stop!(TerminationInfo::Deadlock);
                } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
                    this.eval_libc_i32("EDEADLK")
                } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
                    this.mutex_lock(id, active_thread);
                    Ok(0)
                } else {
                    throw_unsup_format!(
                        "called pthread_mutex_lock on an unsupported type of mutex"
                    );
                }
            }
        } else {
            // The mutex is unlocked. Let's lock it.
            this.mutex_lock(id, active_thread);
            Ok(0)
        }
    }

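    // Summary of the relock behaviour above, by mutex kind: a default mutex is
    // undefined behaviour, a normal mutex reports a deadlock, an errorcheck
    // mutex returns EDEADLK, and a recursive mutex bumps its lock count and
    // returns 0.
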
    fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
        let id = mutex_get_or_create_id(this, mutex_op)?;
        let active_thread = this.get_active_thread();

        if this.mutex_is_locked(id) {
            let owner_thread = this.mutex_get_owner(id);
            if owner_thread != active_thread {
                this.eval_libc_i32("EBUSY")
            } else {
                if is_mutex_kind_default(this, kind)?
                    || is_mutex_kind_normal(this, kind)?
                    || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
                {
                    this.eval_libc_i32("EBUSY")
                } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
                    this.mutex_lock(id, active_thread);
                    Ok(0)
                } else {
                    throw_unsup_format!(
                        "called pthread_mutex_trylock on an unsupported type of mutex"
                    );
                }
            }
        } else {
            // The mutex is unlocked. Let's lock it.
            this.mutex_lock(id, active_thread);
            Ok(0)
        }
    }

    fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
        let id = mutex_get_or_create_id(this, mutex_op)?;
        let active_thread = this.get_active_thread();

        if let Some(_old_locked_count) = this.mutex_unlock(id, active_thread)? {
            // The mutex was locked by the current thread.
            Ok(0)
        } else {
            // The mutex was locked by another thread or not locked at all. See
            // the “Unlock When Not Owner” column in
            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_unlock.html.
            if is_mutex_kind_default(this, kind)? {
                throw_ub_format!(
                    "unlocked a default mutex that was not locked by the current thread"
                );
            } else if is_mutex_kind_normal(this, kind)? {
                throw_ub_format!(
                    "unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread"
                );
            } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
                this.eval_libc_i32("EPERM")
            } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
                this.eval_libc_i32("EPERM")
            } else {
                throw_unsup_format!("called pthread_mutex_unlock on an unsupported type of mutex");
            }
        }
    }

    fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let id = mutex_get_or_create_id(this, mutex_op)?;

        if this.mutex_is_locked(id) {
            throw_ub_format!("destroyed a locked mutex");
        }

        mutex_set_kind(this, mutex_op, ScalarMaybeUninit::Uninit)?;
        mutex_set_id(this, mutex_op, ScalarMaybeUninit::Uninit)?;

        Ok(0)
    }

    fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let id = rwlock_get_or_create_id(this, rwlock_op)?;
        let active_thread = this.get_active_thread();

        if this.rwlock_is_write_locked(id) {
            this.rwlock_enqueue_and_block_reader(id, active_thread);
            Ok(0)
        } else {
            this.rwlock_reader_lock(id, active_thread);
            Ok(0)
        }
    }

    fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let id = rwlock_get_or_create_id(this, rwlock_op)?;
        let active_thread = this.get_active_thread();

        if this.rwlock_is_write_locked(id) {
            this.eval_libc_i32("EBUSY")
        } else {
            this.rwlock_reader_lock(id, active_thread);
            Ok(0)
        }
    }

    fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let id = rwlock_get_or_create_id(this, rwlock_op)?;
        let active_thread = this.get_active_thread();

        if this.rwlock_is_locked(id) {
            // Note: this will deadlock if the lock is already locked by this
            // thread in any way.
            //
            // Relevant documentation:
            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html
            // An in-depth discussion on this topic:
            // https://github.com/rust-lang/rust/issues/53127
            //
            // FIXME: Detect and report the deadlock proactively. (We currently
            // report the deadlock only when no thread can continue execution,
            // but we could detect that this lock is already locked and report
            // it right away.)
            this.rwlock_enqueue_and_block_writer(id, active_thread);
        } else {
            this.rwlock_writer_lock(id, active_thread);
        }

        Ok(0)
    }

    fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let id = rwlock_get_or_create_id(this, rwlock_op)?;
        let active_thread = this.get_active_thread();

        if this.rwlock_is_locked(id) {
            this.eval_libc_i32("EBUSY")
        } else {
            this.rwlock_writer_lock(id, active_thread);
            Ok(0)
        }
    }

    fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let id = rwlock_get_or_create_id(this, rwlock_op)?;
        let active_thread = this.get_active_thread();

        if this.rwlock_reader_unlock(id, active_thread) {
            // The thread was a reader.
            if !this.rwlock_is_locked(id) {
                // No more readers owning the lock. Give it to a writer if there
                // is any.
                this.rwlock_dequeue_and_lock_writer(id);
            }
            Ok(0)
        } else if Some(active_thread) == this.rwlock_writer_unlock(id) {
            // The thread was a writer.
            //
            // We are prioritizing writers here against the readers. As a
            // result, not only can readers starve writers, but writers can
            // also starve readers.
            if this.rwlock_dequeue_and_lock_writer(id) {
                // Someone got the write lock, nice.
            } else {
                // Give the lock to all readers.
                while this.rwlock_dequeue_and_lock_reader(id) {
                    // Rinse and repeat.
                }
            }
            Ok(0)
        } else {
            throw_ub_format!("unlocked an rwlock that was not locked by the active thread");
        }
    }

    fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let id = rwlock_get_or_create_id(this, rwlock_op)?;

        if this.rwlock_is_locked(id) {
            throw_ub_format!("destroyed a locked rwlock");
        }

        rwlock_set_id(this, rwlock_op, ScalarMaybeUninit::Uninit)?;

        Ok(0)
    }

    fn pthread_condattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        // The default value of the clock attribute shall refer to the system
        // clock.
        // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_condattr_setclock.html
        let default_clock_id = this.eval_libc("CLOCK_REALTIME")?;
        condattr_set_clock_id(this, attr_op, default_clock_id)?;

        Ok(0)
    }

    fn pthread_condattr_setclock(
        &mut self,
        attr_op: OpTy<'tcx, Tag>,
        clock_id_op: OpTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let clock_id = this.read_scalar(clock_id_op)?.not_undef()?;
        if clock_id == this.eval_libc("CLOCK_REALTIME")?
            || clock_id == this.eval_libc("CLOCK_MONOTONIC")?
        {
            condattr_set_clock_id(this, attr_op, clock_id)?;
        } else {
            let einval = this.eval_libc_i32("EINVAL")?;
            return Ok(einval);
        }

        Ok(0)
    }

    fn pthread_condattr_getclock(
        &mut self,
        attr_op: OpTy<'tcx, Tag>,
        clk_id_op: OpTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let clock_id = condattr_get_clock_id(this, attr_op)?;
        this.write_scalar(clock_id, this.deref_operand(clk_id_op)?.into())?;

        Ok(0)
    }

    fn pthread_condattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        condattr_set_clock_id(this, attr_op, ScalarMaybeUninit::Uninit)?;

        Ok(0)
    }

    fn pthread_cond_init(
        &mut self,
        cond_op: OpTy<'tcx, Tag>,
        attr_op: OpTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let attr = this.read_scalar(attr_op)?.not_undef()?;
        let clock_id = if this.is_null(attr)? {
            this.eval_libc("CLOCK_REALTIME")?
        } else {
            condattr_get_clock_id(this, attr_op)?.not_undef()?
        };

        // Write 0 to use the same code path as the static initializers.
        cond_set_id(this, cond_op, Scalar::from_i32(0))?;

        cond_set_clock_id(this, cond_op, clock_id)?;

        Ok(0)
    }

    fn pthread_cond_signal(&mut self, cond_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();
        let id = cond_get_or_create_id(this, cond_op)?;
        if let Some((thread, mutex)) = this.condvar_signal(id) {
            post_cond_signal(this, thread, mutex)?;
        }

        Ok(0)
    }

    fn pthread_cond_broadcast(&mut self, cond_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();
        let id = cond_get_or_create_id(this, cond_op)?;

        while let Some((thread, mutex)) = this.condvar_signal(id) {
            post_cond_signal(this, thread, mutex)?;
        }

        Ok(0)
    }

    fn pthread_cond_wait(
        &mut self,
        cond_op: OpTy<'tcx, Tag>,
        mutex_op: OpTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let id = cond_get_or_create_id(this, cond_op)?;
        let mutex_id = mutex_get_or_create_id(this, mutex_op)?;
        let active_thread = this.get_active_thread();

        release_cond_mutex_and_block(this, active_thread, mutex_id)?;
        this.condvar_wait(id, active_thread, mutex_id);

        Ok(0)
    }

    fn pthread_cond_timedwait(
        &mut self,
        cond_op: OpTy<'tcx, Tag>,
        mutex_op: OpTy<'tcx, Tag>,
        abstime_op: OpTy<'tcx, Tag>,
        dest: PlaceTy<'tcx, Tag>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        this.check_no_isolation("pthread_cond_timedwait")?;

        let id = cond_get_or_create_id(this, cond_op)?;
        let mutex_id = mutex_get_or_create_id(this, mutex_op)?;
        let active_thread = this.get_active_thread();

        release_cond_mutex_and_block(this, active_thread, mutex_id)?;
        this.condvar_wait(id, active_thread, mutex_id);

        // We return success for now and override it in the timeout callback.
        this.write_scalar(Scalar::from_i32(0), dest)?;

        // Extract the timeout.
        let clock_id = cond_get_clock_id(this, cond_op)?.to_i32()?;
        let duration = {
            // The abstime operand points to a `timespec` (seconds, nanoseconds).
            let tp = this.deref_operand(abstime_op)?;
            let seconds_place = this.mplace_field(tp, 0)?;
            let seconds = this.read_scalar(seconds_place.into())?;
            let nanoseconds_place = this.mplace_field(tp, 1)?;
            let nanoseconds = this.read_scalar(nanoseconds_place.into())?;
            let (seconds, nanoseconds) = (
                seconds.to_machine_usize(this)?,
                nanoseconds.to_machine_usize(this)?.try_into().unwrap(),
            );
            Duration::new(seconds, nanoseconds)
        };

        let timeout_time = if clock_id == this.eval_libc_i32("CLOCK_REALTIME")? {
            Time::RealTime(SystemTime::UNIX_EPOCH.checked_add(duration).unwrap())
        } else if clock_id == this.eval_libc_i32("CLOCK_MONOTONIC")? {
            Time::Monotonic(this.machine.time_anchor.checked_add(duration).unwrap())
        } else {
            throw_unsup_format!("unsupported clock id: {}", clock_id);
        };

        // Register the timeout callback.
        this.register_timeout_callback(
            active_thread,
            timeout_time,
            Box::new(move |ecx| {
                // We are not waiting for the condvar any more, wait for the
                // mutex instead.
                reacquire_cond_mutex(ecx, active_thread, mutex_id)?;

                // Remove the thread from the condition variable.
                ecx.condvar_remove_waiter(id, active_thread);

                // Set the return value: we timed out.
                let timeout = ecx.eval_libc_i32("ETIMEDOUT")?;
                ecx.write_scalar(Scalar::from_i32(timeout), dest)?;

                Ok(())
            }),
        );

        Ok(())
    }

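    // Note that the timeout callback fires only if the wait actually times
    // out: if the condvar is signalled first, `post_cond_signal` reacquires
    // the mutex and unregisters the callback, so `dest` keeps the success
    // value written above.
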
    fn pthread_cond_destroy(&mut self, cond_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let id = cond_get_or_create_id(this, cond_op)?;
        if this.condvar_is_awaited(id) {
            throw_ub_format!("destroying an awaited condition variable");
        }
        cond_set_id(this, cond_op, ScalarMaybeUninit::Uninit)?;
        cond_set_clock_id(this, cond_op, ScalarMaybeUninit::Uninit)?;

        Ok(0)
    }
}