1 use std::time::SystemTime;
3 use crate::concurrency::sync::CondvarLock;
4 use crate::concurrency::thread::{MachineCallback, Time};
7 // pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.
9 // Our chosen memory layout for emulation (does not have to match the platform layout!):
10 // store an i32 in the first four bytes equal to the corresponding libc mutex kind constant
11 // (e.g. PTHREAD_MUTEX_NORMAL).
13 /// A flag that allows distinguishing `PTHREAD_MUTEX_NORMAL` from
14 /// `PTHREAD_MUTEX_DEFAULT`. Since in `glibc` they have the same numeric values,
15 /// but different behaviour, we need a way to distinguish them. We do this by
16 /// setting this bit flag to the `PTHREAD_MUTEX_NORMAL` mutexes. See the comment
17 /// in `pthread_mutexattr_settype` function.
18 const PTHREAD_MUTEX_NORMAL_FLAG: i32 = 0x8000000;
// Returns whether `kind` is the plain `PTHREAD_MUTEX_DEFAULT` value, i.e.
// without our `PTHREAD_MUTEX_NORMAL_FLAG` marker bit set.
20 fn is_mutex_kind_default<'mir, 'tcx: 'mir>(
21 ecx: &mut MiriInterpCx<'mir, 'tcx>,
23 ) -> InterpResult<'tcx, bool> {
24 Ok(kind == ecx.eval_libc_i32("PTHREAD_MUTEX_DEFAULT"))
// Returns whether `kind` is `PTHREAD_MUTEX_NORMAL` with our
// `PTHREAD_MUTEX_NORMAL_FLAG` bit set (see the flag's documentation above).
27 fn is_mutex_kind_normal<'mir, 'tcx: 'mir>(
28 ecx: &mut MiriInterpCx<'mir, 'tcx>,
30 ) -> InterpResult<'tcx, bool> {
31 let mutex_normal_kind = ecx.eval_libc_i32("PTHREAD_MUTEX_NORMAL");
32 Ok(kind == (mutex_normal_kind | PTHREAD_MUTEX_NORMAL_FLAG))
// Reads the mutex kind stored as an i32 in the first four bytes of the
// emulated pthread_mutexattr_t (see the layout comment above).
35 fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
36 ecx: &MiriInterpCx<'mir, 'tcx>,
37 attr_op: &OpTy<'tcx, Provenance>,
38 ) -> InterpResult<'tcx, i32> {
39 ecx.read_scalar_at_offset(attr_op, 0, ecx.machine.layouts.i32)?.to_i32()
// Stores the mutex kind as an i32 into the first four bytes of the
// emulated pthread_mutexattr_t.
42 fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
43 ecx: &mut MiriInterpCx<'mir, 'tcx>,
44 attr_op: &OpTy<'tcx, Provenance>,
46 ) -> InterpResult<'tcx, ()> {
47 ecx.write_scalar_at_offset(attr_op, 0, Scalar::from_i32(kind), ecx.machine.layouts.i32)
50 // pthread_mutex_t is between 24 and 48 bytes, depending on the platform.
52 // Our chosen memory layout for the emulated mutex (does not have to match the platform layout!):
53 // bytes 0-3: reserved for signature on macOS
54 // (need to avoid this because it is set by static initializer macros)
55 // bytes 4-7: mutex id as u32 or 0 if id is not assigned yet.
56 // bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
57 // (the kind has to be at its offset for compatibility with static initializer macros)
// Gets (or lazily creates) the interpreter-side `MutexId` stored at byte
// offset 4 of the emulated pthread_mutex_t (see the layout comment above).
59 fn mutex_get_id<'mir, 'tcx: 'mir>(
60 ecx: &mut MiriInterpCx<'mir, 'tcx>,
61 mutex_op: &OpTy<'tcx, Provenance>,
62 ) -> InterpResult<'tcx, MutexId> {
63 ecx.mutex_get_or_create_id(mutex_op, 4)
// Writes 0 to the id field at offset 4, i.e. "no id assigned yet" — the same
// state that the static initializer macros produce.
66 fn mutex_reset_id<'mir, 'tcx: 'mir>(
67 ecx: &mut MiriInterpCx<'mir, 'tcx>,
68 mutex_op: &OpTy<'tcx, Provenance>,
69 ) -> InterpResult<'tcx, ()> {
70 ecx.write_scalar_at_offset(mutex_op, 4, Scalar::from_i32(0), ecx.machine.layouts.u32)
// Reads the mutex kind (i32). Its offset depends on the pointer width so that
// it lines up with the platform's static initializer layout (see comment above).
73 fn mutex_get_kind<'mir, 'tcx: 'mir>(
74 ecx: &MiriInterpCx<'mir, 'tcx>,
75 mutex_op: &OpTy<'tcx, Provenance>,
76 ) -> InterpResult<'tcx, i32> {
// 64-bit targets keep the kind at offset 16, 32-bit targets at 12.
77 let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
78 ecx.read_scalar_at_offset(mutex_op, offset, ecx.machine.layouts.i32)?.to_i32()
// Stores the mutex kind (i32) at the platform-dependent offset; must mirror
// the offset computation in `mutex_get_kind`.
81 fn mutex_set_kind<'mir, 'tcx: 'mir>(
82 ecx: &mut MiriInterpCx<'mir, 'tcx>,
83 mutex_op: &OpTy<'tcx, Provenance>,
85 ) -> InterpResult<'tcx, ()> {
86 let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
87 ecx.write_scalar_at_offset(mutex_op, offset, Scalar::from_i32(kind), ecx.machine.layouts.i32)
90 // pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
92 // Our chosen memory layout for the emulated rwlock (does not have to match the platform layout!):
93 // bytes 0-3: reserved for signature on macOS
94 // (need to avoid this because it is set by static initializer macros)
95 // bytes 4-7: rwlock id as u32 or 0 if id is not assigned yet.
// Gets (or lazily creates) the interpreter-side `RwLockId` stored at byte
// offset 4 of the emulated pthread_rwlock_t.
97 fn rwlock_get_id<'mir, 'tcx: 'mir>(
98 ecx: &mut MiriInterpCx<'mir, 'tcx>,
99 rwlock_op: &OpTy<'tcx, Provenance>,
100 ) -> InterpResult<'tcx, RwLockId> {
101 ecx.rwlock_get_or_create_id(rwlock_op, 4)
104 // pthread_condattr_t
106 // Our chosen memory layout for emulation (does not have to match the platform layout!):
107 // store an i32 in the first four bytes equal to the corresponding libc clock id constant
108 // (e.g. CLOCK_REALTIME).
// Reads the clock id (a libc CLOCK_* constant, stored as i32) from the first
// four bytes of the emulated pthread_condattr_t.
110 fn condattr_get_clock_id<'mir, 'tcx: 'mir>(
111 ecx: &MiriInterpCx<'mir, 'tcx>,
112 attr_op: &OpTy<'tcx, Provenance>,
113 ) -> InterpResult<'tcx, i32> {
114 ecx.read_scalar_at_offset(attr_op, 0, ecx.machine.layouts.i32)?.to_i32()
// Stores the clock id (i32) into the first four bytes of the emulated
// pthread_condattr_t.
117 fn condattr_set_clock_id<'mir, 'tcx: 'mir>(
118 ecx: &mut MiriInterpCx<'mir, 'tcx>,
119 attr_op: &OpTy<'tcx, Provenance>,
121 ) -> InterpResult<'tcx, ()> {
122 ecx.write_scalar_at_offset(attr_op, 0, Scalar::from_i32(clock_id), ecx.machine.layouts.i32)
127 // Our chosen memory layout for the emulated conditional variable (does not have
128 // to match the platform layout!):
130 // bytes 0-3: reserved for signature on macOS
131 // bytes 4-7: the conditional variable id as u32 or 0 if id is not assigned yet.
132 // bytes 8-11: the clock id constant as i32
// Gets (or lazily creates) the interpreter-side `CondvarId` stored at byte
// offset 4 of the emulated pthread_cond_t.
134 fn cond_get_id<'mir, 'tcx: 'mir>(
135 ecx: &mut MiriInterpCx<'mir, 'tcx>,
136 cond_op: &OpTy<'tcx, Provenance>,
137 ) -> InterpResult<'tcx, CondvarId> {
138 ecx.condvar_get_or_create_id(cond_op, 4)
// Writes 0 to the condvar id field at offset 4 ("no id assigned yet"), the
// same state that the static initializer macros produce.
141 fn cond_reset_id<'mir, 'tcx: 'mir>(
142 ecx: &mut MiriInterpCx<'mir, 'tcx>,
143 cond_op: &OpTy<'tcx, Provenance>,
144 ) -> InterpResult<'tcx, ()> {
145 ecx.write_scalar_at_offset(cond_op, 4, Scalar::from_i32(0), ecx.machine.layouts.u32)
// Reads the condvar's clock id (i32) from byte offset 8 (see the layout
// comment above).
148 fn cond_get_clock_id<'mir, 'tcx: 'mir>(
149 ecx: &MiriInterpCx<'mir, 'tcx>,
150 cond_op: &OpTy<'tcx, Provenance>,
151 ) -> InterpResult<'tcx, i32> {
152 ecx.read_scalar_at_offset(cond_op, 8, ecx.machine.layouts.i32)?.to_i32()
// Stores the condvar's clock id (i32) at byte offset 8.
155 fn cond_set_clock_id<'mir, 'tcx: 'mir>(
156 ecx: &mut MiriInterpCx<'mir, 'tcx>,
157 cond_op: &OpTy<'tcx, Provenance>,
159 ) -> InterpResult<'tcx, ()> {
160 ecx.write_scalar_at_offset(cond_op, 8, Scalar::from_i32(clock_id), ecx.machine.layouts.i32)
163 /// Try to reacquire the mutex associated with the condition variable after we
/// were woken up (signalled or timed out).
165 fn reacquire_cond_mutex<'mir, 'tcx: 'mir>(
166 ecx: &mut MiriInterpCx<'mir, 'tcx>,
169 ) -> InterpResult<'tcx> {
// The thread is done waiting on the condvar itself; let it run again.
170 ecx.unblock_thread(thread);
171 if ecx.mutex_is_locked(mutex) {
// Someone else holds the mutex: queue the thread behind them.
172 ecx.mutex_enqueue_and_block(mutex, thread);
// Mutex is free: take it immediately.
174 ecx.mutex_lock(mutex, thread);
179 /// After a thread waiting on a condvar was signalled:
180 /// Reacquire the mutex associated with the condvar and remove the timeout callback if any
/// was registered.
182 fn post_cond_signal<'mir, 'tcx: 'mir>(
183 ecx: &mut MiriInterpCx<'mir, 'tcx>,
186 ) -> InterpResult<'tcx> {
187 reacquire_cond_mutex(ecx, thread, mutex)?;
188 // Waiting for the mutex is not included in the waiting time because we need
189 // to acquire the mutex always even if we get a timeout.
190 ecx.unregister_timeout_callback_if_exists(thread);
194 /// Release the mutex associated with the condition variable because we are
195 /// entering the waiting state.
196 fn release_cond_mutex_and_block<'mir, 'tcx: 'mir>(
197 ecx: &mut MiriInterpCx<'mir, 'tcx>,
198 active_thread: ThreadId,
200 ) -> InterpResult<'tcx> {
201 if let Some(old_locked_count) = ecx.mutex_unlock(mutex, active_thread) {
202 if old_locked_count != 1 {
// A recursively-locked mutex would need its lock count restored on
// wakeup, which we do not implement.
203 throw_unsup_format!("awaiting on a lock acquired multiple times is not supported");
// `mutex_unlock` returned `None`: the calling thread did not hold the mutex.
206 throw_ub_format!("awaiting on unlocked or owned by a different thread mutex");
// Block this thread until the condvar wakes it up.
208 ecx.block_thread(active_thread);
// Make all the pthread shims below available on every `MiriInterpCx`.
212 impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
213 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
// Shim for `pthread_mutexattr_init`: initialize the attr with the default mutex kind.
214 fn pthread_mutexattr_init(
216 attr_op: &OpTy<'tcx, Provenance>,
217 ) -> InterpResult<'tcx, i32> {
218 let this = self.eval_context_mut();
220 let default_kind = this.eval_libc_i32("PTHREAD_MUTEX_DEFAULT");
221 mutexattr_set_kind(this, attr_op, default_kind)?;
// Shim for `pthread_mutexattr_settype`: validate the requested kind and store
// it in the attr; unknown kinds produce EINVAL.
226 fn pthread_mutexattr_settype(
228 attr_op: &OpTy<'tcx, Provenance>,
229 kind_op: &OpTy<'tcx, Provenance>,
230 ) -> InterpResult<'tcx, i32> {
231 let this = self.eval_context_mut();
233 let kind = this.read_scalar(kind_op)?.to_i32()?;
234 if kind == this.eval_libc_i32("PTHREAD_MUTEX_NORMAL") {
235 // In `glibc` implementation, the numeric values of
236 // `PTHREAD_MUTEX_NORMAL` and `PTHREAD_MUTEX_DEFAULT` are equal.
237 // However, a mutex created by explicitly passing
238 // `PTHREAD_MUTEX_NORMAL` type has in some cases different behaviour
239 // from the default mutex for which the type was not explicitly
240 // specified. For a more detailed discussion, please see
241 // https://github.com/rust-lang/miri/issues/1419.
243 // To distinguish these two cases in already constructed mutexes, we
244 // use the same trick as glibc: for the case when
245 // `pthread_mutexattr_settype` is called explicitly, we set the
246 // `PTHREAD_MUTEX_NORMAL_FLAG` flag.
247 let normal_kind = kind | PTHREAD_MUTEX_NORMAL_FLAG;
248 // Check that after setting the flag, the kind is distinguishable
249 // from all other kinds.
250 assert_ne!(normal_kind, this.eval_libc_i32("PTHREAD_MUTEX_DEFAULT"));
251 assert_ne!(normal_kind, this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK"));
252 assert_ne!(normal_kind, this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE"));
253 mutexattr_set_kind(this, attr_op, normal_kind)?;
254 } else if kind == this.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
255 || kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK")
256 || kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE")
// Any other recognized kind is stored verbatim.
258 mutexattr_set_kind(this, attr_op, kind)?;
// Unrecognized kind: report EINVAL to the caller.
260 let einval = this.eval_libc_i32("EINVAL");
// Shim for `pthread_mutexattr_destroy`: check the attr is init, then de-init it.
267 fn pthread_mutexattr_destroy(
269 attr_op: &OpTy<'tcx, Provenance>,
270 ) -> InterpResult<'tcx, i32> {
271 let this = self.eval_context_mut();
273 // Destroying an uninit pthread_mutexattr is UB, so check to make sure it's not uninit.
274 mutexattr_get_kind(this, attr_op)?;
276 // To catch double-destroys, we de-initialize the mutexattr.
277 // This is technically not right and might lead to false positives. For example, the below
278 // code is *likely* sound, even assuming uninit numbers are UB, but Miri complains.
280 // let mut x: MaybeUninit<libc::pthread_mutexattr_t> = MaybeUninit::zeroed();
281 // libc::pthread_mutexattr_init(x.as_mut_ptr());
282 // libc::pthread_mutexattr_destroy(x.as_mut_ptr());
285 // However, the way libstd uses the pthread APIs works in our favor here, so we can get away with this.
286 // This can always be revisited to have some external state to catch double-destroys
287 // but not complain about the above code. See https://github.com/rust-lang/miri/pull/1933
288 this.write_uninit(&this.deref_operand(attr_op)?.into())?;
// Shim for `pthread_mutex_init`: a NULL attr pointer means the default kind.
293 fn pthread_mutex_init(
295 mutex_op: &OpTy<'tcx, Provenance>,
296 attr_op: &OpTy<'tcx, Provenance>,
297 ) -> InterpResult<'tcx, i32> {
298 let this = self.eval_context_mut();
300 let attr = this.read_pointer(attr_op)?;
301 let kind = if this.ptr_is_null(attr)? {
302 this.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
// Non-NULL attr: take the kind previously stored by mutexattr_set_kind.
304 mutexattr_get_kind(this, attr_op)?
307 // Write 0 to use the same code path as the static initializers.
308 mutex_reset_id(this, mutex_op)?;
310 mutex_set_kind(this, mutex_op, kind)?;
// Shim for `pthread_mutex_lock`. Behavior on relocking depends on the mutex
// kind: UB for default, deadlock for normal, EDEADLK for errorcheck, and a
// recursive lock for recursive mutexes.
315 fn pthread_mutex_lock(&mut self, mutex_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i32> {
316 let this = self.eval_context_mut();
318 let kind = mutex_get_kind(this, mutex_op)?;
319 let id = mutex_get_id(this, mutex_op)?;
320 let active_thread = this.get_active_thread();
322 if this.mutex_is_locked(id) {
323 let owner_thread = this.mutex_get_owner(id);
324 if owner_thread != active_thread {
325 // Enqueue the active thread.
326 this.mutex_enqueue_and_block(id, active_thread);
329 // Trying to acquire the same mutex again.
330 if is_mutex_kind_default(this, kind)? {
331 throw_ub_format!("trying to acquire already locked default mutex");
332 } else if is_mutex_kind_normal(this, kind)? {
// A PTHREAD_MUTEX_NORMAL relock self-deadlocks; report it as such.
333 throw_machine_stop!(TerminationInfo::Deadlock);
334 } else if kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK") {
335 Ok(this.eval_libc_i32("EDEADLK"))
336 } else if kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE") {
337 this.mutex_lock(id, active_thread);
341 "called pthread_mutex_lock on an unsupported type of mutex"
346 // The mutex is unlocked. Let's lock it.
347 this.mutex_lock(id, active_thread);
// Shim for `pthread_mutex_trylock`: never blocks; returns EBUSY when the lock
// cannot be taken, except that recursive mutexes relock successfully.
352 fn pthread_mutex_trylock(
354 mutex_op: &OpTy<'tcx, Provenance>,
355 ) -> InterpResult<'tcx, i32> {
356 let this = self.eval_context_mut();
358 let kind = mutex_get_kind(this, mutex_op)?;
359 let id = mutex_get_id(this, mutex_op)?;
360 let active_thread = this.get_active_thread();
362 if this.mutex_is_locked(id) {
363 let owner_thread = this.mutex_get_owner(id);
364 if owner_thread != active_thread {
// Held by someone else: report busy instead of blocking.
365 Ok(this.eval_libc_i32("EBUSY"))
367 if is_mutex_kind_default(this, kind)?
368 || is_mutex_kind_normal(this, kind)?
369 || kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK")
// Relocking a non-recursive mutex via trylock just reports busy.
371 Ok(this.eval_libc_i32("EBUSY"))
372 } else if kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE") {
373 this.mutex_lock(id, active_thread);
377 "called pthread_mutex_trylock on an unsupported type of mutex"
382 // The mutex is unlocked. Let's lock it.
383 this.mutex_lock(id, active_thread);
// Shim for `pthread_mutex_unlock`. Unlocking a mutex not held by the caller is
// UB for default/normal kinds and EPERM for errorcheck/recursive kinds.
388 fn pthread_mutex_unlock(
390 mutex_op: &OpTy<'tcx, Provenance>,
391 ) -> InterpResult<'tcx, i32> {
392 let this = self.eval_context_mut();
394 let kind = mutex_get_kind(this, mutex_op)?;
395 let id = mutex_get_id(this, mutex_op)?;
396 let active_thread = this.get_active_thread();
398 if let Some(_old_locked_count) = this.mutex_unlock(id, active_thread) {
399 // The mutex was locked by the current thread.
402 // The mutex was locked by another thread or not locked at all. See
403 // the “Unlock When Not Owner” column in
404 // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_unlock.html.
405 if is_mutex_kind_default(this, kind)? {
407 "unlocked a default mutex that was not locked by the current thread"
409 } else if is_mutex_kind_normal(this, kind)? {
411 "unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread"
413 } else if kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK")
414 || kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE")
416 Ok(this.eval_libc_i32("EPERM"))
418 throw_unsup_format!("called pthread_mutex_unlock on an unsupported type of mutex");
// Shim for `pthread_mutex_destroy`: destroying a locked mutex is UB; otherwise
// de-initialize the memory to catch double-destroys.
423 fn pthread_mutex_destroy(
425 mutex_op: &OpTy<'tcx, Provenance>,
426 ) -> InterpResult<'tcx, i32> {
427 let this = self.eval_context_mut();
429 let id = mutex_get_id(this, mutex_op)?;
431 if this.mutex_is_locked(id) {
432 throw_ub_format!("destroyed a locked mutex");
435 // Destroying an uninit pthread_mutex is UB, so check to make sure it's not uninit.
436 mutex_get_kind(this, mutex_op)?;
437 mutex_get_id(this, mutex_op)?;
439 // This might lead to false positives, see comment in pthread_mutexattr_destroy
440 this.write_uninit(&this.deref_operand(mutex_op)?.into())?;
441 // FIXME: delete interpreter state associated with this mutex.
// Shim for `pthread_rwlock_rdlock`: block while a writer holds the lock,
// otherwise take a reader lock.
446 fn pthread_rwlock_rdlock(
448 rwlock_op: &OpTy<'tcx, Provenance>,
449 ) -> InterpResult<'tcx, i32> {
450 let this = self.eval_context_mut();
452 let id = rwlock_get_id(this, rwlock_op)?;
453 let active_thread = this.get_active_thread();
455 if this.rwlock_is_write_locked(id) {
456 this.rwlock_enqueue_and_block_reader(id, active_thread);
459 this.rwlock_reader_lock(id, active_thread);
// Shim for `pthread_rwlock_tryrdlock`: EBUSY while write-locked, otherwise
// take a reader lock without blocking.
464 fn pthread_rwlock_tryrdlock(
466 rwlock_op: &OpTy<'tcx, Provenance>,
467 ) -> InterpResult<'tcx, i32> {
468 let this = self.eval_context_mut();
470 let id = rwlock_get_id(this, rwlock_op)?;
471 let active_thread = this.get_active_thread();
473 if this.rwlock_is_write_locked(id) {
474 Ok(this.eval_libc_i32("EBUSY"))
476 this.rwlock_reader_lock(id, active_thread);
// Shim for `pthread_rwlock_wrlock`: block while the lock is held in any mode,
// otherwise take the writer lock.
481 fn pthread_rwlock_wrlock(
483 rwlock_op: &OpTy<'tcx, Provenance>,
484 ) -> InterpResult<'tcx, i32> {
485 let this = self.eval_context_mut();
487 let id = rwlock_get_id(this, rwlock_op)?;
488 let active_thread = this.get_active_thread();
490 if this.rwlock_is_locked(id) {
491 // Note: this will deadlock if the lock is already locked by this
492 // thread in any way.
494 // Relevant documentation:
495 // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html
496 // An in-depth discussion on this topic:
497 // https://github.com/rust-lang/rust/issues/53127
499 // FIXME: Detect and report the deadlock proactively. (We currently
500 // report the deadlock only when no thread can continue execution,
501 // but we could detect that this lock is already locked and report
503 this.rwlock_enqueue_and_block_writer(id, active_thread);
505 this.rwlock_writer_lock(id, active_thread);
// Shim for `pthread_rwlock_trywrlock`: EBUSY while locked in any mode,
// otherwise take the writer lock without blocking.
511 fn pthread_rwlock_trywrlock(
513 rwlock_op: &OpTy<'tcx, Provenance>,
514 ) -> InterpResult<'tcx, i32> {
515 let this = self.eval_context_mut();
517 let id = rwlock_get_id(this, rwlock_op)?;
518 let active_thread = this.get_active_thread();
520 if this.rwlock_is_locked(id) {
521 Ok(this.eval_libc_i32("EBUSY"))
523 this.rwlock_writer_lock(id, active_thread);
// Shim for `pthread_rwlock_unlock`: release whichever of the two lock modes
// this thread holds; UB if it holds neither.
528 fn pthread_rwlock_unlock(
530 rwlock_op: &OpTy<'tcx, Provenance>,
531 ) -> InterpResult<'tcx, i32> {
532 let this = self.eval_context_mut();
534 let id = rwlock_get_id(this, rwlock_op)?;
535 let active_thread = this.get_active_thread();
537 #[allow(clippy::if_same_then_else)]
538 if this.rwlock_reader_unlock(id, active_thread) {
540 } else if this.rwlock_writer_unlock(id, active_thread) {
543 throw_ub_format!("unlocked an rwlock that was not locked by the active thread");
// Shim for `pthread_rwlock_destroy`: destroying a locked rwlock is UB;
// otherwise de-initialize the memory to catch double-destroys.
547 fn pthread_rwlock_destroy(
549 rwlock_op: &OpTy<'tcx, Provenance>,
550 ) -> InterpResult<'tcx, i32> {
551 let this = self.eval_context_mut();
553 let id = rwlock_get_id(this, rwlock_op)?;
555 if this.rwlock_is_locked(id) {
556 throw_ub_format!("destroyed a locked rwlock");
559 // Destroying an uninit pthread_rwlock is UB, so check to make sure it's not uninit.
560 rwlock_get_id(this, rwlock_op)?;
562 // This might lead to false positives, see comment in pthread_mutexattr_destroy
563 this.write_uninit(&this.deref_operand(rwlock_op)?.into())?;
564 // FIXME: delete interpreter state associated with this rwlock.
// Shim for `pthread_condattr_init`: initialize the attr with the default clock.
569 fn pthread_condattr_init(
571 attr_op: &OpTy<'tcx, Provenance>,
572 ) -> InterpResult<'tcx, i32> {
573 let this = self.eval_context_mut();
575 // The default value of the clock attribute shall refer to the system
577 // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_condattr_setclock.html
578 let default_clock_id = this.eval_libc_i32("CLOCK_REALTIME");
579 condattr_set_clock_id(this, attr_op, default_clock_id)?;
// Shim for `pthread_condattr_setclock`: only CLOCK_REALTIME and
// CLOCK_MONOTONIC are supported; anything else yields EINVAL.
584 fn pthread_condattr_setclock(
586 attr_op: &OpTy<'tcx, Provenance>,
587 clock_id_op: &OpTy<'tcx, Provenance>,
588 ) -> InterpResult<'tcx, Scalar<Provenance>> {
589 let this = self.eval_context_mut();
591 let clock_id = this.read_scalar(clock_id_op)?.to_i32()?;
592 if clock_id == this.eval_libc_i32("CLOCK_REALTIME")
593 || clock_id == this.eval_libc_i32("CLOCK_MONOTONIC")
595 condattr_set_clock_id(this, attr_op, clock_id)?;
597 let einval = this.eval_libc_i32("EINVAL");
598 return Ok(Scalar::from_i32(einval));
601 Ok(Scalar::from_i32(0))
// Shim for `pthread_condattr_getclock`: write the stored clock id through
// the out-pointer `clk_id_op`.
604 fn pthread_condattr_getclock(
606 attr_op: &OpTy<'tcx, Provenance>,
607 clk_id_op: &OpTy<'tcx, Provenance>,
608 ) -> InterpResult<'tcx, Scalar<Provenance>> {
609 let this = self.eval_context_mut();
611 let clock_id = condattr_get_clock_id(this, attr_op)?;
612 this.write_scalar(Scalar::from_i32(clock_id), &this.deref_operand(clk_id_op)?.into())?;
614 Ok(Scalar::from_i32(0))
// Shim for `pthread_condattr_destroy`: check the attr is init, then de-init it.
617 fn pthread_condattr_destroy(
619 attr_op: &OpTy<'tcx, Provenance>,
620 ) -> InterpResult<'tcx, i32> {
621 let this = self.eval_context_mut();
623 // Destroying an uninit pthread_condattr is UB, so check to make sure it's not uninit.
624 condattr_get_clock_id(this, attr_op)?;
626 // This might lead to false positives, see comment in pthread_mutexattr_destroy
627 this.write_uninit(&this.deref_operand(attr_op)?.into())?;
// Shim for `pthread_cond_init`: a NULL attr pointer means CLOCK_REALTIME.
632 fn pthread_cond_init(
634 cond_op: &OpTy<'tcx, Provenance>,
635 attr_op: &OpTy<'tcx, Provenance>,
636 ) -> InterpResult<'tcx, i32> {
637 let this = self.eval_context_mut();
639 let attr = this.read_pointer(attr_op)?;
640 let clock_id = if this.ptr_is_null(attr)? {
641 this.eval_libc_i32("CLOCK_REALTIME")
// Non-NULL attr: take the clock previously stored by condattr_set_clock_id.
643 condattr_get_clock_id(this, attr_op)?
646 // Write 0 to use the same code path as the static initializers.
647 cond_reset_id(this, cond_op)?;
649 cond_set_clock_id(this, cond_op, clock_id)?;
// Shim for `pthread_cond_signal`: wake at most one waiter and have it
// reacquire the associated mutex.
654 fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i32> {
655 let this = self.eval_context_mut();
656 let id = cond_get_id(this, cond_op)?;
657 if let Some((thread, lock)) = this.condvar_signal(id) {
658 if let CondvarLock::Mutex(mutex) = lock {
659 post_cond_signal(this, thread, mutex)?;
// On unix the condvar waiters are always associated with a mutex.
661 panic!("condvar should not have an rwlock on unix");
// Shim for `pthread_cond_broadcast`: wake every waiter, one at a time.
668 fn pthread_cond_broadcast(
670 cond_op: &OpTy<'tcx, Provenance>,
671 ) -> InterpResult<'tcx, i32> {
672 let this = self.eval_context_mut();
673 let id = cond_get_id(this, cond_op)?;
// Keep signalling until no waiters remain.
675 while let Some((thread, lock)) = this.condvar_signal(id) {
676 if let CondvarLock::Mutex(mutex) = lock {
677 post_cond_signal(this, thread, mutex)?;
679 panic!("condvar should not have an rwlock on unix");
// Shim for `pthread_cond_wait`: atomically release the mutex, block, and
// register this thread as a waiter on the condvar (no timeout).
686 fn pthread_cond_wait(
688 cond_op: &OpTy<'tcx, Provenance>,
689 mutex_op: &OpTy<'tcx, Provenance>,
690 ) -> InterpResult<'tcx, i32> {
691 let this = self.eval_context_mut();
693 let id = cond_get_id(this, cond_op)?;
694 let mutex_id = mutex_get_id(this, mutex_op)?;
695 let active_thread = this.get_active_thread();
697 release_cond_mutex_and_block(this, active_thread, mutex_id)?;
698 this.condvar_wait(id, active_thread, CondvarLock::Mutex(mutex_id));
// Shim for `pthread_cond_timedwait`: like `pthread_cond_wait`, but also
// registers a timeout callback that, if it fires first, reacquires the mutex,
// removes the waiter, and overrides the result with ETIMEDOUT.
703 fn pthread_cond_timedwait(
705 cond_op: &OpTy<'tcx, Provenance>,
706 mutex_op: &OpTy<'tcx, Provenance>,
707 abstime_op: &OpTy<'tcx, Provenance>,
708 dest: &PlaceTy<'tcx, Provenance>,
709 ) -> InterpResult<'tcx> {
710 let this = self.eval_context_mut();
712 let id = cond_get_id(this, cond_op)?;
713 let mutex_id = mutex_get_id(this, mutex_op)?;
714 let active_thread = this.get_active_thread();
716 // Extract the timeout.
717 let clock_id = cond_get_clock_id(this, cond_op)?;
718 let duration = match this.read_timespec(&this.deref_operand(abstime_op)?)? {
719 Some(duration) => duration,
// Invalid timespec: fail with EINVAL.
721 let einval = this.eval_libc("EINVAL");
722 this.write_scalar(einval, dest)?;
// The timespec is an *absolute* time on the condvar's configured clock.
727 let timeout_time = if clock_id == this.eval_libc_i32("CLOCK_REALTIME") {
728 this.check_no_isolation("`pthread_cond_timedwait` with `CLOCK_REALTIME`")?;
729 Time::RealTime(SystemTime::UNIX_EPOCH.checked_add(duration).unwrap())
730 } else if clock_id == this.eval_libc_i32("CLOCK_MONOTONIC") {
731 Time::Monotonic(this.machine.clock.anchor().checked_add(duration).unwrap())
733 throw_unsup_format!("unsupported clock id: {}", clock_id);
736 release_cond_mutex_and_block(this, active_thread, mutex_id)?;
737 this.condvar_wait(id, active_thread, CondvarLock::Mutex(mutex_id));
739 // We return success for now and override it in the timeout callback.
740 this.write_scalar(Scalar::from_i32(0), dest)?;
// State the timeout callback needs when (and if) it fires.
742 struct Callback<'tcx> {
743 active_thread: ThreadId,
746 dest: PlaceTy<'tcx, Provenance>,
749 impl<'tcx> VisitTags for Callback<'tcx> {
750 fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
751 let Callback { active_thread: _, mutex_id: _, id: _, dest } = self;
752 dest.visit_tags(visit);
756 impl<'mir, 'tcx: 'mir> MachineCallback<'mir, 'tcx> for Callback<'tcx> {
757 fn call(&self, ecx: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> {
758 // We are not waiting for the condvar any more, wait for the
// mutex instead.
760 reacquire_cond_mutex(ecx, self.active_thread, self.mutex_id)?;
762 // Remove the thread from the conditional variable.
763 ecx.condvar_remove_waiter(self.id, self.active_thread);
765 // Set the return value: we timed out.
766 let etimedout = ecx.eval_libc("ETIMEDOUT");
767 ecx.write_scalar(etimedout, &self.dest)?;
773 // Register the timeout callback.
774 let dest = dest.clone();
775 this.register_timeout_callback(
778 Box::new(Callback { active_thread, mutex_id, id, dest }),
// Shim for `pthread_cond_destroy`: destroying a condvar with waiters is UB;
// otherwise de-initialize the memory to catch double-destroys.
784 fn pthread_cond_destroy(
786 cond_op: &OpTy<'tcx, Provenance>,
787 ) -> InterpResult<'tcx, i32> {
788 let this = self.eval_context_mut();
790 let id = cond_get_id(this, cond_op)?;
791 if this.condvar_is_awaited(id) {
792 throw_ub_format!("destroying an awaited conditional variable");
795 // Destroying an uninit pthread_cond is UB, so check to make sure it's not uninit.
796 cond_get_id(this, cond_op)?;
797 cond_get_clock_id(this, cond_op)?;
799 // This might lead to false positives, see comment in pthread_mutexattr_destroy
800 this.write_uninit(&this.deref_operand(cond_op)?.into())?;
801 // FIXME: delete interpreter state associated with this condvar.