1 use std::time::SystemTime;
4 use helpers::TimespecError;
5 use stacked_borrows::Tag;
8 // pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.
10 // Our chosen memory layout for emulation (does not have to match the platform layout!):
11 // store an i32 in the first four bytes equal to the corresponding libc mutex kind constant
12 // (e.g. PTHREAD_MUTEX_NORMAL).
/// A flag that allows to distinguish `PTHREAD_MUTEX_NORMAL` from
/// `PTHREAD_MUTEX_DEFAULT`. Since in `glibc` they have the same numeric values,
/// but different behaviour, we need a way to distinguish them. We do this by
/// setting this bit flag to the `PTHREAD_MUTEX_NORMAL` mutexes. See the comment
/// in `pthread_mutexattr_settype` function.
// This bit must not collide with any of the libc mutex-kind constants; the
// `assert_ne!` checks in `pthread_mutexattr_settype` verify that at runtime.
const PTHREAD_MUTEX_NORMAL_FLAG: i32 = 0x8000000;
21 fn is_mutex_kind_default<'mir, 'tcx: 'mir>(
22 ecx: &mut MiriEvalContext<'mir, 'tcx>,
24 ) -> InterpResult<'tcx, bool> {
25 Ok(kind == ecx.eval_libc("PTHREAD_MUTEX_DEFAULT")?)
28 fn is_mutex_kind_normal<'mir, 'tcx: 'mir>(
29 ecx: &mut MiriEvalContext<'mir, 'tcx>,
31 ) -> InterpResult<'tcx, bool> {
32 let kind = kind.to_i32()?;
33 let mutex_normal_kind = ecx.eval_libc("PTHREAD_MUTEX_NORMAL")?.to_i32()?;
34 Ok(kind == (mutex_normal_kind | PTHREAD_MUTEX_NORMAL_FLAG))
37 fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
38 ecx: &MiriEvalContext<'mir, 'tcx>,
39 attr_op: OpTy<'tcx, Tag>,
40 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
41 ecx.read_scalar_at_offset(attr_op, 0, ecx.machine.layouts.i32)
44 fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
45 ecx: &mut MiriEvalContext<'mir, 'tcx>,
46 attr_op: OpTy<'tcx, Tag>,
47 kind: impl Into<ScalarMaybeUninit<Tag>>,
48 ) -> InterpResult<'tcx, ()> {
49 ecx.write_scalar_at_offset(attr_op, 0, kind, ecx.machine.layouts.i32)
52 // pthread_mutex_t is between 24 and 48 bytes, depending on the platform.
54 // Our chosen memory layout for the emulated mutex (does not have to match the platform layout!):
55 // bytes 0-3: reserved for signature on macOS
56 // (need to avoid this because it is set by static initializer macros)
57 // bytes 4-7: mutex id as u32 or 0 if id is not assigned yet.
58 // bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
59 // (the kind has to be at its offset for compatibility with static initializer macros)
61 fn mutex_get_kind<'mir, 'tcx: 'mir>(
62 ecx: &mut MiriEvalContext<'mir, 'tcx>,
63 mutex_op: OpTy<'tcx, Tag>,
64 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
65 let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
66 ecx.read_scalar_at_offset(mutex_op, offset, ecx.machine.layouts.i32)
69 fn mutex_set_kind<'mir, 'tcx: 'mir>(
70 ecx: &mut MiriEvalContext<'mir, 'tcx>,
71 mutex_op: OpTy<'tcx, Tag>,
72 kind: impl Into<ScalarMaybeUninit<Tag>>,
73 ) -> InterpResult<'tcx, ()> {
74 let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
75 ecx.write_scalar_at_offset(mutex_op, offset, kind, ecx.machine.layouts.i32)
78 fn mutex_get_id<'mir, 'tcx: 'mir>(
79 ecx: &MiriEvalContext<'mir, 'tcx>,
80 mutex_op: OpTy<'tcx, Tag>,
81 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
82 ecx.read_scalar_at_offset(mutex_op, 4, ecx.machine.layouts.u32)
85 fn mutex_set_id<'mir, 'tcx: 'mir>(
86 ecx: &mut MiriEvalContext<'mir, 'tcx>,
87 mutex_op: OpTy<'tcx, Tag>,
88 id: impl Into<ScalarMaybeUninit<Tag>>,
89 ) -> InterpResult<'tcx, ()> {
90 ecx.write_scalar_at_offset(mutex_op, 4, id, ecx.machine.layouts.u32)
93 fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
94 ecx: &mut MiriEvalContext<'mir, 'tcx>,
95 mutex_op: OpTy<'tcx, Tag>,
96 ) -> InterpResult<'tcx, MutexId> {
97 let id = mutex_get_id(ecx, mutex_op)?.to_u32()?;
99 // 0 is a default value and also not a valid mutex id. Need to allocate
101 let id = ecx.mutex_create();
102 mutex_set_id(ecx, mutex_op, id.to_u32_scalar())?;
105 Ok(MutexId::from_u32(id))
109 // pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
111 // Our chosen memory layout for the emulated rwlock (does not have to match the platform layout!):
112 // bytes 0-3: reserved for signature on macOS
113 // (need to avoid this because it is set by static initializer macros)
114 // bytes 4-7: rwlock id as u32 or 0 if id is not assigned yet.
116 fn rwlock_get_id<'mir, 'tcx: 'mir>(
117 ecx: &MiriEvalContext<'mir, 'tcx>,
118 rwlock_op: OpTy<'tcx, Tag>,
119 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
120 ecx.read_scalar_at_offset(rwlock_op, 4, ecx.machine.layouts.u32)
123 fn rwlock_set_id<'mir, 'tcx: 'mir>(
124 ecx: &mut MiriEvalContext<'mir, 'tcx>,
125 rwlock_op: OpTy<'tcx, Tag>,
126 id: impl Into<ScalarMaybeUninit<Tag>>,
127 ) -> InterpResult<'tcx, ()> {
128 ecx.write_scalar_at_offset(rwlock_op, 4, id, ecx.machine.layouts.u32)
131 fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>(
132 ecx: &mut MiriEvalContext<'mir, 'tcx>,
133 rwlock_op: OpTy<'tcx, Tag>,
134 ) -> InterpResult<'tcx, RwLockId> {
135 let id = rwlock_get_id(ecx, rwlock_op)?.to_u32()?;
137 // 0 is a default value and also not a valid rwlock id. Need to allocate
138 // a new read-write lock.
139 let id = ecx.rwlock_create();
140 rwlock_set_id(ecx, rwlock_op, id.to_u32_scalar())?;
143 Ok(RwLockId::from_u32(id))
147 // pthread_condattr_t
149 // Our chosen memory layout for emulation (does not have to match the platform layout!):
150 // store an i32 in the first four bytes equal to the corresponding libc clock id constant
151 // (e.g. CLOCK_REALTIME).
153 fn condattr_get_clock_id<'mir, 'tcx: 'mir>(
154 ecx: &MiriEvalContext<'mir, 'tcx>,
155 attr_op: OpTy<'tcx, Tag>,
156 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
157 ecx.read_scalar_at_offset(attr_op, 0, ecx.machine.layouts.i32)
160 fn condattr_set_clock_id<'mir, 'tcx: 'mir>(
161 ecx: &mut MiriEvalContext<'mir, 'tcx>,
162 attr_op: OpTy<'tcx, Tag>,
163 clock_id: impl Into<ScalarMaybeUninit<Tag>>,
164 ) -> InterpResult<'tcx, ()> {
165 ecx.write_scalar_at_offset(attr_op, 0, clock_id, ecx.machine.layouts.i32)
170 // Our chosen memory layout for the emulated conditional variable (does not have
171 // to match the platform layout!):
173 // bytes 0-3: reserved for signature on macOS
174 // bytes 4-7: the conditional variable id as u32 or 0 if id is not assigned yet.
175 // bytes 8-11: the clock id constant as i32
177 fn cond_get_id<'mir, 'tcx: 'mir>(
178 ecx: &MiriEvalContext<'mir, 'tcx>,
179 cond_op: OpTy<'tcx, Tag>,
180 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
181 ecx.read_scalar_at_offset(cond_op, 4, ecx.machine.layouts.u32)
184 fn cond_set_id<'mir, 'tcx: 'mir>(
185 ecx: &mut MiriEvalContext<'mir, 'tcx>,
186 cond_op: OpTy<'tcx, Tag>,
187 id: impl Into<ScalarMaybeUninit<Tag>>,
188 ) -> InterpResult<'tcx, ()> {
189 ecx.write_scalar_at_offset(cond_op, 4, id, ecx.machine.layouts.u32)
192 fn cond_get_or_create_id<'mir, 'tcx: 'mir>(
193 ecx: &mut MiriEvalContext<'mir, 'tcx>,
194 cond_op: OpTy<'tcx, Tag>,
195 ) -> InterpResult<'tcx, CondvarId> {
196 let id = cond_get_id(ecx, cond_op)?.to_u32()?;
198 // 0 is a default value and also not a valid conditional variable id.
199 // Need to allocate a new id.
200 let id = ecx.condvar_create();
201 cond_set_id(ecx, cond_op, id.to_u32_scalar())?;
204 Ok(CondvarId::from_u32(id))
208 fn cond_get_clock_id<'mir, 'tcx: 'mir>(
209 ecx: &MiriEvalContext<'mir, 'tcx>,
210 cond_op: OpTy<'tcx, Tag>,
211 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
212 ecx.read_scalar_at_offset(cond_op, 8, ecx.machine.layouts.i32)
215 fn cond_set_clock_id<'mir, 'tcx: 'mir>(
216 ecx: &mut MiriEvalContext<'mir, 'tcx>,
217 cond_op: OpTy<'tcx, Tag>,
218 clock_id: impl Into<ScalarMaybeUninit<Tag>>,
219 ) -> InterpResult<'tcx, ()> {
220 ecx.write_scalar_at_offset(cond_op, 8, clock_id, ecx.machine.layouts.i32)
223 /// Try to reacquire the mutex associated with the condition variable after we
225 fn reacquire_cond_mutex<'mir, 'tcx: 'mir>(
226 ecx: &mut MiriEvalContext<'mir, 'tcx>,
229 ) -> InterpResult<'tcx> {
230 ecx.unblock_thread(thread);
231 if ecx.mutex_is_locked(mutex) {
232 ecx.mutex_enqueue_and_block(mutex, thread);
234 ecx.mutex_lock(mutex, thread);
239 /// After a thread waiting on a condvar was signalled:
240 /// Reacquire the conditional variable and remove the timeout callback if any
242 fn post_cond_signal<'mir, 'tcx: 'mir>(
243 ecx: &mut MiriEvalContext<'mir, 'tcx>,
246 ) -> InterpResult<'tcx> {
247 reacquire_cond_mutex(ecx, thread, mutex)?;
248 // Waiting for the mutex is not included in the waiting time because we need
249 // to acquire the mutex always even if we get a timeout.
250 ecx.unregister_timeout_callback_if_exists(thread);
254 /// Release the mutex associated with the condition variable because we are
255 /// entering the waiting state.
256 fn release_cond_mutex_and_block<'mir, 'tcx: 'mir>(
257 ecx: &mut MiriEvalContext<'mir, 'tcx>,
258 active_thread: ThreadId,
260 ) -> InterpResult<'tcx> {
261 if let Some(old_locked_count) = ecx.mutex_unlock(mutex, active_thread) {
262 if old_locked_count != 1 {
263 throw_unsup_format!("awaiting on a lock acquired multiple times is not supported");
266 throw_ub_format!("awaiting on unlocked or owned by a different thread mutex");
268 ecx.block_thread(active_thread);
// Blanket impl: every `MiriEvalContext` gets the pthread shims below.
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
273 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
274 fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
275 let this = self.eval_context_mut();
277 let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?;
278 mutexattr_set_kind(this, attr_op, default_kind)?;
283 fn pthread_mutexattr_settype(
285 attr_op: OpTy<'tcx, Tag>,
286 kind_op: OpTy<'tcx, Tag>,
287 ) -> InterpResult<'tcx, i32> {
288 let this = self.eval_context_mut();
290 let kind = this.read_scalar(kind_op)?.check_init()?;
291 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
292 // In `glibc` implementation, the numeric values of
293 // `PTHREAD_MUTEX_NORMAL` and `PTHREAD_MUTEX_DEFAULT` are equal.
294 // However, a mutex created by explicitly passing
295 // `PTHREAD_MUTEX_NORMAL` type has in some cases different behaviour
296 // from the default mutex for which the type was not explicitly
297 // specified. For a more detailed discussion, please see
298 // https://github.com/rust-lang/miri/issues/1419.
300 // To distinguish these two cases in already constructed mutexes, we
301 // use the same trick as glibc: for the case when
302 // `pthread_mutexattr_settype` is caled explicitly, we set the
303 // `PTHREAD_MUTEX_NORMAL_FLAG` flag.
304 let normal_kind = kind.to_i32()? | PTHREAD_MUTEX_NORMAL_FLAG;
305 // Check that after setting the flag, the kind is distinguishable
306 // from all other kinds.
307 assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_DEFAULT")?.to_i32()?);
308 assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?.to_i32()?);
309 assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?.to_i32()?);
310 mutexattr_set_kind(this, attr_op, Scalar::from_i32(normal_kind))?;
311 } else if kind == this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
312 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
313 || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?
315 mutexattr_set_kind(this, attr_op, kind)?;
317 let einval = this.eval_libc_i32("EINVAL")?;
324 fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
325 let this = self.eval_context_mut();
327 mutexattr_set_kind(this, attr_op, ScalarMaybeUninit::Uninit)?;
332 fn pthread_mutex_init(
334 mutex_op: OpTy<'tcx, Tag>,
335 attr_op: OpTy<'tcx, Tag>,
336 ) -> InterpResult<'tcx, i32> {
337 let this = self.eval_context_mut();
339 let attr = this.read_scalar(attr_op)?.check_init()?;
340 let kind = if this.is_null(attr)? {
341 this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
343 mutexattr_get_kind(this, attr_op)?.check_init()?
346 // Write 0 to use the same code path as the static initializers.
347 mutex_set_id(this, mutex_op, Scalar::from_i32(0))?;
349 mutex_set_kind(this, mutex_op, kind)?;
354 fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
355 let this = self.eval_context_mut();
357 let kind = mutex_get_kind(this, mutex_op)?.check_init()?;
358 let id = mutex_get_or_create_id(this, mutex_op)?;
359 let active_thread = this.get_active_thread();
361 if this.mutex_is_locked(id) {
362 let owner_thread = this.mutex_get_owner(id);
363 if owner_thread != active_thread {
364 // Enqueue the active thread.
365 this.mutex_enqueue_and_block(id, active_thread);
368 // Trying to acquire the same mutex again.
369 if is_mutex_kind_default(this, kind)? {
370 throw_ub_format!("trying to acquire already locked default mutex");
371 } else if is_mutex_kind_normal(this, kind)? {
372 throw_machine_stop!(TerminationInfo::Deadlock);
373 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
374 this.eval_libc_i32("EDEADLK")
375 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
376 this.mutex_lock(id, active_thread);
380 "called pthread_mutex_lock on an unsupported type of mutex"
385 // The mutex is unlocked. Let's lock it.
386 this.mutex_lock(id, active_thread);
391 fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
392 let this = self.eval_context_mut();
394 let kind = mutex_get_kind(this, mutex_op)?.check_init()?;
395 let id = mutex_get_or_create_id(this, mutex_op)?;
396 let active_thread = this.get_active_thread();
398 if this.mutex_is_locked(id) {
399 let owner_thread = this.mutex_get_owner(id);
400 if owner_thread != active_thread {
401 this.eval_libc_i32("EBUSY")
403 if is_mutex_kind_default(this, kind)?
404 || is_mutex_kind_normal(this, kind)?
405 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
407 this.eval_libc_i32("EBUSY")
408 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
409 this.mutex_lock(id, active_thread);
413 "called pthread_mutex_trylock on an unsupported type of mutex"
418 // The mutex is unlocked. Let's lock it.
419 this.mutex_lock(id, active_thread);
424 fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
425 let this = self.eval_context_mut();
427 let kind = mutex_get_kind(this, mutex_op)?.check_init()?;
428 let id = mutex_get_or_create_id(this, mutex_op)?;
429 let active_thread = this.get_active_thread();
431 if let Some(_old_locked_count) = this.mutex_unlock(id, active_thread) {
432 // The mutex was locked by the current thread.
435 // The mutex was locked by another thread or not locked at all. See
436 // the “Unlock When Not Owner” column in
437 // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_unlock.html.
438 if is_mutex_kind_default(this, kind)? {
440 "unlocked a default mutex that was not locked by the current thread"
442 } else if is_mutex_kind_normal(this, kind)? {
444 "unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread"
446 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
447 this.eval_libc_i32("EPERM")
448 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
449 this.eval_libc_i32("EPERM")
451 throw_unsup_format!("called pthread_mutex_unlock on an unsupported type of mutex");
456 fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
457 let this = self.eval_context_mut();
459 let id = mutex_get_or_create_id(this, mutex_op)?;
461 if this.mutex_is_locked(id) {
462 throw_ub_format!("destroyed a locked mutex");
465 mutex_set_kind(this, mutex_op, ScalarMaybeUninit::Uninit)?;
466 mutex_set_id(this, mutex_op, ScalarMaybeUninit::Uninit)?;
467 // FIXME: delete interpreter state associated with this mutex.
472 fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
473 let this = self.eval_context_mut();
475 let id = rwlock_get_or_create_id(this, rwlock_op)?;
476 let active_thread = this.get_active_thread();
478 if this.rwlock_is_write_locked(id) {
479 this.rwlock_enqueue_and_block_reader(id, active_thread);
482 this.rwlock_reader_lock(id, active_thread);
487 fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
488 let this = self.eval_context_mut();
490 let id = rwlock_get_or_create_id(this, rwlock_op)?;
491 let active_thread = this.get_active_thread();
493 if this.rwlock_is_write_locked(id) {
494 this.eval_libc_i32("EBUSY")
496 this.rwlock_reader_lock(id, active_thread);
501 fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
502 let this = self.eval_context_mut();
504 let id = rwlock_get_or_create_id(this, rwlock_op)?;
505 let active_thread = this.get_active_thread();
507 if this.rwlock_is_locked(id) {
508 // Note: this will deadlock if the lock is already locked by this
509 // thread in any way.
511 // Relevant documentation:
512 // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html
513 // An in-depth discussion on this topic:
514 // https://github.com/rust-lang/rust/issues/53127
516 // FIXME: Detect and report the deadlock proactively. (We currently
517 // report the deadlock only when no thread can continue execution,
518 // but we could detect that this lock is already locked and report
520 this.rwlock_enqueue_and_block_writer(id, active_thread);
522 this.rwlock_writer_lock(id, active_thread);
528 fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
529 let this = self.eval_context_mut();
531 let id = rwlock_get_or_create_id(this, rwlock_op)?;
532 let active_thread = this.get_active_thread();
534 if this.rwlock_is_locked(id) {
535 this.eval_libc_i32("EBUSY")
537 this.rwlock_writer_lock(id, active_thread);
542 fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
543 let this = self.eval_context_mut();
545 let id = rwlock_get_or_create_id(this, rwlock_op)?;
546 let active_thread = this.get_active_thread();
548 if this.rwlock_reader_unlock(id, active_thread) {
550 } else if this.rwlock_writer_unlock(id, active_thread) {
553 throw_ub_format!("unlocked an rwlock that was not locked by the active thread");
557 fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
558 let this = self.eval_context_mut();
560 let id = rwlock_get_or_create_id(this, rwlock_op)?;
562 if this.rwlock_is_locked(id) {
563 throw_ub_format!("destroyed a locked rwlock");
566 rwlock_set_id(this, rwlock_op, ScalarMaybeUninit::Uninit)?;
567 // FIXME: delete interpreter state associated with this rwlock.
572 fn pthread_condattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
573 let this = self.eval_context_mut();
575 // The default value of the clock attribute shall refer to the system
577 // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_condattr_setclock.html
578 let default_clock_id = this.eval_libc("CLOCK_REALTIME")?;
579 condattr_set_clock_id(this, attr_op, default_clock_id)?;
584 fn pthread_condattr_setclock(
586 attr_op: OpTy<'tcx, Tag>,
587 clock_id_op: OpTy<'tcx, Tag>,
588 ) -> InterpResult<'tcx, i32> {
589 let this = self.eval_context_mut();
591 let clock_id = this.read_scalar(clock_id_op)?.check_init()?;
592 if clock_id == this.eval_libc("CLOCK_REALTIME")?
593 || clock_id == this.eval_libc("CLOCK_MONOTONIC")?
595 condattr_set_clock_id(this, attr_op, clock_id)?;
597 let einval = this.eval_libc_i32("EINVAL")?;
604 fn pthread_condattr_getclock(
606 attr_op: OpTy<'tcx, Tag>,
607 clk_id_op: OpTy<'tcx, Tag>,
608 ) -> InterpResult<'tcx, i32> {
609 let this = self.eval_context_mut();
611 let clock_id = condattr_get_clock_id(this, attr_op)?;
612 this.write_scalar(clock_id, this.deref_operand(clk_id_op)?.into())?;
617 fn pthread_condattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
618 let this = self.eval_context_mut();
620 condattr_set_clock_id(this, attr_op, ScalarMaybeUninit::Uninit)?;
625 fn pthread_cond_init(
627 cond_op: OpTy<'tcx, Tag>,
628 attr_op: OpTy<'tcx, Tag>,
629 ) -> InterpResult<'tcx, i32> {
630 let this = self.eval_context_mut();
632 let attr = this.read_scalar(attr_op)?.check_init()?;
633 let clock_id = if this.is_null(attr)? {
634 this.eval_libc("CLOCK_REALTIME")?
636 condattr_get_clock_id(this, attr_op)?.check_init()?
639 // Write 0 to use the same code path as the static initializers.
640 cond_set_id(this, cond_op, Scalar::from_i32(0))?;
642 cond_set_clock_id(this, cond_op, clock_id)?;
647 fn pthread_cond_signal(&mut self, cond_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
648 let this = self.eval_context_mut();
649 let id = cond_get_or_create_id(this, cond_op)?;
650 if let Some((thread, mutex)) = this.condvar_signal(id) {
651 post_cond_signal(this, thread, mutex)?;
657 fn pthread_cond_broadcast(&mut self, cond_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
658 let this = self.eval_context_mut();
659 let id = cond_get_or_create_id(this, cond_op)?;
661 while let Some((thread, mutex)) = this.condvar_signal(id) {
662 post_cond_signal(this, thread, mutex)?;
668 fn pthread_cond_wait(
670 cond_op: OpTy<'tcx, Tag>,
671 mutex_op: OpTy<'tcx, Tag>,
672 ) -> InterpResult<'tcx, i32> {
673 let this = self.eval_context_mut();
675 let id = cond_get_or_create_id(this, cond_op)?;
676 let mutex_id = mutex_get_or_create_id(this, mutex_op)?;
677 let active_thread = this.get_active_thread();
679 release_cond_mutex_and_block(this, active_thread, mutex_id)?;
680 this.condvar_wait(id, active_thread, mutex_id);
685 fn pthread_cond_timedwait(
687 cond_op: OpTy<'tcx, Tag>,
688 mutex_op: OpTy<'tcx, Tag>,
689 abstime_op: OpTy<'tcx, Tag>,
690 dest: PlaceTy<'tcx, Tag>,
691 ) -> InterpResult<'tcx> {
692 let this = self.eval_context_mut();
694 this.check_no_isolation("pthread_cond_timedwait")?;
696 let id = cond_get_or_create_id(this, cond_op)?;
697 let mutex_id = mutex_get_or_create_id(this, mutex_op)?;
698 let active_thread = this.get_active_thread();
700 // Extract the timeout.
701 let clock_id = cond_get_clock_id(this, cond_op)?.to_i32()?;
702 let duration = match this.read_timespec(abstime_op)? {
703 Ok(duration) => duration,
704 Err(TimespecError) => {
705 let einval = this.eval_libc("EINVAL")?;
706 this.write_scalar(einval, dest)?;
711 let timeout_time = if clock_id == this.eval_libc_i32("CLOCK_REALTIME")? {
712 Time::RealTime(SystemTime::UNIX_EPOCH.checked_add(duration).unwrap())
713 } else if clock_id == this.eval_libc_i32("CLOCK_MONOTONIC")? {
714 Time::Monotonic(this.machine.time_anchor.checked_add(duration).unwrap())
716 throw_unsup_format!("unsupported clock id: {}", clock_id);
719 release_cond_mutex_and_block(this, active_thread, mutex_id)?;
720 this.condvar_wait(id, active_thread, mutex_id);
722 // We return success for now and override it in the timeout callback.
723 this.write_scalar(Scalar::from_i32(0), dest)?;
725 // Register the timeout callback.
726 this.register_timeout_callback(
729 Box::new(move |ecx| {
730 // We are not waiting for the condvar any more, wait for the
732 reacquire_cond_mutex(ecx, active_thread, mutex_id)?;
734 // Remove the thread from the conditional variable.
735 ecx.condvar_remove_waiter(id, active_thread);
737 // Set the return value: we timed out.
738 let etimedout = ecx.eval_libc("ETIMEDOUT")?;
739 ecx.write_scalar(etimedout, dest)?;
748 fn pthread_cond_destroy(&mut self, cond_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
749 let this = self.eval_context_mut();
751 let id = cond_get_or_create_id(this, cond_op)?;
752 if this.condvar_is_awaited(id) {
753 throw_ub_format!("destroying an awaited conditional variable");
755 cond_set_id(this, cond_op, ScalarMaybeUninit::Uninit)?;
756 cond_set_clock_id(this, cond_op, ScalarMaybeUninit::Uninit)?;
757 // FIXME: delete interpreter state associated with this condvar.