1 use std::time::SystemTime;
4 use stacked_borrows::Tag;
7 // pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.
9 // Our chosen memory layout for emulation (does not have to match the platform layout!):
10 // store an i32 in the first four bytes equal to the corresponding libc mutex kind constant
11 // (e.g. PTHREAD_MUTEX_NORMAL).
/// A flag that allows to distinguish `PTHREAD_MUTEX_NORMAL` from
/// `PTHREAD_MUTEX_DEFAULT`. Since in `glibc` they have the same numeric values,
/// but different behaviour, we need a way to distinguish them. We do this by
/// setting this bit flag to the `PTHREAD_MUTEX_NORMAL` mutexes. See the comment
/// in `pthread_mutexattr_settype` function.
const PTHREAD_MUTEX_NORMAL_FLAG: i32 = 0x8000000;
20 fn is_mutex_kind_default<'mir, 'tcx: 'mir>(
21 ecx: &mut MiriEvalContext<'mir, 'tcx>,
23 ) -> InterpResult<'tcx, bool> {
24 Ok(kind == ecx.eval_libc("PTHREAD_MUTEX_DEFAULT")?)
27 fn is_mutex_kind_normal<'mir, 'tcx: 'mir>(
28 ecx: &mut MiriEvalContext<'mir, 'tcx>,
30 ) -> InterpResult<'tcx, bool> {
31 let kind = kind.to_i32()?;
32 let mutex_normal_kind = ecx.eval_libc("PTHREAD_MUTEX_NORMAL")?.to_i32()?;
33 Ok(kind == (mutex_normal_kind | PTHREAD_MUTEX_NORMAL_FLAG))
36 fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
37 ecx: &MiriEvalContext<'mir, 'tcx>,
38 attr_op: OpTy<'tcx, Tag>,
39 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
40 ecx.read_scalar_at_offset(attr_op, 0, ecx.machine.layouts.i32)
43 fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
44 ecx: &mut MiriEvalContext<'mir, 'tcx>,
45 attr_op: OpTy<'tcx, Tag>,
46 kind: impl Into<ScalarMaybeUninit<Tag>>,
47 ) -> InterpResult<'tcx, ()> {
48 ecx.write_scalar_at_offset(attr_op, 0, kind, ecx.machine.layouts.i32)
51 // pthread_mutex_t is between 24 and 48 bytes, depending on the platform.
53 // Our chosen memory layout for the emulated mutex (does not have to match the platform layout!):
54 // bytes 0-3: reserved for signature on macOS
55 // (need to avoid this because it is set by static initializer macros)
56 // bytes 4-7: mutex id as u32 or 0 if id is not assigned yet.
57 // bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
58 // (the kind has to be at its offset for compatibility with static initializer macros)
60 fn mutex_get_kind<'mir, 'tcx: 'mir>(
61 ecx: &mut MiriEvalContext<'mir, 'tcx>,
62 mutex_op: OpTy<'tcx, Tag>,
63 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
64 let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
65 ecx.read_scalar_at_offset(mutex_op, offset, ecx.machine.layouts.i32)
68 fn mutex_set_kind<'mir, 'tcx: 'mir>(
69 ecx: &mut MiriEvalContext<'mir, 'tcx>,
70 mutex_op: OpTy<'tcx, Tag>,
71 kind: impl Into<ScalarMaybeUninit<Tag>>,
72 ) -> InterpResult<'tcx, ()> {
73 let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
74 ecx.write_scalar_at_offset(mutex_op, offset, kind, ecx.machine.layouts.i32)
77 fn mutex_get_id<'mir, 'tcx: 'mir>(
78 ecx: &MiriEvalContext<'mir, 'tcx>,
79 mutex_op: OpTy<'tcx, Tag>,
80 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
81 ecx.read_scalar_at_offset(mutex_op, 4, ecx.machine.layouts.u32)
84 fn mutex_set_id<'mir, 'tcx: 'mir>(
85 ecx: &mut MiriEvalContext<'mir, 'tcx>,
86 mutex_op: OpTy<'tcx, Tag>,
87 id: impl Into<ScalarMaybeUninit<Tag>>,
88 ) -> InterpResult<'tcx, ()> {
89 ecx.write_scalar_at_offset(mutex_op, 4, id, ecx.machine.layouts.u32)
92 fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
93 ecx: &mut MiriEvalContext<'mir, 'tcx>,
94 mutex_op: OpTy<'tcx, Tag>,
95 ) -> InterpResult<'tcx, MutexId> {
96 let id = mutex_get_id(ecx, mutex_op)?.to_u32()?;
98 // 0 is a default value and also not a valid mutex id. Need to allocate
100 let id = ecx.mutex_create();
101 mutex_set_id(ecx, mutex_op, id.to_u32_scalar())?;
104 Ok(MutexId::from_u32(id))
108 // pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
110 // Our chosen memory layout for the emulated rwlock (does not have to match the platform layout!):
111 // bytes 0-3: reserved for signature on macOS
112 // (need to avoid this because it is set by static initializer macros)
113 // bytes 4-7: rwlock id as u32 or 0 if id is not assigned yet.
115 fn rwlock_get_id<'mir, 'tcx: 'mir>(
116 ecx: &MiriEvalContext<'mir, 'tcx>,
117 rwlock_op: OpTy<'tcx, Tag>,
118 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
119 ecx.read_scalar_at_offset(rwlock_op, 4, ecx.machine.layouts.u32)
122 fn rwlock_set_id<'mir, 'tcx: 'mir>(
123 ecx: &mut MiriEvalContext<'mir, 'tcx>,
124 rwlock_op: OpTy<'tcx, Tag>,
125 id: impl Into<ScalarMaybeUninit<Tag>>,
126 ) -> InterpResult<'tcx, ()> {
127 ecx.write_scalar_at_offset(rwlock_op, 4, id, ecx.machine.layouts.u32)
130 fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>(
131 ecx: &mut MiriEvalContext<'mir, 'tcx>,
132 rwlock_op: OpTy<'tcx, Tag>,
133 ) -> InterpResult<'tcx, RwLockId> {
134 let id = rwlock_get_id(ecx, rwlock_op)?.to_u32()?;
136 // 0 is a default value and also not a valid rwlock id. Need to allocate
137 // a new read-write lock.
138 let id = ecx.rwlock_create();
139 rwlock_set_id(ecx, rwlock_op, id.to_u32_scalar())?;
142 Ok(RwLockId::from_u32(id))
146 // pthread_condattr_t
148 // Our chosen memory layout for emulation (does not have to match the platform layout!):
149 // store an i32 in the first four bytes equal to the corresponding libc clock id constant
150 // (e.g. CLOCK_REALTIME).
152 fn condattr_get_clock_id<'mir, 'tcx: 'mir>(
153 ecx: &MiriEvalContext<'mir, 'tcx>,
154 attr_op: OpTy<'tcx, Tag>,
155 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
156 ecx.read_scalar_at_offset(attr_op, 0, ecx.machine.layouts.i32)
159 fn condattr_set_clock_id<'mir, 'tcx: 'mir>(
160 ecx: &mut MiriEvalContext<'mir, 'tcx>,
161 attr_op: OpTy<'tcx, Tag>,
162 clock_id: impl Into<ScalarMaybeUninit<Tag>>,
163 ) -> InterpResult<'tcx, ()> {
164 ecx.write_scalar_at_offset(attr_op, 0, clock_id, ecx.machine.layouts.i32)
169 // Our chosen memory layout for the emulated conditional variable (does not have
170 // to match the platform layout!):
172 // bytes 0-3: reserved for signature on macOS
173 // bytes 4-7: the conditional variable id as u32 or 0 if id is not assigned yet.
174 // bytes 8-11: the clock id constant as i32
176 fn cond_get_id<'mir, 'tcx: 'mir>(
177 ecx: &MiriEvalContext<'mir, 'tcx>,
178 cond_op: OpTy<'tcx, Tag>,
179 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
180 ecx.read_scalar_at_offset(cond_op, 4, ecx.machine.layouts.u32)
183 fn cond_set_id<'mir, 'tcx: 'mir>(
184 ecx: &mut MiriEvalContext<'mir, 'tcx>,
185 cond_op: OpTy<'tcx, Tag>,
186 id: impl Into<ScalarMaybeUninit<Tag>>,
187 ) -> InterpResult<'tcx, ()> {
188 ecx.write_scalar_at_offset(cond_op, 4, id, ecx.machine.layouts.u32)
191 fn cond_get_or_create_id<'mir, 'tcx: 'mir>(
192 ecx: &mut MiriEvalContext<'mir, 'tcx>,
193 cond_op: OpTy<'tcx, Tag>,
194 ) -> InterpResult<'tcx, CondvarId> {
195 let id = cond_get_id(ecx, cond_op)?.to_u32()?;
197 // 0 is a default value and also not a valid conditional variable id.
198 // Need to allocate a new id.
199 let id = ecx.condvar_create();
200 cond_set_id(ecx, cond_op, id.to_u32_scalar())?;
203 Ok(CondvarId::from_u32(id))
207 fn cond_get_clock_id<'mir, 'tcx: 'mir>(
208 ecx: &MiriEvalContext<'mir, 'tcx>,
209 cond_op: OpTy<'tcx, Tag>,
210 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
211 ecx.read_scalar_at_offset(cond_op, 8, ecx.machine.layouts.i32)
214 fn cond_set_clock_id<'mir, 'tcx: 'mir>(
215 ecx: &mut MiriEvalContext<'mir, 'tcx>,
216 cond_op: OpTy<'tcx, Tag>,
217 clock_id: impl Into<ScalarMaybeUninit<Tag>>,
218 ) -> InterpResult<'tcx, ()> {
219 ecx.write_scalar_at_offset(cond_op, 8, clock_id, ecx.machine.layouts.i32)
/// Try to reacquire the mutex associated with the condition variable after we
/// were signaled.
224 fn reacquire_cond_mutex<'mir, 'tcx: 'mir>(
225 ecx: &mut MiriEvalContext<'mir, 'tcx>,
228 ) -> InterpResult<'tcx> {
229 ecx.unblock_thread(thread);
230 if ecx.mutex_is_locked(mutex) {
231 ecx.mutex_enqueue_and_block(mutex, thread);
233 ecx.mutex_lock(mutex, thread);
238 /// After a thread waiting on a condvar was signalled:
239 /// Reacquire the conditional variable and remove the timeout callback if any
241 fn post_cond_signal<'mir, 'tcx: 'mir>(
242 ecx: &mut MiriEvalContext<'mir, 'tcx>,
245 ) -> InterpResult<'tcx> {
246 reacquire_cond_mutex(ecx, thread, mutex)?;
247 // Waiting for the mutex is not included in the waiting time because we need
248 // to acquire the mutex always even if we get a timeout.
249 ecx.unregister_timeout_callback_if_exists(thread);
253 /// Release the mutex associated with the condition variable because we are
254 /// entering the waiting state.
255 fn release_cond_mutex_and_block<'mir, 'tcx: 'mir>(
256 ecx: &mut MiriEvalContext<'mir, 'tcx>,
257 active_thread: ThreadId,
259 ) -> InterpResult<'tcx> {
260 if let Some(old_locked_count) = ecx.mutex_unlock(mutex, active_thread) {
261 if old_locked_count != 1 {
262 throw_unsup_format!("awaiting on a lock acquired multiple times is not supported");
265 throw_ub_format!("awaiting on unlocked or owned by a different thread mutex");
267 ecx.block_thread(active_thread);
// The trait below carries the actual shim implementations; the blanket impl
// makes them available on every `MiriEvalContext`.
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
273 fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
274 let this = self.eval_context_mut();
276 let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?;
277 mutexattr_set_kind(this, attr_op, default_kind)?;
282 fn pthread_mutexattr_settype(
284 attr_op: OpTy<'tcx, Tag>,
285 kind_op: OpTy<'tcx, Tag>,
286 ) -> InterpResult<'tcx, i32> {
287 let this = self.eval_context_mut();
289 let kind = this.read_scalar(kind_op)?.check_init()?;
290 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
291 // In `glibc` implementation, the numeric values of
292 // `PTHREAD_MUTEX_NORMAL` and `PTHREAD_MUTEX_DEFAULT` are equal.
293 // However, a mutex created by explicitly passing
294 // `PTHREAD_MUTEX_NORMAL` type has in some cases different behaviour
295 // from the default mutex for which the type was not explicitly
296 // specified. For a more detailed discussion, please see
297 // https://github.com/rust-lang/miri/issues/1419.
299 // To distinguish these two cases in already constructed mutexes, we
300 // use the same trick as glibc: for the case when
301 // `pthread_mutexattr_settype` is caled explicitly, we set the
302 // `PTHREAD_MUTEX_NORMAL_FLAG` flag.
303 let normal_kind = kind.to_i32()? | PTHREAD_MUTEX_NORMAL_FLAG;
304 // Check that after setting the flag, the kind is distinguishable
305 // from all other kinds.
306 assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_DEFAULT")?.to_i32()?);
307 assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?.to_i32()?);
308 assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?.to_i32()?);
309 mutexattr_set_kind(this, attr_op, Scalar::from_i32(normal_kind))?;
310 } else if kind == this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
311 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
312 || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?
314 mutexattr_set_kind(this, attr_op, kind)?;
316 let einval = this.eval_libc_i32("EINVAL")?;
323 fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
324 let this = self.eval_context_mut();
326 mutexattr_set_kind(this, attr_op, ScalarMaybeUninit::Uninit)?;
331 fn pthread_mutex_init(
333 mutex_op: OpTy<'tcx, Tag>,
334 attr_op: OpTy<'tcx, Tag>,
335 ) -> InterpResult<'tcx, i32> {
336 let this = self.eval_context_mut();
338 let attr = this.read_scalar(attr_op)?.check_init()?;
339 let kind = if this.is_null(attr)? {
340 this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
342 mutexattr_get_kind(this, attr_op)?.check_init()?
345 // Write 0 to use the same code path as the static initializers.
346 mutex_set_id(this, mutex_op, Scalar::from_i32(0))?;
348 mutex_set_kind(this, mutex_op, kind)?;
353 fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
354 let this = self.eval_context_mut();
356 let kind = mutex_get_kind(this, mutex_op)?.check_init()?;
357 let id = mutex_get_or_create_id(this, mutex_op)?;
358 let active_thread = this.get_active_thread();
360 if this.mutex_is_locked(id) {
361 let owner_thread = this.mutex_get_owner(id);
362 if owner_thread != active_thread {
363 // Enqueue the active thread.
364 this.mutex_enqueue_and_block(id, active_thread);
367 // Trying to acquire the same mutex again.
368 if is_mutex_kind_default(this, kind)? {
369 throw_ub_format!("trying to acquire already locked default mutex");
370 } else if is_mutex_kind_normal(this, kind)? {
371 throw_machine_stop!(TerminationInfo::Deadlock);
372 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
373 this.eval_libc_i32("EDEADLK")
374 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
375 this.mutex_lock(id, active_thread);
379 "called pthread_mutex_lock on an unsupported type of mutex"
384 // The mutex is unlocked. Let's lock it.
385 this.mutex_lock(id, active_thread);
390 fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
391 let this = self.eval_context_mut();
393 let kind = mutex_get_kind(this, mutex_op)?.check_init()?;
394 let id = mutex_get_or_create_id(this, mutex_op)?;
395 let active_thread = this.get_active_thread();
397 if this.mutex_is_locked(id) {
398 let owner_thread = this.mutex_get_owner(id);
399 if owner_thread != active_thread {
400 this.eval_libc_i32("EBUSY")
402 if is_mutex_kind_default(this, kind)?
403 || is_mutex_kind_normal(this, kind)?
404 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
406 this.eval_libc_i32("EBUSY")
407 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
408 this.mutex_lock(id, active_thread);
412 "called pthread_mutex_trylock on an unsupported type of mutex"
417 // The mutex is unlocked. Let's lock it.
418 this.mutex_lock(id, active_thread);
423 fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
424 let this = self.eval_context_mut();
426 let kind = mutex_get_kind(this, mutex_op)?.check_init()?;
427 let id = mutex_get_or_create_id(this, mutex_op)?;
428 let active_thread = this.get_active_thread();
430 if let Some(_old_locked_count) = this.mutex_unlock(id, active_thread) {
431 // The mutex was locked by the current thread.
434 // The mutex was locked by another thread or not locked at all. See
435 // the “Unlock When Not Owner” column in
436 // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_unlock.html.
437 if is_mutex_kind_default(this, kind)? {
439 "unlocked a default mutex that was not locked by the current thread"
441 } else if is_mutex_kind_normal(this, kind)? {
443 "unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread"
445 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
446 this.eval_libc_i32("EPERM")
447 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
448 this.eval_libc_i32("EPERM")
450 throw_unsup_format!("called pthread_mutex_unlock on an unsupported type of mutex");
455 fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
456 let this = self.eval_context_mut();
458 let id = mutex_get_or_create_id(this, mutex_op)?;
460 if this.mutex_is_locked(id) {
461 throw_ub_format!("destroyed a locked mutex");
464 mutex_set_kind(this, mutex_op, ScalarMaybeUninit::Uninit)?;
465 mutex_set_id(this, mutex_op, ScalarMaybeUninit::Uninit)?;
466 // FIXME: delete interpreter state associated with this mutex.
471 fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
472 let this = self.eval_context_mut();
474 let id = rwlock_get_or_create_id(this, rwlock_op)?;
475 let active_thread = this.get_active_thread();
477 if this.rwlock_is_write_locked(id) {
478 this.rwlock_enqueue_and_block_reader(id, active_thread);
481 this.rwlock_reader_lock(id, active_thread);
486 fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
487 let this = self.eval_context_mut();
489 let id = rwlock_get_or_create_id(this, rwlock_op)?;
490 let active_thread = this.get_active_thread();
492 if this.rwlock_is_write_locked(id) {
493 this.eval_libc_i32("EBUSY")
495 this.rwlock_reader_lock(id, active_thread);
500 fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
501 let this = self.eval_context_mut();
503 let id = rwlock_get_or_create_id(this, rwlock_op)?;
504 let active_thread = this.get_active_thread();
506 if this.rwlock_is_locked(id) {
507 // Note: this will deadlock if the lock is already locked by this
508 // thread in any way.
510 // Relevant documentation:
511 // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html
512 // An in-depth discussion on this topic:
513 // https://github.com/rust-lang/rust/issues/53127
515 // FIXME: Detect and report the deadlock proactively. (We currently
516 // report the deadlock only when no thread can continue execution,
517 // but we could detect that this lock is already locked and report
519 this.rwlock_enqueue_and_block_writer(id, active_thread);
521 this.rwlock_writer_lock(id, active_thread);
527 fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
528 let this = self.eval_context_mut();
530 let id = rwlock_get_or_create_id(this, rwlock_op)?;
531 let active_thread = this.get_active_thread();
533 if this.rwlock_is_locked(id) {
534 this.eval_libc_i32("EBUSY")
536 this.rwlock_writer_lock(id, active_thread);
541 fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
542 let this = self.eval_context_mut();
544 let id = rwlock_get_or_create_id(this, rwlock_op)?;
545 let active_thread = this.get_active_thread();
547 if this.rwlock_reader_unlock(id, active_thread) {
549 } else if this.rwlock_writer_unlock(id, active_thread) {
552 throw_ub_format!("unlocked an rwlock that was not locked by the active thread");
556 fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
557 let this = self.eval_context_mut();
559 let id = rwlock_get_or_create_id(this, rwlock_op)?;
561 if this.rwlock_is_locked(id) {
562 throw_ub_format!("destroyed a locked rwlock");
565 rwlock_set_id(this, rwlock_op, ScalarMaybeUninit::Uninit)?;
566 // FIXME: delete interpreter state associated with this rwlock.
571 fn pthread_condattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
572 let this = self.eval_context_mut();
574 // The default value of the clock attribute shall refer to the system
576 // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_condattr_setclock.html
577 let default_clock_id = this.eval_libc("CLOCK_REALTIME")?;
578 condattr_set_clock_id(this, attr_op, default_clock_id)?;
583 fn pthread_condattr_setclock(
585 attr_op: OpTy<'tcx, Tag>,
586 clock_id_op: OpTy<'tcx, Tag>,
587 ) -> InterpResult<'tcx, i32> {
588 let this = self.eval_context_mut();
590 let clock_id = this.read_scalar(clock_id_op)?.check_init()?;
591 if clock_id == this.eval_libc("CLOCK_REALTIME")?
592 || clock_id == this.eval_libc("CLOCK_MONOTONIC")?
594 condattr_set_clock_id(this, attr_op, clock_id)?;
596 let einval = this.eval_libc_i32("EINVAL")?;
603 fn pthread_condattr_getclock(
605 attr_op: OpTy<'tcx, Tag>,
606 clk_id_op: OpTy<'tcx, Tag>,
607 ) -> InterpResult<'tcx, i32> {
608 let this = self.eval_context_mut();
610 let clock_id = condattr_get_clock_id(this, attr_op)?;
611 this.write_scalar(clock_id, this.deref_operand(clk_id_op)?.into())?;
616 fn pthread_condattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
617 let this = self.eval_context_mut();
619 condattr_set_clock_id(this, attr_op, ScalarMaybeUninit::Uninit)?;
624 fn pthread_cond_init(
626 cond_op: OpTy<'tcx, Tag>,
627 attr_op: OpTy<'tcx, Tag>,
628 ) -> InterpResult<'tcx, i32> {
629 let this = self.eval_context_mut();
631 let attr = this.read_scalar(attr_op)?.check_init()?;
632 let clock_id = if this.is_null(attr)? {
633 this.eval_libc("CLOCK_REALTIME")?
635 condattr_get_clock_id(this, attr_op)?.check_init()?
638 // Write 0 to use the same code path as the static initializers.
639 cond_set_id(this, cond_op, Scalar::from_i32(0))?;
641 cond_set_clock_id(this, cond_op, clock_id)?;
646 fn pthread_cond_signal(&mut self, cond_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
647 let this = self.eval_context_mut();
648 let id = cond_get_or_create_id(this, cond_op)?;
649 if let Some((thread, mutex)) = this.condvar_signal(id) {
650 post_cond_signal(this, thread, mutex)?;
656 fn pthread_cond_broadcast(&mut self, cond_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
657 let this = self.eval_context_mut();
658 let id = cond_get_or_create_id(this, cond_op)?;
660 while let Some((thread, mutex)) = this.condvar_signal(id) {
661 post_cond_signal(this, thread, mutex)?;
667 fn pthread_cond_wait(
669 cond_op: OpTy<'tcx, Tag>,
670 mutex_op: OpTy<'tcx, Tag>,
671 ) -> InterpResult<'tcx, i32> {
672 let this = self.eval_context_mut();
674 let id = cond_get_or_create_id(this, cond_op)?;
675 let mutex_id = mutex_get_or_create_id(this, mutex_op)?;
676 let active_thread = this.get_active_thread();
678 release_cond_mutex_and_block(this, active_thread, mutex_id)?;
679 this.condvar_wait(id, active_thread, mutex_id);
684 fn pthread_cond_timedwait(
686 cond_op: OpTy<'tcx, Tag>,
687 mutex_op: OpTy<'tcx, Tag>,
688 abstime_op: OpTy<'tcx, Tag>,
689 dest: PlaceTy<'tcx, Tag>,
690 ) -> InterpResult<'tcx> {
691 let this = self.eval_context_mut();
693 this.check_no_isolation("pthread_cond_timedwait")?;
695 let id = cond_get_or_create_id(this, cond_op)?;
696 let mutex_id = mutex_get_or_create_id(this, mutex_op)?;
697 let active_thread = this.get_active_thread();
699 // Extract the timeout.
700 let clock_id = cond_get_clock_id(this, cond_op)?.to_i32()?;
701 let duration = match this.read_timespec(abstime_op)? {
702 Some(duration) => duration,
704 let einval = this.eval_libc("EINVAL")?;
705 this.write_scalar(einval, dest)?;
710 let timeout_time = if clock_id == this.eval_libc_i32("CLOCK_REALTIME")? {
711 Time::RealTime(SystemTime::UNIX_EPOCH.checked_add(duration).unwrap())
712 } else if clock_id == this.eval_libc_i32("CLOCK_MONOTONIC")? {
713 Time::Monotonic(this.machine.time_anchor.checked_add(duration).unwrap())
715 throw_unsup_format!("unsupported clock id: {}", clock_id);
718 release_cond_mutex_and_block(this, active_thread, mutex_id)?;
719 this.condvar_wait(id, active_thread, mutex_id);
721 // We return success for now and override it in the timeout callback.
722 this.write_scalar(Scalar::from_i32(0), dest)?;
724 // Register the timeout callback.
725 this.register_timeout_callback(
728 Box::new(move |ecx| {
729 // We are not waiting for the condvar any more, wait for the
731 reacquire_cond_mutex(ecx, active_thread, mutex_id)?;
733 // Remove the thread from the conditional variable.
734 ecx.condvar_remove_waiter(id, active_thread);
736 // Set the return value: we timed out.
737 let etimedout = ecx.eval_libc("ETIMEDOUT")?;
738 ecx.write_scalar(etimedout, dest)?;
747 fn pthread_cond_destroy(&mut self, cond_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
748 let this = self.eval_context_mut();
750 let id = cond_get_or_create_id(this, cond_op)?;
751 if this.condvar_is_awaited(id) {
752 throw_ub_format!("destroying an awaited conditional variable");
754 cond_set_id(this, cond_op, ScalarMaybeUninit::Uninit)?;
755 cond_set_clock_id(this, cond_op, ScalarMaybeUninit::Uninit)?;
756 // FIXME: delete interpreter state associated with this condvar.