use std::time::SystemTime;

use rustc_hir::LangItem;
use rustc_middle::ty::{layout::TyAndLayout, query::TyCtxtAt, subst::Subst, Ty};

// Pulls in the interpreter types used below (MiriEvalContext, Tag, OpTy, InterpResult, ...).
use crate::*;

// pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.
//
// Our chosen memory layout for emulation (does not have to match the platform layout!):
// store an i32 in the first four bytes equal to the corresponding libc mutex kind constant
// (e.g. PTHREAD_MUTEX_NORMAL).

/// A flag that allows us to distinguish `PTHREAD_MUTEX_NORMAL` from
/// `PTHREAD_MUTEX_DEFAULT`. In `glibc` they have the same numeric value but
/// different behaviour, so we need a way to tell them apart. We do this by
/// setting this bit flag on `PTHREAD_MUTEX_NORMAL` mutexes. See the comment in
/// the `pthread_mutexattr_settype` function.
const PTHREAD_MUTEX_NORMAL_FLAG: i32 = 0x8000000;

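// For instance, on glibc PTHREAD_MUTEX_NORMAL and PTHREAD_MUTEX_DEFAULT are both 0, so a kind
// stored as plain 0 means "default", while an explicitly requested normal mutex is stored as
// 0 | PTHREAD_MUTEX_NORMAL_FLAG = 0x8000000. (Illustration only; the exact constant values are
// whatever `eval_libc` returns for the target.)
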
fn is_mutex_kind_default<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    kind: Scalar<Tag>,
) -> InterpResult<'tcx, bool> {
    Ok(kind == ecx.eval_libc("PTHREAD_MUTEX_DEFAULT")?)
}

fn is_mutex_kind_normal<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    kind: Scalar<Tag>,
) -> InterpResult<'tcx, bool> {
    let kind = kind.to_i32()?;
    let mutex_normal_kind = ecx.eval_libc("PTHREAD_MUTEX_NORMAL")?.to_i32()?;
    Ok(kind == (mutex_normal_kind | PTHREAD_MUTEX_NORMAL_FLAG))
}

fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    attr_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    ecx.read_scalar_at_offset(attr_op, 0, ecx.machine.layouts.i32)
}

fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    attr_op: &OpTy<'tcx, Tag>,
    kind: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    ecx.write_scalar_at_offset(attr_op, 0, kind, layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.i32))
}

// pthread_mutex_t is between 24 and 48 bytes, depending on the platform.
//
// Our chosen memory layout for the emulated mutex (does not have to match the platform layout!):
// bytes 0-3: reserved for signature on macOS
// (need to avoid this because it is set by static initializer macros)
// bytes 4-7: mutex id as u32 or 0 if id is not assigned yet.
// bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
// (the kind has to be at its offset for compatibility with static initializer macros)

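// Note (informal): this layout is chosen so that libc static initializers keep working under
// emulation. PTHREAD_MUTEX_INITIALIZER zero-initializes the bytes we use for the id (so it reads
// as "not assigned yet"), and on glibc the kind bytes also read as 0, i.e. PTHREAD_MUTEX_DEFAULT;
// macOS initializers additionally write a signature into bytes 0-3, which we therefore avoid.
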
fn mutex_get_kind<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    mutex_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
    ecx.read_scalar_at_offset_atomic(
        mutex_op,
        offset,
        ecx.machine.layouts.i32,
        AtomicReadOp::Relaxed,
    )
}

fn mutex_set_kind<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: &OpTy<'tcx, Tag>,
    kind: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
    ecx.write_scalar_at_offset_atomic(
        mutex_op,
        offset,
        kind,
        layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.i32),
        AtomicWriteOp::Relaxed,
    )
}

fn mutex_get_id<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    mutex_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    ecx.read_scalar_at_offset_atomic(mutex_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Relaxed)
}

fn mutex_set_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: &OpTy<'tcx, Tag>,
    id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    ecx.write_scalar_at_offset_atomic(
        mutex_op,
        4,
        id,
        layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32),
        AtomicWriteOp::Relaxed,
    )
}

fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, MutexId> {
    let value_place = ecx.deref_operand_and_offset(mutex_op, 4, ecx.machine.layouts.u32)?;

    ecx.mutex_get_or_create(|ecx, next_id| {
        let (old, success) = ecx
            .atomic_compare_exchange_scalar(
                &value_place,
                &ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
                next_id.to_u32_scalar().into(),
                AtomicRwOp::Relaxed,
                AtomicReadOp::Relaxed,
                false,
            )?
            .to_scalar_pair()
            .expect("compare_exchange returns a scalar pair");

        Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
            // Caller of the closure needs to allocate next_id
            None
        } else {
            Some(MutexId::from_u32(old.to_u32().expect("layout is u32")))
        })
    })
}

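// How the lazy id assignment above works (the rwlock and condvar variants below do the same):
// the id field starts out as 0 ("unassigned"). A relaxed compare-exchange from 0 to the candidate
// `next_id` either succeeds, in which case the closure returns `None` and the caller allocates
// `next_id` as a fresh mutex, or it fails because an id was already installed, in which case the
// existing id is returned.
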
// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
//
// Our chosen memory layout for the emulated rwlock (does not have to match the platform layout!):
// bytes 0-3: reserved for signature on macOS
// (need to avoid this because it is set by static initializer macros)
// bytes 4-7: rwlock id as u32 or 0 if id is not assigned yet.

fn rwlock_get_id<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    rwlock_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    ecx.read_scalar_at_offset_atomic(rwlock_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Relaxed)
}

fn rwlock_set_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: &OpTy<'tcx, Tag>,
    id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    ecx.write_scalar_at_offset_atomic(
        rwlock_op,
        4,
        id,
        layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32),
        AtomicWriteOp::Relaxed,
    )
}

fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, RwLockId> {
    let value_place = ecx.deref_operand_and_offset(rwlock_op, 4, ecx.machine.layouts.u32)?;

    ecx.rwlock_get_or_create(|ecx, next_id| {
        let (old, success) = ecx
            .atomic_compare_exchange_scalar(
                &value_place,
                &ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
                next_id.to_u32_scalar().into(),
                AtomicRwOp::Relaxed,
                AtomicReadOp::Relaxed,
                false,
            )?
            .to_scalar_pair()
            .expect("compare_exchange returns a scalar pair");

        Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
            // Caller of the closure needs to allocate next_id
            None
        } else {
            Some(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
        })
    })
}

// pthread_condattr_t
//
// Our chosen memory layout for emulation (does not have to match the platform layout!):
// store an i32 in the first four bytes equal to the corresponding libc clock id constant
// (e.g. CLOCK_REALTIME).

fn condattr_get_clock_id<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    attr_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    ecx.read_scalar_at_offset(attr_op, 0, ecx.machine.layouts.i32)
}

fn condattr_set_clock_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    attr_op: &OpTy<'tcx, Tag>,
    clock_id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    ecx.write_scalar_at_offset(
        attr_op,
        0,
        clock_id,
        layout_of_maybe_uninit(ecx.tcx, ecx.machine.layouts.i32.ty),
    )
}

// Our chosen memory layout for the emulated condition variable (does not have
// to match the platform layout!):
//
// bytes 0-3: reserved for signature on macOS
// bytes 4-7: the condition variable id as u32 or 0 if id is not assigned yet.
// bytes 8-11: the clock id constant as i32

fn cond_get_id<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    cond_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    ecx.read_scalar_at_offset_atomic(cond_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Relaxed)
}

fn cond_set_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    cond_op: &OpTy<'tcx, Tag>,
    id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    ecx.write_scalar_at_offset_atomic(
        cond_op,
        4,
        id,
        layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32),
        AtomicWriteOp::Relaxed,
    )
}

fn cond_get_or_create_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    cond_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, CondvarId> {
    let value_place = ecx.deref_operand_and_offset(cond_op, 4, ecx.machine.layouts.u32)?;

    ecx.condvar_get_or_create(|ecx, next_id| {
        let (old, success) = ecx
            .atomic_compare_exchange_scalar(
                &value_place,
                &ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
                next_id.to_u32_scalar().into(),
                AtomicRwOp::Relaxed,
                AtomicReadOp::Relaxed,
                false,
            )?
            .to_scalar_pair()
            .expect("compare_exchange returns a scalar pair");

        Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
            // Caller of the closure needs to allocate next_id
            None
        } else {
            Some(CondvarId::from_u32(old.to_u32().expect("layout is u32")))
        })
    })
}

fn cond_get_clock_id<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    cond_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    ecx.read_scalar_at_offset(cond_op, 8, ecx.machine.layouts.i32)
}

fn cond_set_clock_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    cond_op: &OpTy<'tcx, Tag>,
    clock_id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    ecx.write_scalar_at_offset(
        cond_op,
        8,
        clock_id,
        layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.i32),
    )
}

/// Try to reacquire the mutex associated with the condition variable after we
/// were signalled.
fn reacquire_cond_mutex<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    thread: ThreadId,
    mutex: MutexId,
) -> InterpResult<'tcx> {
    ecx.unblock_thread(thread);
    if ecx.mutex_is_locked(mutex) {
        ecx.mutex_enqueue_and_block(mutex, thread);
    } else {
        ecx.mutex_lock(mutex, thread);
    }
    Ok(())
}

/// After a thread waiting on a condvar was signalled:
/// reacquire the mutex associated with the condition variable and remove the
/// timeout callback if any was registered.
fn post_cond_signal<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    thread: ThreadId,
    mutex: MutexId,
) -> InterpResult<'tcx> {
    reacquire_cond_mutex(ecx, thread, mutex)?;
    // Waiting for the mutex is not included in the waiting time, because we
    // always need to acquire the mutex, even if we time out.
    ecx.unregister_timeout_callback_if_exists(thread);
    Ok(())
}

/// Release the mutex associated with the condition variable because we are
/// entering the waiting state.
fn release_cond_mutex_and_block<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    active_thread: ThreadId,
    mutex: MutexId,
) -> InterpResult<'tcx> {
    if let Some(old_locked_count) = ecx.mutex_unlock(mutex, active_thread) {
        if old_locked_count != 1 {
            throw_unsup_format!("awaiting on a lock acquired multiple times is not supported");
        }
    } else {
        throw_ub_format!("awaiting on a mutex that is unlocked or owned by a different thread");
    }
    ecx.block_thread(active_thread);
    Ok(())
}

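// A rough sketch of the condvar protocol implemented by the helpers above: `pthread_cond_wait`
// releases the mutex and blocks the thread (`release_cond_mutex_and_block`), registering it with
// the condvar; a signal or a timeout then wakes the thread and makes it reacquire the mutex
// (`reacquire_cond_mutex` / `post_cond_signal`) before the wait call returns.
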
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    fn pthread_mutexattr_init(&mut self, attr_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?;
        mutexattr_set_kind(this, attr_op, default_kind)?;

        Ok(0)
    }

    fn pthread_mutexattr_settype(
        &mut self,
        attr_op: &OpTy<'tcx, Tag>,
        kind_op: &OpTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let kind = this.read_scalar(kind_op)?.check_init()?;
        if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
            // In the `glibc` implementation, the numeric values of
            // `PTHREAD_MUTEX_NORMAL` and `PTHREAD_MUTEX_DEFAULT` are equal.
            // However, a mutex created by explicitly passing the
            // `PTHREAD_MUTEX_NORMAL` type has, in some cases, different behaviour
            // from the default mutex for which the type was not explicitly
            // specified. For a more detailed discussion, please see
            // https://github.com/rust-lang/miri/issues/1419.
            //
            // To distinguish these two cases in already constructed mutexes, we
            // use the same trick as glibc: for the case when
            // `pthread_mutexattr_settype` is called explicitly, we set the
            // `PTHREAD_MUTEX_NORMAL_FLAG` flag.
            let normal_kind = kind.to_i32()? | PTHREAD_MUTEX_NORMAL_FLAG;
            // Check that after setting the flag, the kind is distinguishable
            // from all other kinds.
            assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_DEFAULT")?.to_i32()?);
            assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?.to_i32()?);
            assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?.to_i32()?);
            mutexattr_set_kind(this, attr_op, Scalar::from_i32(normal_kind))?;
        } else if kind == this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
            || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
            || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?
        {
            mutexattr_set_kind(this, attr_op, kind)?;
        } else {
            let einval = this.eval_libc_i32("EINVAL")?;
            return Ok(einval);
        }

        Ok(0)
    }

    fn pthread_mutexattr_destroy(&mut self, attr_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        // Destroying an uninit pthread_mutexattr is UB, so check to make sure it's not uninit.
        mutexattr_get_kind(this, attr_op)?.check_init()?;

        // To catch double-destroys, we de-initialize the mutexattr.
        // This is technically not right and might lead to false positives. For example, the below
        // code is *likely* sound, even assuming uninit numbers are UB, but Miri complains.
        //
        // let mut x: MaybeUninit<libc::pthread_mutexattr_t> = MaybeUninit::zeroed();
        // libc::pthread_mutexattr_init(x.as_mut_ptr());
        // libc::pthread_mutexattr_destroy(x.as_mut_ptr());
        //
        // However, the way libstd uses the pthread APIs works in our favor here, so we can get away with this.
        // This can always be revisited to have some external state to catch double-destroys
        // but not complain about the above code. See https://github.com/rust-lang/miri/pull/1933.

        mutexattr_set_kind(this, attr_op, ScalarMaybeUninit::Uninit)?;

        Ok(0)
    }

    fn pthread_mutex_init(
        &mut self,
        mutex_op: &OpTy<'tcx, Tag>,
        attr_op: &OpTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let attr = this.read_pointer(attr_op)?;
        let kind = if this.ptr_is_null(attr)? {
            this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
        } else {
            mutexattr_get_kind(this, attr_op)?.check_init()?
        };

        // Write 0 to use the same code path as the static initializers.
        mutex_set_id(this, mutex_op, Scalar::from_i32(0))?;

        mutex_set_kind(this, mutex_op, kind)?;

        Ok(0)
    }

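    // Informal summary of the relocking behaviour implemented below (the active thread locking a
    // mutex it already holds): DEFAULT is UB, NORMAL deadlocks, ERRORCHECK returns EDEADLK, and
    // RECURSIVE simply locks again.
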
    fn pthread_mutex_lock(&mut self, mutex_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let kind = mutex_get_kind(this, mutex_op)?.check_init()?;
        let id = mutex_get_or_create_id(this, mutex_op)?;
        let active_thread = this.get_active_thread();

        if this.mutex_is_locked(id) {
            let owner_thread = this.mutex_get_owner(id);
            if owner_thread != active_thread {
                // Enqueue the active thread.
                this.mutex_enqueue_and_block(id, active_thread);
                Ok(0)
            } else {
                // Trying to acquire the same mutex again.
                if is_mutex_kind_default(this, kind)? {
                    throw_ub_format!("trying to acquire already locked default mutex");
                } else if is_mutex_kind_normal(this, kind)? {
                    throw_machine_stop!(TerminationInfo::Deadlock);
                } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
                    this.eval_libc_i32("EDEADLK")
                } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
                    this.mutex_lock(id, active_thread);
                    Ok(0)
                } else {
                    throw_unsup_format!(
                        "called pthread_mutex_lock on an unsupported type of mutex"
                    );
                }
            }
        } else {
            // The mutex is unlocked. Let's lock it.
            this.mutex_lock(id, active_thread);
            Ok(0)
        }
    }

    fn pthread_mutex_trylock(&mut self, mutex_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let kind = mutex_get_kind(this, mutex_op)?.check_init()?;
        let id = mutex_get_or_create_id(this, mutex_op)?;
        let active_thread = this.get_active_thread();

        if this.mutex_is_locked(id) {
            let owner_thread = this.mutex_get_owner(id);
            if owner_thread != active_thread {
                this.eval_libc_i32("EBUSY")
            } else {
                if is_mutex_kind_default(this, kind)?
                    || is_mutex_kind_normal(this, kind)?
                    || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
                {
                    this.eval_libc_i32("EBUSY")
                } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
                    this.mutex_lock(id, active_thread);
                    Ok(0)
                } else {
                    throw_unsup_format!(
                        "called pthread_mutex_trylock on an unsupported type of mutex"
                    );
                }
            }
        } else {
            // The mutex is unlocked. Let's lock it.
            this.mutex_lock(id, active_thread);
            Ok(0)
        }
    }

    fn pthread_mutex_unlock(&mut self, mutex_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let kind = mutex_get_kind(this, mutex_op)?.check_init()?;
        let id = mutex_get_or_create_id(this, mutex_op)?;
        let active_thread = this.get_active_thread();

        if let Some(_old_locked_count) = this.mutex_unlock(id, active_thread) {
            // The mutex was locked by the current thread.
            Ok(0)
        } else {
            // The mutex was locked by another thread or not locked at all. See
            // the “Unlock When Not Owner” column in
            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_unlock.html.
            if is_mutex_kind_default(this, kind)? {
                throw_ub_format!(
                    "unlocked a default mutex that was not locked by the current thread"
                );
            } else if is_mutex_kind_normal(this, kind)? {
                throw_ub_format!(
                    "unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread"
                );
            } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
                this.eval_libc_i32("EPERM")
            } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
                this.eval_libc_i32("EPERM")
            } else {
                throw_unsup_format!("called pthread_mutex_unlock on an unsupported type of mutex");
            }
        }
    }

    fn pthread_mutex_destroy(&mut self, mutex_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let id = mutex_get_or_create_id(this, mutex_op)?;

        if this.mutex_is_locked(id) {
            throw_ub_format!("destroyed a locked mutex");
        }

        // Destroying an uninit pthread_mutex is UB, so check to make sure it's not uninit.
        mutex_get_kind(this, mutex_op)?.check_init()?;
        mutex_get_id(this, mutex_op)?.check_init()?;

        // This might lead to false positives, see comment in pthread_mutexattr_destroy
        mutex_set_kind(this, mutex_op, ScalarMaybeUninit::Uninit)?;
        mutex_set_id(this, mutex_op, ScalarMaybeUninit::Uninit)?;
        // FIXME: delete interpreter state associated with this mutex.

        Ok(0)
    }

    fn pthread_rwlock_rdlock(&mut self, rwlock_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let id = rwlock_get_or_create_id(this, rwlock_op)?;
        let active_thread = this.get_active_thread();

        if this.rwlock_is_write_locked(id) {
            this.rwlock_enqueue_and_block_reader(id, active_thread);
            Ok(0)
        } else {
            this.rwlock_reader_lock(id, active_thread);
            Ok(0)
        }
    }

    fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let id = rwlock_get_or_create_id(this, rwlock_op)?;
        let active_thread = this.get_active_thread();

        if this.rwlock_is_write_locked(id) {
            this.eval_libc_i32("EBUSY")
        } else {
            this.rwlock_reader_lock(id, active_thread);
            Ok(0)
        }
    }

    fn pthread_rwlock_wrlock(&mut self, rwlock_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let id = rwlock_get_or_create_id(this, rwlock_op)?;
        let active_thread = this.get_active_thread();

        if this.rwlock_is_locked(id) {
            // Note: this will deadlock if the lock is already locked by this
            // thread in any way.
            //
            // Relevant documentation:
            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html
            // An in-depth discussion on this topic:
            // https://github.com/rust-lang/rust/issues/53127
            //
            // FIXME: Detect and report the deadlock proactively. (We currently
            // report the deadlock only when no thread can continue execution,
            // but we could detect that this lock is already locked and report
            // an error immediately.)
            this.rwlock_enqueue_and_block_writer(id, active_thread);
        } else {
            this.rwlock_writer_lock(id, active_thread);
        }

        Ok(0)
    }

    fn pthread_rwlock_trywrlock(&mut self, rwlock_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let id = rwlock_get_or_create_id(this, rwlock_op)?;
        let active_thread = this.get_active_thread();

        if this.rwlock_is_locked(id) {
            this.eval_libc_i32("EBUSY")
        } else {
            this.rwlock_writer_lock(id, active_thread);
            Ok(0)
        }
    }

    fn pthread_rwlock_unlock(&mut self, rwlock_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let id = rwlock_get_or_create_id(this, rwlock_op)?;
        let active_thread = this.get_active_thread();

        if this.rwlock_reader_unlock(id, active_thread) {
            Ok(0)
        } else if this.rwlock_writer_unlock(id, active_thread) {
            Ok(0)
        } else {
            throw_ub_format!("unlocked an rwlock that was not locked by the active thread");
        }
    }

    fn pthread_rwlock_destroy(&mut self, rwlock_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let id = rwlock_get_or_create_id(this, rwlock_op)?;

        if this.rwlock_is_locked(id) {
            throw_ub_format!("destroyed a locked rwlock");
        }

        // Destroying an uninit pthread_rwlock is UB, so check to make sure it's not uninit.
        rwlock_get_id(this, rwlock_op)?.check_init()?;

        // This might lead to false positives, see comment in pthread_mutexattr_destroy
        rwlock_set_id(this, rwlock_op, ScalarMaybeUninit::Uninit)?;
        // FIXME: delete interpreter state associated with this rwlock.

        Ok(0)
    }

    fn pthread_condattr_init(&mut self, attr_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        // The default value of the clock attribute shall refer to the system
        // clock.
        // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_condattr_setclock.html
        let default_clock_id = this.eval_libc("CLOCK_REALTIME")?;
        condattr_set_clock_id(this, attr_op, default_clock_id)?;

        Ok(0)
    }

    fn pthread_condattr_setclock(
        &mut self,
        attr_op: &OpTy<'tcx, Tag>,
        clock_id_op: &OpTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let clock_id = this.read_scalar(clock_id_op)?.check_init()?;
        if clock_id == this.eval_libc("CLOCK_REALTIME")?
            || clock_id == this.eval_libc("CLOCK_MONOTONIC")?
        {
            condattr_set_clock_id(this, attr_op, clock_id)?;
        } else {
            let einval = this.eval_libc_i32("EINVAL")?;
            return Ok(einval);
        }

        Ok(0)
    }

    fn pthread_condattr_getclock(
        &mut self,
        attr_op: &OpTy<'tcx, Tag>,
        clk_id_op: &OpTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let clock_id = condattr_get_clock_id(this, attr_op)?;
        this.write_scalar(clock_id, &this.deref_operand(clk_id_op)?.into())?;

        Ok(0)
    }

    fn pthread_condattr_destroy(&mut self, attr_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        // Destroying an uninit pthread_condattr is UB, so check to make sure it's not uninit.
        condattr_get_clock_id(this, attr_op)?.check_init()?;

        // This might lead to false positives, see comment in pthread_mutexattr_destroy
        condattr_set_clock_id(this, attr_op, ScalarMaybeUninit::Uninit)?;

        Ok(0)
    }

    fn pthread_cond_init(
        &mut self,
        cond_op: &OpTy<'tcx, Tag>,
        attr_op: &OpTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let attr = this.read_pointer(attr_op)?;
        let clock_id = if this.ptr_is_null(attr)? {
            this.eval_libc("CLOCK_REALTIME")?
        } else {
            condattr_get_clock_id(this, attr_op)?.check_init()?
        };

        // Write 0 to use the same code path as the static initializers.
        cond_set_id(this, cond_op, Scalar::from_i32(0))?;

        cond_set_clock_id(this, cond_op, clock_id)?;

        Ok(0)
    }

    fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();
        let id = cond_get_or_create_id(this, cond_op)?;
        if let Some((thread, mutex)) = this.condvar_signal(id) {
            post_cond_signal(this, thread, mutex)?;
        }

        Ok(0)
    }

    fn pthread_cond_broadcast(&mut self, cond_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();
        let id = cond_get_or_create_id(this, cond_op)?;

        while let Some((thread, mutex)) = this.condvar_signal(id) {
            post_cond_signal(this, thread, mutex)?;
        }

        Ok(0)
    }

    fn pthread_cond_wait(
        &mut self,
        cond_op: &OpTy<'tcx, Tag>,
        mutex_op: &OpTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let id = cond_get_or_create_id(this, cond_op)?;
        let mutex_id = mutex_get_or_create_id(this, mutex_op)?;
        let active_thread = this.get_active_thread();

        release_cond_mutex_and_block(this, active_thread, mutex_id)?;
        this.condvar_wait(id, active_thread, mutex_id);

        Ok(0)
    }

    fn pthread_cond_timedwait(
        &mut self,
        cond_op: &OpTy<'tcx, Tag>,
        mutex_op: &OpTy<'tcx, Tag>,
        abstime_op: &OpTy<'tcx, Tag>,
        dest: &PlaceTy<'tcx, Tag>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        this.check_no_isolation("`pthread_cond_timedwait`")?;

        let id = cond_get_or_create_id(this, cond_op)?;
        let mutex_id = mutex_get_or_create_id(this, mutex_op)?;
        let active_thread = this.get_active_thread();

        // Extract the timeout.
        let clock_id = cond_get_clock_id(this, cond_op)?.to_i32()?;
        let duration = match this.read_timespec(&this.deref_operand(abstime_op)?)? {
            Some(duration) => duration,
            None => {
                let einval = this.eval_libc("EINVAL")?;
                this.write_scalar(einval, dest)?;
                return Ok(());
            }
        };

        let timeout_time = if clock_id == this.eval_libc_i32("CLOCK_REALTIME")? {
            Time::RealTime(SystemTime::UNIX_EPOCH.checked_add(duration).unwrap())
        } else if clock_id == this.eval_libc_i32("CLOCK_MONOTONIC")? {
            Time::Monotonic(this.machine.time_anchor.checked_add(duration).unwrap())
        } else {
            throw_unsup_format!("unsupported clock id: {}", clock_id);
        };

        release_cond_mutex_and_block(this, active_thread, mutex_id)?;
        this.condvar_wait(id, active_thread, mutex_id);

        // We return success for now and override it in the timeout callback.
        this.write_scalar(Scalar::from_i32(0), dest)?;

        let dest = *dest;

        // Register the timeout callback.
        this.register_timeout_callback(
            active_thread,
            timeout_time,
            Box::new(move |ecx| {
                // We are not waiting for the condvar any more, wait for the
                // mutex instead.
                reacquire_cond_mutex(ecx, active_thread, mutex_id)?;

                // Remove the thread from the condition variable.
                ecx.condvar_remove_waiter(id, active_thread);

                // Set the return value: we timed out.
                let etimedout = ecx.eval_libc("ETIMEDOUT")?;
                ecx.write_scalar(etimedout, &dest)?;

                Ok(())
            }),
        );

        Ok(())
    }

    fn pthread_cond_destroy(&mut self, cond_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let id = cond_get_or_create_id(this, cond_op)?;
        if this.condvar_is_awaited(id) {
            throw_ub_format!("destroying an awaited condition variable");
        }

        // Destroying an uninit pthread_cond is UB, so check to make sure it's not uninit.
        cond_get_id(this, cond_op)?.check_init()?;
        cond_get_clock_id(this, cond_op)?.check_init()?;

        // This might lead to false positives, see comment in pthread_mutexattr_destroy
        cond_set_id(this, cond_op, ScalarMaybeUninit::Uninit)?;
        cond_set_clock_id(this, cond_op, ScalarMaybeUninit::Uninit)?;
        // FIXME: delete interpreter state associated with this condvar.

        Ok(0)
    }
}

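/// Returns the layout of `MaybeUninit<T>` for the given payload type. The setters above write
/// through this layout (rather than a plain `i32`/`u32` layout) so that they can also store
/// `ScalarMaybeUninit::Uninit` when de-initializing a field, since `MaybeUninit` permits
/// uninitialized bytes.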
fn layout_of_maybe_uninit<'tcx>(tcx: TyCtxtAt<'tcx>, param: Ty<'tcx>) -> TyAndLayout<'tcx> {
    let def_id = tcx.require_lang_item(LangItem::MaybeUninit, None);
    let ty = tcx.bound_type_of(def_id).subst(*tcx, &[param.into()]);

    let param_env = tcx.param_env(def_id);
    tcx.layout_of(param_env.and(ty)).unwrap()
}