1 use std::convert::TryInto;
2 use std::time::{Duration, SystemTime};
5 use rustc_middle::ty::{layout::TyAndLayout, TyKind, TypeAndMut};
6 use rustc_target::abi::{LayoutOf, Size};
8 use crate::stacked_borrows::Tag;
9 use crate::thread::Time;
/// Check that `operand` is a raw pointer whose pointee type is at least
/// `min_size` bytes large, so that subsequent fixed-offset reads/writes into
/// the pointed-to object are guaranteed to be in bounds.
fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    operand: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ()> {
    // Peel the raw-pointer type to obtain the pointee type. A non-pointer
    // argument is a bug in the calling shim, hence `panic!` rather than UB.
    let target_ty = match operand.layout.ty.kind {
        TyKind::RawPtr(TypeAndMut { ty, mutbl: _ }) => ty,
        _ => panic!("Argument to pthread function was not a raw pointer"),
    // Compute the pointee's layout and check that it is big enough.
    let target_layout = ecx.layout_of(target_ty)?;
    assert!(target_layout.size.bytes() >= min_size);
/// Read a scalar of the given `layout` at a byte offset inside the object
/// pointed to by the pointer operand, after verifying the target object is at
/// least `min_size` bytes (see `assert_ptr_target_min_size`).
fn get_at_offset<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    layout: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    // Ensure that the following read at an offset to the attr pointer is within bounds
    assert_ptr_target_min_size(ecx, op, min_size)?;
    let op_place = ecx.deref_operand(op)?;
    // Project to the field at `offset`; no metadata needed since the field is sized.
    let value_place = op_place.offset(Size::from_bytes(offset), MemPlaceMeta::None, layout, ecx)?;
    ecx.read_scalar(value_place.into())
/// Write a scalar `value` of the given `layout` at a byte offset inside the
/// object pointed to by the pointer operand, after verifying the target object
/// is at least `min_size` bytes (see `assert_ptr_target_min_size`).
fn set_at_offset<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    value: impl Into<ScalarMaybeUninit<Tag>>,
    layout: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, ()> {
    // Ensure that the following write at an offset to the attr pointer is within bounds
    assert_ptr_target_min_size(ecx, op, min_size)?;
    let op_place = ecx.deref_operand(op)?;
    // Project to the field at `offset`; no metadata needed since the field is sized.
    let value_place = op_place.offset(Size::from_bytes(offset), MemPlaceMeta::None, layout, ecx)?;
    ecx.write_scalar(value.into(), value_place.into())
56 // pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.
58 // Our chosen memory layout for emulation (does not have to match the platform layout!):
59 // store an i32 in the first four bytes equal to the corresponding libc mutex kind constant
60 // (e.g. PTHREAD_MUTEX_NORMAL).
/// A flag that allows to distinguish `PTHREAD_MUTEX_NORMAL` from
/// `PTHREAD_MUTEX_DEFAULT`. Since in `glibc` they have the same numeric values,
/// but different behaviour, we need a way to distinguish them. We do this by
/// setting this bit flag to the `PTHREAD_MUTEX_NORMAL` mutexes. See the comment
/// in `pthread_mutexattr_settype` function.
const PTHREAD_MUTEX_NORMAL_FLAG: i32 = 0x8000000;

/// Minimum size (in bytes) that `pthread_mutexattr_t` has on any supported
/// platform; used to bounds-check accesses into the emulated attribute object.
const PTHREAD_MUTEXATTR_T_MIN_SIZE: u64 = 4;
/// Check whether the given mutex kind equals `PTHREAD_MUTEX_DEFAULT`.
fn is_mutex_kind_default<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
) -> InterpResult<'tcx, bool> {
    Ok(kind == ecx.eval_libc("PTHREAD_MUTEX_DEFAULT")?)
/// Check whether the given mutex kind is `PTHREAD_MUTEX_NORMAL` as set by an
/// explicit `pthread_mutexattr_settype` call, i.e. with the marker flag set
/// (see `PTHREAD_MUTEX_NORMAL_FLAG`).
fn is_mutex_kind_normal<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
) -> InterpResult<'tcx, bool> {
    let kind = kind.to_i32()?;
    let mutex_normal_kind = ecx.eval_libc("PTHREAD_MUTEX_NORMAL")?.to_i32()?;
    // Explicitly-normal mutexes carry the extra flag bit in their stored kind.
    Ok(kind == (mutex_normal_kind | PTHREAD_MUTEX_NORMAL_FLAG))
/// Read the mutex kind (i32 at offset 0) from an emulated `pthread_mutexattr_t`.
fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    attr_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, attr_op, 0, ecx.machine.layouts.i32, PTHREAD_MUTEXATTR_T_MIN_SIZE)
/// Write the mutex kind (i32 at offset 0) into an emulated `pthread_mutexattr_t`.
fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    attr_op: OpTy<'tcx, Tag>,
    kind: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, attr_op, 0, kind, ecx.machine.layouts.i32, PTHREAD_MUTEXATTR_T_MIN_SIZE)
102 // pthread_mutex_t is between 24 and 48 bytes, depending on the platform.
104 // Our chosen memory layout for the emulated mutex (does not have to match the platform layout!):
105 // bytes 0-3: reserved for signature on macOS
106 // (need to avoid this because it is set by static initializer macros)
107 // bytes 4-7: mutex id as u32 or 0 if id is not assigned yet.
108 // bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
109 // (the kind has to be at its offset for compatibility with static initializer macros)
111 const PTHREAD_MUTEX_T_MIN_SIZE: u64 = 24;
/// Read the mutex kind (i32) from an emulated `pthread_mutex_t`. The kind
/// lives at offset 16 on 64-bit targets and 12 on 32-bit ones, matching the
/// platform's static initializer macros (see the layout comment above).
fn mutex_get_kind<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
    get_at_offset(ecx, mutex_op, offset, ecx.machine.layouts.i32, PTHREAD_MUTEX_T_MIN_SIZE)
/// Write the mutex kind (i32) into an emulated `pthread_mutex_t`, at the same
/// pointer-size-dependent offset that `mutex_get_kind` reads from.
fn mutex_set_kind<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
    kind: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
    set_at_offset(ecx, mutex_op, offset, kind, ecx.machine.layouts.i32, PTHREAD_MUTEX_T_MIN_SIZE)
/// Read the mutex id (u32 at offset 4) from an emulated `pthread_mutex_t`.
/// Offset 0 is skipped because macOS stores a signature there.
fn mutex_get_id<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, mutex_op, 4, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
/// Write the mutex id (u32 at offset 4) into an emulated `pthread_mutex_t`.
fn mutex_set_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
    id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, mutex_op, 4, id, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
/// Get the `MutexId` stored in the `pthread_mutex_t` object, lazily creating
/// and storing a fresh one if the id field still holds the default value 0
/// (the state produced by static initializers and `pthread_mutex_init`).
fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, MutexId> {
    let id = mutex_get_id(ecx, mutex_op)?.to_u32()?;
    // 0 is a default value and also not a valid mutex id. Need to allocate
    let id = ecx.mutex_create();
    mutex_set_id(ecx, mutex_op, id.to_u32_scalar())?;
    Ok(MutexId::from_u32(id))
161 // pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
163 // Our chosen memory layout for the emulated rwlock (does not have to match the platform layout!):
164 // bytes 0-3: reserved for signature on macOS
165 // (need to avoid this because it is set by static initializer macros)
166 // bytes 4-7: rwlock id as u32 or 0 if id is not assigned yet.
168 const PTHREAD_RWLOCK_T_MIN_SIZE: u64 = 32;
/// Read the rwlock id (u32 at offset 4) from an emulated `pthread_rwlock_t`.
/// Offset 0 is skipped because macOS stores a signature there.
fn rwlock_get_id<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, rwlock_op, 4, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
/// Write the rwlock id (u32 at offset 4) into an emulated `pthread_rwlock_t`.
fn rwlock_set_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
    id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, rwlock_op, 4, id, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
/// Get the `RwLockId` stored in the `pthread_rwlock_t` object, lazily creating
/// and storing a fresh one if the id field still holds the default value 0.
fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, RwLockId> {
    let id = rwlock_get_id(ecx, rwlock_op)?.to_u32()?;
    // 0 is a default value and also not a valid rwlock id. Need to allocate
    // a new read-write lock.
    let id = ecx.rwlock_create();
    rwlock_set_id(ecx, rwlock_op, id.to_u32_scalar())?;
    Ok(RwLockId::from_u32(id))
201 // pthread_condattr_t
203 // Our chosen memory layout for emulation (does not have to match the platform layout!):
204 // store an i32 in the first four bytes equal to the corresponding libc clock id constant
205 // (e.g. CLOCK_REALTIME).
207 const PTHREAD_CONDATTR_T_MIN_SIZE: u64 = 4;
/// Read the clock id (i32 at offset 0) from an emulated `pthread_condattr_t`.
fn condattr_get_clock_id<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    attr_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, attr_op, 0, ecx.machine.layouts.i32, PTHREAD_CONDATTR_T_MIN_SIZE)
/// Write the clock id (i32 at offset 0) into an emulated `pthread_condattr_t`.
fn condattr_set_clock_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    attr_op: OpTy<'tcx, Tag>,
    clock_id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, attr_op, 0, clock_id, ecx.machine.layouts.i32, PTHREAD_CONDATTR_T_MIN_SIZE)
226 // Our chosen memory layout for the emulated conditional variable (does not have
227 // to match the platform layout!):
229 // bytes 0-3: reserved for signature on macOS
230 // bytes 4-7: the conditional variable id as u32 or 0 if id is not assigned yet.
231 // bytes 8-11: the clock id constant as i32
233 const PTHREAD_COND_T_MIN_SIZE: u64 = 12;
/// Read the condvar id (u32 at offset 4) from an emulated `pthread_cond_t`.
/// Offset 0 is skipped because macOS stores a signature there.
fn cond_get_id<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    cond_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, cond_op, 4, ecx.machine.layouts.u32, PTHREAD_COND_T_MIN_SIZE)
/// Write the condvar id (u32 at offset 4) into an emulated `pthread_cond_t`.
fn cond_set_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    cond_op: OpTy<'tcx, Tag>,
    id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, cond_op, 4, id, ecx.machine.layouts.u32, PTHREAD_COND_T_MIN_SIZE)
/// Get the `CondvarId` stored in the `pthread_cond_t` object, lazily creating
/// and storing a fresh one if the id field still holds the default value 0.
fn cond_get_or_create_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    cond_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, CondvarId> {
    let id = cond_get_id(ecx, cond_op)?.to_u32()?;
    // 0 is a default value and also not a valid conditional variable id.
    // Need to allocate a new id.
    let id = ecx.condvar_create();
    cond_set_id(ecx, cond_op, id.to_u32_scalar())?;
    Ok(CondvarId::from_u32(id))
/// Read the clock id (i32 at offset 8) from an emulated `pthread_cond_t`.
fn cond_get_clock_id<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    cond_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    get_at_offset(ecx, cond_op, 8, ecx.machine.layouts.i32, PTHREAD_COND_T_MIN_SIZE)
/// Write the clock id (i32 at offset 8) into an emulated `pthread_cond_t`.
fn cond_set_clock_id<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    cond_op: OpTy<'tcx, Tag>,
    clock_id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    set_at_offset(ecx, cond_op, 8, clock_id, ecx.machine.layouts.i32, PTHREAD_COND_T_MIN_SIZE)
/// Try to reacquire the mutex associated with the condition variable after we
/// were signalled (or timed out). If the mutex is currently held by someone
/// else, the thread is re-blocked on the mutex queue instead of acquiring it.
fn reacquire_cond_mutex<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
) -> InterpResult<'tcx> {
    // The thread is done waiting on the condvar; make it runnable again.
    ecx.unblock_thread(thread);
    if ecx.mutex_is_locked(mutex) {
        // Contended: queue up and block until the mutex is handed to us.
        ecx.mutex_enqueue_and_block(mutex, thread);
        // Uncontended: take the mutex immediately.
        ecx.mutex_lock(mutex, thread);
/// After a thread waiting on a condvar was signalled:
/// Reacquire the conditional variable and remove the timeout callback if any
/// was registered (for `pthread_cond_timedwait`).
fn post_cond_signal<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
) -> InterpResult<'tcx> {
    reacquire_cond_mutex(ecx, thread, mutex)?;
    // Waiting for the mutex is not included in the waiting time because we need
    // to acquire the mutex always even if we get a timeout.
    ecx.unregister_timeout_callback_if_exists(thread);
/// Release the mutex associated with the condition variable because we are
/// entering the waiting state. The caller must hold the mutex exactly once
/// (recursive multi-lock is rejected) and block the thread afterwards.
fn release_cond_mutex_and_block<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    active_thread: ThreadId,
) -> InterpResult<'tcx> {
    if let Some(old_locked_count) = ecx.mutex_unlock(mutex, active_thread) {
        // A recursive mutex locked more than once cannot be fully released
        // by a single unlock, which `pthread_cond_wait` requires.
        if old_locked_count != 1 {
            throw_unsup_format!("awaiting on a lock acquired multiple times is not supported");
        // `mutex_unlock` returned `None`: we did not own the mutex — UB per POSIX.
        throw_ub_format!("awaiting on unlocked or owned by a different thread mutex");
    // Park the thread until it is signalled (or its timeout fires).
    ecx.block_thread(active_thread);
330 impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
331 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Emulate `pthread_mutexattr_init`: store the default mutex kind in the
/// attribute object.
fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?;
    mutexattr_set_kind(this, attr_op, default_kind)?;
/// Emulate `pthread_mutexattr_settype`: validate and store the requested mutex
/// kind. `PTHREAD_MUTEX_NORMAL` is tagged with `PTHREAD_MUTEX_NORMAL_FLAG` so
/// it stays distinguishable from `PTHREAD_MUTEX_DEFAULT`; unknown kinds yield
/// `EINVAL`.
fn pthread_mutexattr_settype(
    attr_op: OpTy<'tcx, Tag>,
    kind_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    let kind = this.read_scalar(kind_op)?.not_undef()?;
    if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
        // In `glibc` implementation, the numeric values of
        // `PTHREAD_MUTEX_NORMAL` and `PTHREAD_MUTEX_DEFAULT` are equal.
        // However, a mutex created by explicitly passing
        // `PTHREAD_MUTEX_NORMAL` type has in some cases different behaviour
        // from the default mutex for which the type was not explicitly
        // specified. For a more detailed discussion, please see
        // https://github.com/rust-lang/miri/issues/1419.
        // To distinguish these two cases in already constructed mutexes, we
        // use the same trick as glibc: for the case when
        // `pthread_mutexattr_settype` is called explicitly, we set the
        // `PTHREAD_MUTEX_NORMAL_FLAG` flag.
        let normal_kind = kind.to_i32()? | PTHREAD_MUTEX_NORMAL_FLAG;
        // Check that after setting the flag, the kind is distinguishable
        // from all other kinds.
        assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_DEFAULT")?.to_i32()?);
        assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?.to_i32()?);
        assert_ne!(normal_kind, this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?.to_i32()?);
        mutexattr_set_kind(this, attr_op, Scalar::from_i32(normal_kind))?;
    } else if kind == this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
        || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
        || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?
        // Recognized kinds are stored verbatim.
        mutexattr_set_kind(this, attr_op, kind)?;
        // Any other value is rejected with EINVAL, as POSIX allows.
        let einval = this.eval_libc_i32("EINVAL")?;
/// Emulate `pthread_mutexattr_destroy`: mark the kind field as uninitialized
/// so any later use of the destroyed attribute is detected.
fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    mutexattr_set_kind(this, attr_op, ScalarMaybeUninit::Uninit)?;
/// Emulate `pthread_mutex_init`: take the kind from `attr_op` (or the default
/// if the attribute pointer is NULL) and initialize the mutex fields.
fn pthread_mutex_init(
    mutex_op: OpTy<'tcx, Tag>,
    attr_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    let attr = this.read_scalar(attr_op)?.not_undef()?;
    let kind = if this.is_null(attr)? {
        this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
        mutexattr_get_kind(this, attr_op)?.not_undef()?
    // Write 0 to use the same code path as the static initializers.
    mutex_set_id(this, mutex_op, Scalar::from_i32(0))?;
    mutex_set_kind(this, mutex_op, kind)?;
/// Emulate `pthread_mutex_lock`. Relocking behavior depends on the mutex kind:
/// default → UB, normal → deadlock, errorcheck → `EDEADLK`, recursive →
/// increments the lock count.
fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
    let id = mutex_get_or_create_id(this, mutex_op)?;
    let active_thread = this.get_active_thread();
    if this.mutex_is_locked(id) {
        let owner_thread = this.mutex_get_owner(id);
        if owner_thread != active_thread {
            // Enqueue the active thread.
            this.mutex_enqueue_and_block(id, active_thread);
            // Trying to acquire the same mutex again.
            if is_mutex_kind_default(this, kind)? {
                throw_ub_format!("trying to acquire already locked default mutex")
            } else if is_mutex_kind_normal(this, kind)? {
                // A normal mutex self-deadlocks on relock; report it as such.
                throw_machine_stop!(TerminationInfo::Deadlock);
            } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
                this.eval_libc_i32("EDEADLK")
            } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
                // Recursive mutexes may be locked repeatedly by the owner.
                this.mutex_lock(id, active_thread);
                "called pthread_mutex_lock on an unsupported type of mutex"
    // The mutex is unlocked. Let's lock it.
    this.mutex_lock(id, active_thread);
/// Emulate `pthread_mutex_trylock`: like `pthread_mutex_lock` but never
/// blocks — contended acquisition returns `EBUSY` instead.
fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
    let id = mutex_get_or_create_id(this, mutex_op)?;
    let active_thread = this.get_active_thread();
    if this.mutex_is_locked(id) {
        let owner_thread = this.mutex_get_owner(id);
        if owner_thread != active_thread {
            // Held by another thread: fail immediately, never block.
            this.eval_libc_i32("EBUSY")
            // Held by us: only recursive mutexes allow relocking here.
            if is_mutex_kind_default(this, kind)?
                || is_mutex_kind_normal(this, kind)?
                || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
                this.eval_libc_i32("EBUSY")
            } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
                this.mutex_lock(id, active_thread);
                "called pthread_mutex_trylock on an unsupported type of mutex"
    // The mutex is unlocked. Let's lock it.
    this.mutex_lock(id, active_thread);
/// Emulate `pthread_mutex_unlock`. Unlocking a mutex we do not own is UB for
/// default/normal kinds and returns `EPERM` for errorcheck/recursive kinds.
fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
    let id = mutex_get_or_create_id(this, mutex_op)?;
    let active_thread = this.get_active_thread();
    if let Some(_old_locked_count) = this.mutex_unlock(id, active_thread) {
        // The mutex was locked by the current thread.
        // The mutex was locked by another thread or not locked at all. See
        // the “Unlock When Not Owner” column in
        // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_unlock.html.
        if is_mutex_kind_default(this, kind)? {
            "unlocked a default mutex that was not locked by the current thread"
        } else if is_mutex_kind_normal(this, kind)? {
            "unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread"
        } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
            this.eval_libc_i32("EPERM")
        } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
            this.eval_libc_i32("EPERM")
            throw_unsup_format!("called pthread_mutex_unlock on an unsupported type of mutex");
/// Emulate `pthread_mutex_destroy`: destroying a locked mutex is UB; otherwise
/// poison the kind and id fields so later use of the destroyed mutex errors.
fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    let id = mutex_get_or_create_id(this, mutex_op)?;
    if this.mutex_is_locked(id) {
        throw_ub_format!("destroyed a locked mutex");
    mutex_set_kind(this, mutex_op, ScalarMaybeUninit::Uninit)?;
    mutex_set_id(this, mutex_op, ScalarMaybeUninit::Uninit)?;
/// Emulate `pthread_rwlock_rdlock`: block while a writer holds the lock,
/// otherwise take a (shared) reader lock.
fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    let id = rwlock_get_or_create_id(this, rwlock_op)?;
    let active_thread = this.get_active_thread();
    if this.rwlock_is_write_locked(id) {
        this.rwlock_enqueue_and_block_reader(id, active_thread);
        this.rwlock_reader_lock(id, active_thread);
/// Emulate `pthread_rwlock_tryrdlock`: like `rdlock`, but return `EBUSY`
/// instead of blocking when a writer holds the lock.
fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    let id = rwlock_get_or_create_id(this, rwlock_op)?;
    let active_thread = this.get_active_thread();
    if this.rwlock_is_write_locked(id) {
        this.eval_libc_i32("EBUSY")
        this.rwlock_reader_lock(id, active_thread);
/// Emulate `pthread_rwlock_wrlock`: block while the lock is held in any mode
/// (read or write), otherwise take the exclusive writer lock.
fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    let id = rwlock_get_or_create_id(this, rwlock_op)?;
    let active_thread = this.get_active_thread();
    if this.rwlock_is_locked(id) {
        // Note: this will deadlock if the lock is already locked by this
        // thread in any way.
        // Relevant documentation:
        // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html
        // An in-depth discussion on this topic:
        // https://github.com/rust-lang/rust/issues/53127
        // FIXME: Detect and report the deadlock proactively. (We currently
        // report the deadlock only when no thread can continue execution,
        // but we could detect that this lock is already locked and report
        this.rwlock_enqueue_and_block_writer(id, active_thread);
        this.rwlock_writer_lock(id, active_thread);
/// Emulate `pthread_rwlock_trywrlock`: like `wrlock`, but return `EBUSY`
/// instead of blocking when the lock is held in any mode.
fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    let id = rwlock_get_or_create_id(this, rwlock_op)?;
    let active_thread = this.get_active_thread();
    if this.rwlock_is_locked(id) {
        this.eval_libc_i32("EBUSY")
        this.rwlock_writer_lock(id, active_thread);
/// Emulate `pthread_rwlock_unlock`: release whichever side (reader or writer)
/// the active thread holds, then hand the lock to queued waiters — writers
/// first, otherwise all queued readers. Unlocking a lock we do not hold is UB.
fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    let id = rwlock_get_or_create_id(this, rwlock_op)?;
    let active_thread = this.get_active_thread();
    if this.rwlock_reader_unlock(id, active_thread) {
        // The thread was a reader.
        if this.rwlock_is_locked(id).not() {
            // No more readers owning the lock. Give it to a writer if there
            this.rwlock_dequeue_and_lock_writer(id);
    } else if Some(active_thread) == this.rwlock_writer_unlock(id) {
        // The thread was a writer.
        // We are prioritizing writers here against the readers. As a
        // result, not only readers can starve writers, but also writers can
        if this.rwlock_dequeue_and_lock_writer(id) {
            // Someone got the write lock, nice.
            // Give the lock to all readers.
            while this.rwlock_dequeue_and_lock_reader(id) {
        throw_ub_format!("unlocked an rwlock that was not locked by the active thread");
/// Emulate `pthread_rwlock_destroy`: destroying a locked rwlock is UB;
/// otherwise poison the id field so later use of the destroyed lock errors.
fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    let id = rwlock_get_or_create_id(this, rwlock_op)?;
    if this.rwlock_is_locked(id) {
        throw_ub_format!("destroyed a locked rwlock");
    rwlock_set_id(this, rwlock_op, ScalarMaybeUninit::Uninit)?;
/// Emulate `pthread_condattr_init`: store `CLOCK_REALTIME` as the clock, per
/// the POSIX-mandated default.
fn pthread_condattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    // The default value of the clock attribute shall refer to the system
    // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_condattr_setclock.html
    let default_clock_id = this.eval_libc("CLOCK_REALTIME")?;
    condattr_set_clock_id(this, attr_op, default_clock_id)?;
/// Emulate `pthread_condattr_setclock`: only `CLOCK_REALTIME` and
/// `CLOCK_MONOTONIC` are supported; other clock ids yield `EINVAL`.
fn pthread_condattr_setclock(
    attr_op: OpTy<'tcx, Tag>,
    clock_id_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    let clock_id = this.read_scalar(clock_id_op)?.not_undef()?;
    if clock_id == this.eval_libc("CLOCK_REALTIME")?
        || clock_id == this.eval_libc("CLOCK_MONOTONIC")?
        condattr_set_clock_id(this, attr_op, clock_id)?;
        let einval = this.eval_libc_i32("EINVAL")?;
/// Emulate `pthread_condattr_getclock`: copy the stored clock id into the
/// int pointed to by `clk_id_op`.
fn pthread_condattr_getclock(
    attr_op: OpTy<'tcx, Tag>,
    clk_id_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    let clock_id = condattr_get_clock_id(this, attr_op)?;
    this.write_scalar(clock_id, this.deref_operand(clk_id_op)?.into())?;
/// Emulate `pthread_condattr_destroy`: mark the clock-id field as
/// uninitialized so later use of the destroyed attribute is detected.
fn pthread_condattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    condattr_set_clock_id(this, attr_op, ScalarMaybeUninit::Uninit)?;
/// Emulate `pthread_cond_init`: take the clock id from `attr_op` (or
/// `CLOCK_REALTIME` if the attribute pointer is NULL) and initialize the
/// condvar fields.
fn pthread_cond_init(
    cond_op: OpTy<'tcx, Tag>,
    attr_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    let attr = this.read_scalar(attr_op)?.not_undef()?;
    let clock_id = if this.is_null(attr)? {
        this.eval_libc("CLOCK_REALTIME")?
        condattr_get_clock_id(this, attr_op)?.not_undef()?
    // Write 0 to use the same code path as the static initializers.
    cond_set_id(this, cond_op, Scalar::from_i32(0))?;
    cond_set_clock_id(this, cond_op, clock_id)?;
/// Emulate `pthread_cond_signal`: wake at most one waiter (if any) and have
/// it reacquire the associated mutex.
fn pthread_cond_signal(&mut self, cond_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    let id = cond_get_or_create_id(this, cond_op)?;
    if let Some((thread, mutex)) = this.condvar_signal(id) {
        post_cond_signal(this, thread, mutex)?;
/// Emulate `pthread_cond_broadcast`: wake every waiter by signalling until
/// the condvar's wait queue is drained.
fn pthread_cond_broadcast(&mut self, cond_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    let id = cond_get_or_create_id(this, cond_op)?;
    while let Some((thread, mutex)) = this.condvar_signal(id) {
        post_cond_signal(this, thread, mutex)?;
/// Emulate `pthread_cond_wait`: atomically release the mutex, block the
/// calling thread, and register it as a waiter on the condvar.
fn pthread_cond_wait(
    cond_op: OpTy<'tcx, Tag>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, i32> {
    let this = self.eval_context_mut();
    let id = cond_get_or_create_id(this, cond_op)?;
    let mutex_id = mutex_get_or_create_id(this, mutex_op)?;
    let active_thread = this.get_active_thread();
    release_cond_mutex_and_block(this, active_thread, mutex_id)?;
    this.condvar_wait(id, active_thread, mutex_id);
/// Emulate `pthread_cond_timedwait`: like `pthread_cond_wait`, but also
/// registers a timeout callback at the absolute time in `abstime_op`
/// (interpreted against the condvar's stored clock). The result is written to
/// `dest` rather than returned, because the outcome is only known later:
/// success (0) is written eagerly and overwritten with `ETIMEDOUT` by the
/// callback if the timeout fires first.
fn pthread_cond_timedwait(
    cond_op: OpTy<'tcx, Tag>,
    mutex_op: OpTy<'tcx, Tag>,
    abstime_op: OpTy<'tcx, Tag>,
    dest: PlaceTy<'tcx, Tag>,
) -> InterpResult<'tcx> {
    let this = self.eval_context_mut();
    // Real wall-clock time is not available under isolation.
    this.check_no_isolation("pthread_cond_timedwait")?;
    let id = cond_get_or_create_id(this, cond_op)?;
    let mutex_id = mutex_get_or_create_id(this, mutex_op)?;
    let active_thread = this.get_active_thread();
    release_cond_mutex_and_block(this, active_thread, mutex_id)?;
    this.condvar_wait(id, active_thread, mutex_id);
    // We return success for now and override it in the timeout callback.
    this.write_scalar(Scalar::from_i32(0), dest)?;
    // Extract the timeout.
    let clock_id = cond_get_clock_id(this, cond_op)?.to_i32()?;
    // Read the `timespec` fields: `tv_sec` (field 0) and `tv_nsec` (field 1).
    let tp = this.deref_operand(abstime_op)?;
    let seconds_place = this.mplace_field(tp, 0)?;
    let seconds = this.read_scalar(seconds_place.into())?;
    let nanoseconds_place = this.mplace_field(tp, 1)?;
    let nanoseconds = this.read_scalar(nanoseconds_place.into())?;
    let (seconds, nanoseconds) = (
        seconds.to_machine_usize(this)?,
        // `Duration::new` takes a u32 nanosecond count; the conversion
        // panics if `tv_nsec` does not fit. NOTE(review): an out-of-range
        // `tv_nsec` should arguably be `EINVAL` instead — confirm upstream.
        nanoseconds.to_machine_usize(this)?.try_into().unwrap(),
    Duration::new(seconds, nanoseconds)
    // Map the absolute timespec onto the appropriate emulated clock.
    let timeout_time = if clock_id == this.eval_libc_i32("CLOCK_REALTIME")? {
        Time::RealTime(SystemTime::UNIX_EPOCH.checked_add(duration).unwrap())
    } else if clock_id == this.eval_libc_i32("CLOCK_MONOTONIC")? {
        Time::Monotonic(this.machine.time_anchor.checked_add(duration).unwrap())
        throw_unsup_format!("unsupported clock id: {}", clock_id);
    // Register the timeout callback.
    this.register_timeout_callback(
        Box::new(move |ecx| {
            // We are not waiting for the condvar any more, wait for the
            reacquire_cond_mutex(ecx, active_thread, mutex_id)?;
            // Remove the thread from the conditional variable.
            ecx.condvar_remove_waiter(id, active_thread);
            // Set the return value: we timed out.
            let timeout = ecx.eval_libc_i32("ETIMEDOUT")?;
            ecx.write_scalar(Scalar::from_i32(timeout), dest)?;
827 fn pthread_cond_destroy(&mut self, cond_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
828 let this = self.eval_context_mut();
830 let id = cond_get_or_create_id(this, cond_op)?;
831 if this.condvar_is_awaited(id) {
832 throw_ub_format!("destroying an awaited conditional variable");
834 cond_set_id(this, cond_op, ScalarMaybeUninit::Uninit)?;
835 cond_set_clock_id(this, cond_op, ScalarMaybeUninit::Uninit)?;