use rustc_middle::ty::{TyKind, TypeAndMut};
use rustc_target::abi::{LayoutOf, Size};

use crate::stacked_borrows::Tag;
use crate::threads::{BlockSetId, ThreadId};

use crate::*;

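/// Check that the target type of a raw-pointer `operand` is at least `min_size`
/// bytes, so that the offset-based reads and writes below stay within bounds.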
fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    operand: OpTy<'tcx, Tag>,
    min_size: u64,
) -> InterpResult<'tcx, ()> {
    let target_ty = match operand.layout.ty.kind {
        TyKind::RawPtr(TypeAndMut { ty, mutbl: _ }) => ty,
        _ => panic!("Argument to pthread function was not a raw pointer"),
    };
    let target_layout = ecx.layout_of(target_ty)?;
    assert!(target_layout.size.bytes() >= min_size);
    Ok(())
}

// pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.

// Our chosen memory layout for emulation (does not have to match the platform layout!):
// store an i32 in the first four bytes equal to the corresponding libc mutex kind constant
// (e.g. PTHREAD_MUTEX_NORMAL).
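// For example, after a successful `pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)`,
// the first four bytes of the attr location hold the value of `PTHREAD_MUTEX_RECURSIVE`.
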
fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    attr_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
    // Ensure that the following read at an offset to the attr pointer is within bounds.
    assert_ptr_target_min_size(ecx, attr_op, 4)?;
    let attr_place = ecx.deref_operand(attr_op)?;
    let kind_place =
        attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.machine.layouts.i32, ecx)?;
    ecx.read_scalar(kind_place.into())
}

fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    attr_op: OpTy<'tcx, Tag>,
    kind: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
    // Ensure that the following write at an offset to the attr pointer is within bounds.
    assert_ptr_target_min_size(ecx, attr_op, 4)?;
    let attr_place = ecx.deref_operand(attr_op)?;
    let kind_place =
        attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.machine.layouts.i32, ecx)?;
    ecx.write_scalar(kind.into(), kind_place.into())
}

// pthread_mutex_t is between 24 and 48 bytes, depending on the platform.

// Our chosen memory layout for the emulated mutex (does not have to match the platform layout!):
// bytes 0-3: reserved for signature on macOS
// (need to avoid this because it is set by static initializer macros)
// bytes 4-7: count of how many times this mutex has been locked, as a u32
// bytes 8-11: when count > 0, id of the owner thread as a u32
// bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
// (the kind has to be at this offset for compatibility with static initializer macros)
// bytes 20-23: when count > 0, id of the blockset in which the blocked threads are waiting.
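// All accessors below therefore require the mutex to be at least 24 bytes:
// the highest field we touch is the blockset id at bytes 20-23.
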
fn mutex_get_locked_count<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
    // Ensure that the following read at an offset to the mutex pointer is within bounds.
    assert_ptr_target_min_size(ecx, mutex_op, 24)?;
    let mutex_place = ecx.deref_operand(mutex_op)?;
    let locked_count_place = mutex_place.offset(
        Size::from_bytes(4),
        MemPlaceMeta::None,
        ecx.machine.layouts.u32,
        ecx,
    )?;
    ecx.read_scalar(locked_count_place.into())
}

fn mutex_set_locked_count<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
    locked_count: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
    // Ensure that the following write at an offset to the mutex pointer is within bounds.
    assert_ptr_target_min_size(ecx, mutex_op, 24)?;
    let mutex_place = ecx.deref_operand(mutex_op)?;
    let locked_count_place = mutex_place.offset(
        Size::from_bytes(4),
        MemPlaceMeta::None,
        ecx.machine.layouts.u32,
        ecx,
    )?;
    ecx.write_scalar(locked_count.into(), locked_count_place.into())
}

fn mutex_get_owner<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
    // Ensure that the following read at an offset to the mutex pointer is within bounds.
    assert_ptr_target_min_size(ecx, mutex_op, 24)?;
    let mutex_place = ecx.deref_operand(mutex_op)?;
    let owner_place = mutex_place.offset(
        Size::from_bytes(8),
        MemPlaceMeta::None,
        ecx.machine.layouts.u32,
        ecx,
    )?;
    ecx.read_scalar(owner_place.into())
}

fn mutex_set_owner<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
    owner: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
    // Ensure that the following write at an offset to the mutex pointer is within bounds.
    assert_ptr_target_min_size(ecx, mutex_op, 24)?;
    let mutex_place = ecx.deref_operand(mutex_op)?;
    let owner_place = mutex_place.offset(
        Size::from_bytes(8),
        MemPlaceMeta::None,
        ecx.machine.layouts.u32,
        ecx,
    )?;
    ecx.write_scalar(owner.into(), owner_place.into())
}

fn mutex_get_kind<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
    // Ensure that the following read at an offset to the mutex pointer is within bounds.
    assert_ptr_target_min_size(ecx, mutex_op, 24)?;
    let mutex_place = ecx.deref_operand(mutex_op)?;
    let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
    let kind_place = mutex_place.offset(
        Size::from_bytes(kind_offset),
        MemPlaceMeta::None,
        ecx.machine.layouts.i32,
        ecx,
    )?;
    ecx.read_scalar(kind_place.into())
}

fn mutex_set_kind<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
    kind: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
    // Ensure that the following write at an offset to the mutex pointer is within bounds.
    assert_ptr_target_min_size(ecx, mutex_op, 24)?;
    let mutex_place = ecx.deref_operand(mutex_op)?;
    let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
    let kind_place = mutex_place.offset(
        Size::from_bytes(kind_offset),
        MemPlaceMeta::None,
        ecx.machine.layouts.i32,
        ecx,
    )?;
    ecx.write_scalar(kind.into(), kind_place.into())
}

fn mutex_get_blockset<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
    // Ensure that the following read at an offset to the mutex pointer is within bounds.
    assert_ptr_target_min_size(ecx, mutex_op, 24)?;
    let mutex_place = ecx.deref_operand(mutex_op)?;
    let blockset_place = mutex_place.offset(
        Size::from_bytes(20),
        MemPlaceMeta::None,
        ecx.machine.layouts.u32,
        ecx,
    )?;
    ecx.read_scalar(blockset_place.into())
}

fn mutex_set_blockset<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
    blockset: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
    // Ensure that the following write at an offset to the mutex pointer is within bounds.
    assert_ptr_target_min_size(ecx, mutex_op, 24)?;
    let mutex_place = ecx.deref_operand(mutex_op)?;
    let blockset_place = mutex_place.offset(
        Size::from_bytes(20),
        MemPlaceMeta::None,
        ecx.machine.layouts.u32,
        ecx,
    )?;
    ecx.write_scalar(blockset.into(), blockset_place.into())
}

fn mutex_get_or_create_blockset<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, BlockSetId> {
    let blockset = mutex_get_blockset(ecx, mutex_op)?.to_u32()?;
    if blockset == 0 {
        // 0 is a default value and also not a valid blockset id. Need to
        // allocate a new blockset.
        let blockset = ecx.create_blockset()?;
        mutex_set_blockset(ecx, mutex_op, blockset.to_u32_scalar())?;
        Ok(blockset)
    } else {
        Ok(blockset.into())
    }
}

// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.

// Our chosen memory layout for the emulated rwlock (does not have to match the platform layout!):
// bytes 0-3: reserved for signature on macOS
// (need to avoid this because it is set by static initializer macros)
// bytes 4-7: reader count, as a u32
// bytes 8-11: writer count, as a u32
// bytes 12-15: when writer or reader count > 0, id of the blockset in which the
// blocked writers are waiting.
// bytes 16-19: when writer count > 0, id of the blockset in which the blocked
// readers are waiting.
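// All accessors below therefore require the rwlock to be at least 20 bytes:
// the highest field we touch is the reader blockset id at bytes 16-19.
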
fn rwlock_get_readers<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
    // Ensure that the following read at an offset to the rwlock pointer is within bounds.
    assert_ptr_target_min_size(ecx, rwlock_op, 20)?;
    let rwlock_place = ecx.deref_operand(rwlock_op)?;
    let readers_place = rwlock_place.offset(
        Size::from_bytes(4),
        MemPlaceMeta::None,
        ecx.machine.layouts.u32,
        ecx,
    )?;
    ecx.read_scalar(readers_place.into())
}

fn rwlock_set_readers<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
    readers: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
    // Ensure that the following write at an offset to the rwlock pointer is within bounds.
    assert_ptr_target_min_size(ecx, rwlock_op, 20)?;
    let rwlock_place = ecx.deref_operand(rwlock_op)?;
    let readers_place = rwlock_place.offset(
        Size::from_bytes(4),
        MemPlaceMeta::None,
        ecx.machine.layouts.u32,
        ecx,
    )?;
    ecx.write_scalar(readers.into(), readers_place.into())
}

fn rwlock_get_writers<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
    // Ensure that the following read at an offset to the rwlock pointer is within bounds.
    assert_ptr_target_min_size(ecx, rwlock_op, 20)?;
    let rwlock_place = ecx.deref_operand(rwlock_op)?;
    let writers_place = rwlock_place.offset(
        Size::from_bytes(8),
        MemPlaceMeta::None,
        ecx.machine.layouts.u32,
        ecx,
    )?;
    ecx.read_scalar(writers_place.into())
}

fn rwlock_set_writers<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
    writers: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
    // Ensure that the following write at an offset to the rwlock pointer is within bounds.
    assert_ptr_target_min_size(ecx, rwlock_op, 20)?;
    let rwlock_place = ecx.deref_operand(rwlock_op)?;
    let writers_place = rwlock_place.offset(
        Size::from_bytes(8),
        MemPlaceMeta::None,
        ecx.machine.layouts.u32,
        ecx,
    )?;
    ecx.write_scalar(writers.into(), writers_place.into())
}

fn rwlock_get_writer_blockset<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
    // Ensure that the following read at an offset to the rwlock pointer is within bounds.
    assert_ptr_target_min_size(ecx, rwlock_op, 20)?;
    let rwlock_place = ecx.deref_operand(rwlock_op)?;
    let blockset_place = rwlock_place.offset(
        Size::from_bytes(12),
        MemPlaceMeta::None,
        ecx.machine.layouts.u32,
        ecx,
    )?;
    ecx.read_scalar(blockset_place.into())
}

fn rwlock_set_writer_blockset<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
    blockset: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
    // Ensure that the following write at an offset to the rwlock pointer is within bounds.
    assert_ptr_target_min_size(ecx, rwlock_op, 20)?;
    let rwlock_place = ecx.deref_operand(rwlock_op)?;
    let blockset_place = rwlock_place.offset(
        Size::from_bytes(12),
        MemPlaceMeta::None,
        ecx.machine.layouts.u32,
        ecx,
    )?;
    ecx.write_scalar(blockset.into(), blockset_place.into())
}

fn rwlock_get_or_create_writer_blockset<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, BlockSetId> {
    let blockset = rwlock_get_writer_blockset(ecx, rwlock_op)?.to_u32()?;
    if blockset == 0 {
        // 0 is a default value and also not a valid blockset id. Need to
        // allocate a new blockset.
        let blockset = ecx.create_blockset()?;
        rwlock_set_writer_blockset(ecx, rwlock_op, blockset.to_u32_scalar())?;
        Ok(blockset)
    } else {
        Ok(blockset.into())
    }
}

fn rwlock_get_reader_blockset<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
    // Ensure that the following read at an offset to the rwlock pointer is within bounds.
    assert_ptr_target_min_size(ecx, rwlock_op, 20)?;
    let rwlock_place = ecx.deref_operand(rwlock_op)?;
    let blockset_place = rwlock_place.offset(
        Size::from_bytes(16),
        MemPlaceMeta::None,
        ecx.machine.layouts.u32,
        ecx,
    )?;
    ecx.read_scalar(blockset_place.into())
}

fn rwlock_set_reader_blockset<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
    blockset: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
    // Ensure that the following write at an offset to the rwlock pointer is within bounds.
    assert_ptr_target_min_size(ecx, rwlock_op, 20)?;
    let rwlock_place = ecx.deref_operand(rwlock_op)?;
    let blockset_place = rwlock_place.offset(
        Size::from_bytes(16),
        MemPlaceMeta::None,
        ecx.machine.layouts.u32,
        ecx,
    )?;
    ecx.write_scalar(blockset.into(), blockset_place.into())
}

fn rwlock_get_or_create_reader_blockset<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, BlockSetId> {
    let blockset = rwlock_get_reader_blockset(ecx, rwlock_op)?.to_u32()?;
    if blockset == 0 {
        // 0 is a default value and also not a valid blockset id. Need to
        // allocate a new blockset.
        let blockset = ecx.create_blockset()?;
        rwlock_set_reader_blockset(ecx, rwlock_op, blockset.to_u32_scalar())?;
        Ok(blockset)
    } else {
        Ok(blockset.into())
    }
}

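// The pthread_* shims themselves, exposed as an extension trait on the Miri
// evaluation context. They are meant to be called from the foreign-item
// handling code when the interpreted program invokes the corresponding libc
// function. A minimal sketch of such a call site (illustrative only; the
// actual dispatch lives in the foreign-item shims and may differ):
//
//     "pthread_mutex_lock" => {
//         let result = this.pthread_mutex_lock(args[0])?;
//         this.write_scalar(Scalar::from_i32(result), dest)?;
//     }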
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?;
        mutexattr_set_kind(this, attr_op, default_kind)?;

        Ok(0)
    }

    fn pthread_mutexattr_settype(
        &mut self,
        attr_op: OpTy<'tcx, Tag>,
        kind_op: OpTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let kind = this.read_scalar(kind_op)?.not_undef()?;
        if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
            || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
            || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?
        {
            mutexattr_set_kind(this, attr_op, kind)?;
            Ok(0)
        } else {
            let einval = this.eval_libc_i32("EINVAL")?;
            Ok(einval)
        }
    }

    fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        mutexattr_set_kind(this, attr_op, ScalarMaybeUndef::Undef)?;
        Ok(0)
    }

    fn pthread_mutex_init(
        &mut self,
        mutex_op: OpTy<'tcx, Tag>,
        attr_op: OpTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let attr = this.read_scalar(attr_op)?.not_undef()?;
        let kind = if this.is_null(attr)? {
            this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
        } else {
            mutexattr_get_kind(this, attr_op)?.not_undef()?
        };

        mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
        mutex_set_kind(this, mutex_op, kind)?;

        Ok(0)
    }

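    /// Lock the mutex. If another thread already holds it, block the calling
    /// thread. Re-locking a mutex the caller already holds depends on the kind:
    /// NORMAL deadlocks, ERRORCHECK returns EDEADLK, RECURSIVE bumps the count.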
    fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
        let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
        let active_thread = this.get_active_thread()?;

        if locked_count == 0 {
            // The mutex is unlocked. Let's lock it.
            mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
            mutex_set_owner(this, mutex_op, active_thread.to_u32_scalar())?;
            Ok(0)
        } else {
            // The mutex is locked. Let's check by whom.
            let owner_thread: ThreadId =
                mutex_get_owner(this, mutex_op)?.not_undef()?.to_u32()?.into();
            if owner_thread != active_thread {
                // Block the active thread.
                let blockset = mutex_get_or_create_blockset(this, mutex_op)?;
                this.block_active_thread(blockset)?;
                Ok(0)
            } else {
                // Trying to acquire the same mutex again.
                if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
                    throw_machine_stop!(TerminationInfo::Deadlock);
                } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
                    this.eval_libc_i32("EDEADLK")
                } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
                    match locked_count.checked_add(1) {
                        Some(new_count) => {
                            mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
                            Ok(0)
                        }
                        None => this.eval_libc_i32("EAGAIN"),
                    }
                } else {
                    throw_ub_format!("called pthread_mutex_lock on an unsupported type of mutex");
                }
            }
        }
    }

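    /// Try to lock the mutex. Like `pthread_mutex_lock`, but returns EBUSY
    /// instead of blocking when another thread holds the mutex.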
    fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
        let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
        let active_thread = this.get_active_thread()?;

        if locked_count == 0 {
            // The mutex is unlocked. Let's lock it.
            mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
            mutex_set_owner(this, mutex_op, active_thread.to_u32_scalar())?;
            Ok(0)
        } else {
            // The mutex is locked. Let's check by whom.
            let owner_thread: ThreadId = mutex_get_owner(this, mutex_op)?.to_u32()?.into();
            if owner_thread != active_thread {
                this.eval_libc_i32("EBUSY")
            } else {
                // Trying to acquire the same mutex again.
                if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
                    || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
                {
                    this.eval_libc_i32("EBUSY")
                } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
                    match locked_count.checked_add(1) {
                        Some(new_count) => {
                            mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
                            Ok(0)
                        }
                        None => this.eval_libc_i32("EAGAIN"),
                    }
                } else {
                    throw_ub_format!(
                        "called pthread_mutex_trylock on an unsupported type of mutex"
                    );
                }
            }
        }
    }

    fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
        let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
        let owner_thread: ThreadId = mutex_get_owner(this, mutex_op)?.to_u32()?.into();

        if owner_thread != this.get_active_thread()? {
            throw_ub_format!("called pthread_mutex_unlock on a mutex owned by another thread");
        } else if locked_count == 1 {
            let blockset = mutex_get_or_create_blockset(this, mutex_op)?;
            if let Some(new_owner) = this.unblock_random_thread(blockset)? {
                // We have at least one thread waiting on this mutex. Transfer
                // ownership to it.
                mutex_set_owner(this, mutex_op, new_owner.to_u32_scalar())?;
            } else {
                // No thread is waiting on this mutex.
                mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
            }
            Ok(0)
        } else {
            if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
                throw_ub_format!("unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked");
            } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
                this.eval_libc_i32("EPERM")
            } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
                match locked_count.checked_sub(1) {
                    Some(new_count) => {
                        mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
                        Ok(0)
                    }
                    None => {
                        // locked_count was already zero.
                        this.eval_libc_i32("EPERM")
                    }
                }
            } else {
                throw_ub_format!("called pthread_mutex_unlock on an unsupported type of mutex");
            }
        }
    }

    fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        if mutex_get_locked_count(this, mutex_op)?.to_u32()? != 0 {
            throw_ub_format!("destroyed a locked mutex");
        }

        mutex_set_kind(this, mutex_op, ScalarMaybeUndef::Undef)?;
        mutex_set_locked_count(this, mutex_op, ScalarMaybeUndef::Undef)?;
        mutex_set_blockset(this, mutex_op, ScalarMaybeUndef::Undef)?;

        Ok(0)
    }

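    /// Acquire a read lock. Blocks the calling thread while a writer holds the
    /// lock; otherwise increments the reader count.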
    fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
        let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;

        if writers != 0 {
            // The lock is locked by a writer.
            assert_eq!(writers, 1);
            let reader_blockset = rwlock_get_or_create_reader_blockset(this, rwlock_op)?;
            this.block_active_thread(reader_blockset)?;
            Ok(0)
        } else {
            match readers.checked_add(1) {
                Some(new_readers) => {
                    rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
                    Ok(0)
                }
                None => this.eval_libc_i32("EAGAIN"),
            }
        }
    }

    fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
        let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
        if writers != 0 {
            // The lock is held by a writer.
            this.eval_libc_i32("EBUSY")
        } else {
            match readers.checked_add(1) {
                Some(new_readers) => {
                    rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
                    Ok(0)
                }
                None => this.eval_libc_i32("EAGAIN"),
            }
        }
    }

    fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
        let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
        let writer_blockset = rwlock_get_or_create_writer_blockset(this, rwlock_op)?;
        if readers != 0 || writers != 0 {
            // The lock is already held; block until it is released.
            this.block_active_thread(writer_blockset)?;
        } else {
            rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
        }

        Ok(0)
    }

    fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
        let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
        if readers != 0 || writers != 0 {
            this.eval_libc_i32("EBUSY")
        } else {
            rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
            Ok(0)
        }
    }

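    /// Release the read or write lock held by the calling thread. When the lock
    /// becomes free, a blocked writer is preferred over blocked readers; if no
    /// writer is waiting, all blocked readers are woken up.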
    fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
        let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
        let writer_blockset = rwlock_get_or_create_writer_blockset(this, rwlock_op)?;
        if let Some(new_readers) = readers.checked_sub(1) {
            // We are releasing a read lock.
            assert_eq!(writers, 0);
            rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
            if new_readers == 0 {
                // The last reader left; wake up a blocked writer if there is one.
                if let Some(_writer) = this.unblock_random_thread(writer_blockset)? {
                    rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
                }
            }
            Ok(0)
        } else if writers != 0 {
            // We are releasing the write lock.
            let reader_blockset = rwlock_get_or_create_reader_blockset(this, rwlock_op)?;
            rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?;
            if let Some(_writer) = this.unblock_random_thread(writer_blockset)? {
                rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
            } else {
                // No writer is waiting; wake up all blocked readers.
                let mut readers = 0;
                while let Some(_reader) = this.unblock_random_thread(reader_blockset)? {
                    readers += 1;
                }
                rwlock_set_readers(this, rwlock_op, Scalar::from_u32(readers))?
            }
            Ok(0)
        } else {
            throw_ub_format!("unlocked an rwlock that was not locked");
        }
    }

    fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        if rwlock_get_readers(this, rwlock_op)?.to_u32()? != 0
            || rwlock_get_writers(this, rwlock_op)?.to_u32()? != 0
        {
            throw_ub_format!("destroyed a locked rwlock");
        }

        rwlock_set_readers(this, rwlock_op, ScalarMaybeUndef::Undef)?;
        rwlock_set_writers(this, rwlock_op, ScalarMaybeUndef::Undef)?;
        rwlock_set_reader_blockset(this, rwlock_op, ScalarMaybeUndef::Undef)?;
        rwlock_set_writer_blockset(this, rwlock_op, ScalarMaybeUndef::Undef)?;

        Ok(0)
    }
}