1 use rustc_middle::ty::{layout::TyAndLayout, TyKind, TypeAndMut};
2 use rustc_target::abi::{LayoutOf, Size};
4 use crate::stacked_borrows::Tag;
5 use crate::thread::BlockSetId;
8 fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(
9 ecx: &MiriEvalContext<'mir, 'tcx>,
10 operand: OpTy<'tcx, Tag>,
12 ) -> InterpResult<'tcx, ()> {
13 let target_ty = match operand.layout.ty.kind {
14 TyKind::RawPtr(TypeAndMut { ty, mutbl: _ }) => ty,
15 _ => panic!("Argument to pthread function was not a raw pointer"),
17 let target_layout = ecx.layout_of(target_ty)?;
18 assert!(target_layout.size.bytes() >= min_size);
22 fn get_at_offset<'mir, 'tcx: 'mir>(
23 ecx: &MiriEvalContext<'mir, 'tcx>,
26 layout: TyAndLayout<'tcx>,
28 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
29 // Ensure that the following read at an offset to the attr pointer is within bounds
30 assert_ptr_target_min_size(ecx, op, min_size)?;
31 let op_place = ecx.deref_operand(op)?;
32 let value_place = op_place.offset(Size::from_bytes(offset), MemPlaceMeta::None, layout, ecx)?;
33 ecx.read_scalar(value_place.into())
36 fn set_at_offset<'mir, 'tcx: 'mir>(
37 ecx: &mut MiriEvalContext<'mir, 'tcx>,
40 value: impl Into<ScalarMaybeUndef<Tag>>,
41 layout: TyAndLayout<'tcx>,
43 ) -> InterpResult<'tcx, ()> {
44 // Ensure that the following write at an offset to the attr pointer is within bounds
45 assert_ptr_target_min_size(ecx, op, min_size)?;
46 let op_place = ecx.deref_operand(op)?;
47 let value_place = op_place.offset(Size::from_bytes(offset), MemPlaceMeta::None, layout, ecx)?;
48 ecx.write_scalar(value.into(), value_place.into())
// pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.

// Our chosen memory layout for emulation (does not have to match the platform layout!):
// store an i32 in the first four bytes equal to the corresponding libc mutex kind constant
// (e.g. PTHREAD_MUTEX_NORMAL).

// Minimum pointee size we require for a pthread_mutexattr_t: we only ever touch
// the leading 4-byte kind field.
const PTHREAD_MUTEXATTR_T_MIN_SIZE: u64 = 4;
59 fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
60 ecx: &MiriEvalContext<'mir, 'tcx>,
61 attr_op: OpTy<'tcx, Tag>,
62 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
63 get_at_offset(ecx, attr_op, 0, ecx.machine.layouts.i32, PTHREAD_MUTEXATTR_T_MIN_SIZE)
66 fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
67 ecx: &mut MiriEvalContext<'mir, 'tcx>,
68 attr_op: OpTy<'tcx, Tag>,
69 kind: impl Into<ScalarMaybeUndef<Tag>>,
70 ) -> InterpResult<'tcx, ()> {
71 set_at_offset(ecx, attr_op, 0, kind, ecx.machine.layouts.i32, PTHREAD_MUTEXATTR_T_MIN_SIZE)
// pthread_mutex_t is between 24 and 48 bytes, depending on the platform.

// Our chosen memory layout for the emulated mutex (does not have to match the platform layout!):
// bytes 0-3: reserved for signature on macOS
// (need to avoid this because it is set by static initializer macros)
// bytes 4-7: count of how many times this mutex has been locked, as a u32
// bytes 8-11: when count > 0, id of the owner thread as a u32
// bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
// (the kind has to be at its offset for compatibility with static initializer macros)
// bytes 20-23: when count > 0, id of the blockset in which the blocked threads are waiting.

// Minimum pointee size we require for a pthread_mutex_t: covers all fields above.
const PTHREAD_MUTEX_T_MIN_SIZE: u64 = 24;
87 fn mutex_get_locked_count<'mir, 'tcx: 'mir>(
88 ecx: &MiriEvalContext<'mir, 'tcx>,
89 mutex_op: OpTy<'tcx, Tag>,
90 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
91 get_at_offset(ecx, mutex_op, 4, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
94 fn mutex_set_locked_count<'mir, 'tcx: 'mir>(
95 ecx: &mut MiriEvalContext<'mir, 'tcx>,
96 mutex_op: OpTy<'tcx, Tag>,
97 locked_count: impl Into<ScalarMaybeUndef<Tag>>,
98 ) -> InterpResult<'tcx, ()> {
99 set_at_offset(ecx, mutex_op, 4, locked_count, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
102 fn mutex_get_owner<'mir, 'tcx: 'mir>(
103 ecx: &MiriEvalContext<'mir, 'tcx>,
104 mutex_op: OpTy<'tcx, Tag>,
105 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
106 get_at_offset(ecx, mutex_op, 8, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
109 fn mutex_set_owner<'mir, 'tcx: 'mir>(
110 ecx: &mut MiriEvalContext<'mir, 'tcx>,
111 mutex_op: OpTy<'tcx, Tag>,
112 owner: impl Into<ScalarMaybeUndef<Tag>>,
113 ) -> InterpResult<'tcx, ()> {
114 set_at_offset(ecx, mutex_op, 8, owner, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
117 fn mutex_get_kind<'mir, 'tcx: 'mir>(
118 ecx: &mut MiriEvalContext<'mir, 'tcx>,
119 mutex_op: OpTy<'tcx, Tag>,
120 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
121 let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
122 get_at_offset(ecx, mutex_op, offset, ecx.machine.layouts.i32, PTHREAD_MUTEX_T_MIN_SIZE)
125 fn mutex_set_kind<'mir, 'tcx: 'mir>(
126 ecx: &mut MiriEvalContext<'mir, 'tcx>,
127 mutex_op: OpTy<'tcx, Tag>,
128 kind: impl Into<ScalarMaybeUndef<Tag>>,
129 ) -> InterpResult<'tcx, ()> {
130 let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
131 set_at_offset(ecx, mutex_op, offset, kind, ecx.machine.layouts.i32, PTHREAD_MUTEX_T_MIN_SIZE)
134 fn mutex_get_blockset<'mir, 'tcx: 'mir>(
135 ecx: &MiriEvalContext<'mir, 'tcx>,
136 mutex_op: OpTy<'tcx, Tag>,
137 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
138 get_at_offset(ecx, mutex_op, 20, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
141 fn mutex_set_blockset<'mir, 'tcx: 'mir>(
142 ecx: &mut MiriEvalContext<'mir, 'tcx>,
143 mutex_op: OpTy<'tcx, Tag>,
144 blockset: impl Into<ScalarMaybeUndef<Tag>>,
145 ) -> InterpResult<'tcx, ()> {
146 set_at_offset(ecx, mutex_op, 20, blockset, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
149 fn mutex_get_or_create_blockset<'mir, 'tcx: 'mir>(
150 ecx: &mut MiriEvalContext<'mir, 'tcx>,
151 mutex_op: OpTy<'tcx, Tag>,
152 ) -> InterpResult<'tcx, BlockSetId> {
153 let blockset = mutex_get_blockset(ecx, mutex_op)?.to_u32()?;
155 // 0 is a default value and also not a valid blockset id. Need to
156 // allocate a new blockset.
157 let blockset = ecx.create_blockset()?;
158 mutex_set_blockset(ecx, mutex_op, blockset.to_u32_scalar())?;
// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.

// Our chosen memory layout for the emulated rwlock (does not have to match the platform layout!):
// bytes 0-3: reserved for signature on macOS
// (need to avoid this because it is set by static initializer macros)
// bytes 4-7: reader count, as a u32
// bytes 8-11: writer count, as a u32
// bytes 12-15: when writer or reader count > 0, id of the blockset in which the
// blocked writers are waiting.
// bytes 16-19: when writer count > 0, id of the blockset in which the blocked
// readers are waiting.

// Minimum pointee size we require for a pthread_rwlock_t: covers all fields above.
const PTHREAD_RWLOCK_T_MIN_SIZE: u64 = 20;
179 fn rwlock_get_readers<'mir, 'tcx: 'mir>(
180 ecx: &MiriEvalContext<'mir, 'tcx>,
181 rwlock_op: OpTy<'tcx, Tag>,
182 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
183 get_at_offset(ecx, rwlock_op, 4, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
186 fn rwlock_set_readers<'mir, 'tcx: 'mir>(
187 ecx: &mut MiriEvalContext<'mir, 'tcx>,
188 rwlock_op: OpTy<'tcx, Tag>,
189 readers: impl Into<ScalarMaybeUndef<Tag>>,
190 ) -> InterpResult<'tcx, ()> {
191 set_at_offset(ecx, rwlock_op, 4, readers, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
194 fn rwlock_get_writers<'mir, 'tcx: 'mir>(
195 ecx: &MiriEvalContext<'mir, 'tcx>,
196 rwlock_op: OpTy<'tcx, Tag>,
197 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
198 get_at_offset(ecx, rwlock_op, 8, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
201 fn rwlock_set_writers<'mir, 'tcx: 'mir>(
202 ecx: &mut MiriEvalContext<'mir, 'tcx>,
203 rwlock_op: OpTy<'tcx, Tag>,
204 writers: impl Into<ScalarMaybeUndef<Tag>>,
205 ) -> InterpResult<'tcx, ()> {
206 set_at_offset(ecx, rwlock_op, 8, writers, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
209 fn rwlock_get_writer_blockset<'mir, 'tcx: 'mir>(
210 ecx: &MiriEvalContext<'mir, 'tcx>,
211 rwlock_op: OpTy<'tcx, Tag>,
212 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
213 get_at_offset(ecx, rwlock_op, 12, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
216 fn rwlock_set_writer_blockset<'mir, 'tcx: 'mir>(
217 ecx: &mut MiriEvalContext<'mir, 'tcx>,
218 rwlock_op: OpTy<'tcx, Tag>,
219 blockset: impl Into<ScalarMaybeUndef<Tag>>,
220 ) -> InterpResult<'tcx, ()> {
221 set_at_offset(ecx, rwlock_op, 12, blockset, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
224 fn rwlock_get_or_create_writer_blockset<'mir, 'tcx: 'mir>(
225 ecx: &mut MiriEvalContext<'mir, 'tcx>,
226 rwlock_op: OpTy<'tcx, Tag>,
227 ) -> InterpResult<'tcx, BlockSetId> {
228 let blockset = rwlock_get_writer_blockset(ecx, rwlock_op)?.to_u32()?;
230 // 0 is a default value and also not a valid blockset id. Need to
231 // allocate a new blockset.
232 let blockset = ecx.create_blockset()?;
233 rwlock_set_writer_blockset(ecx, rwlock_op, blockset.to_u32_scalar())?;
240 fn rwlock_get_reader_blockset<'mir, 'tcx: 'mir>(
241 ecx: &MiriEvalContext<'mir, 'tcx>,
242 rwlock_op: OpTy<'tcx, Tag>,
243 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
244 get_at_offset(ecx, rwlock_op, 16, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
247 fn rwlock_set_reader_blockset<'mir, 'tcx: 'mir>(
248 ecx: &mut MiriEvalContext<'mir, 'tcx>,
249 rwlock_op: OpTy<'tcx, Tag>,
250 blockset: impl Into<ScalarMaybeUndef<Tag>>,
251 ) -> InterpResult<'tcx, ()> {
252 set_at_offset(ecx, rwlock_op, 16, blockset, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
255 fn rwlock_get_or_create_reader_blockset<'mir, 'tcx: 'mir>(
256 ecx: &mut MiriEvalContext<'mir, 'tcx>,
257 rwlock_op: OpTy<'tcx, Tag>,
258 ) -> InterpResult<'tcx, BlockSetId> {
259 let blockset = rwlock_get_reader_blockset(ecx, rwlock_op)?.to_u32()?;
261 // 0 is a default value and also not a valid blockset id. Need to
262 // allocate a new blockset.
263 let blockset = ecx.create_blockset()?;
264 rwlock_set_reader_blockset(ecx, rwlock_op, blockset.to_u32_scalar())?;
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
// Extension trait holding the pthread mutex/rwlock shims; implemented for the
// Miri evaluation context via the blanket impl above.
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
273 fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
274 let this = self.eval_context_mut();
276 let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?;
277 mutexattr_set_kind(this, attr_op, default_kind)?;
282 fn pthread_mutexattr_settype(
284 attr_op: OpTy<'tcx, Tag>,
285 kind_op: OpTy<'tcx, Tag>,
286 ) -> InterpResult<'tcx, i32> {
287 let this = self.eval_context_mut();
289 let kind = this.read_scalar(kind_op)?.not_undef()?;
290 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
291 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
292 || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?
294 mutexattr_set_kind(this, attr_op, kind)?;
296 let einval = this.eval_libc_i32("EINVAL")?;
303 fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
304 let this = self.eval_context_mut();
306 mutexattr_set_kind(this, attr_op, ScalarMaybeUndef::Undef)?;
311 fn pthread_mutex_init(
313 mutex_op: OpTy<'tcx, Tag>,
314 attr_op: OpTy<'tcx, Tag>,
315 ) -> InterpResult<'tcx, i32> {
316 let this = self.eval_context_mut();
318 let attr = this.read_scalar(attr_op)?.not_undef()?;
319 let kind = if this.is_null(attr)? {
320 this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
322 mutexattr_get_kind(this, attr_op)?.not_undef()?
325 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
326 mutex_set_kind(this, mutex_op, kind)?;
331 fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
332 let this = self.eval_context_mut();
334 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
335 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
336 let active_thread = this.get_active_thread()?;
338 if locked_count == 0 {
339 // The mutex is unlocked. Let's lock it.
340 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
341 mutex_set_owner(this, mutex_op, active_thread.to_u32_scalar())?;
344 // The mutex is locked. Let's check by whom.
345 let owner_thread: ThreadId =
346 mutex_get_owner(this, mutex_op)?.not_undef()?.to_u32()?.into();
347 if owner_thread != active_thread {
348 // Block the active thread.
349 let blockset = mutex_get_or_create_blockset(this, mutex_op)?;
350 this.block_active_thread(blockset)?;
353 // Trying to acquire the same mutex again.
354 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
355 throw_machine_stop!(TerminationInfo::Deadlock);
356 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
357 this.eval_libc_i32("EDEADLK")
358 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
359 match locked_count.checked_add(1) {
361 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
364 None => this.eval_libc_i32("EAGAIN"),
367 throw_ub_format!("called pthread_mutex_lock on an unsupported type of mutex");
373 fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
374 let this = self.eval_context_mut();
376 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
377 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
378 let active_thread = this.get_active_thread()?;
380 if locked_count == 0 {
381 // The mutex is unlocked. Let's lock it.
382 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
383 mutex_set_owner(this, mutex_op, active_thread.to_u32_scalar())?;
386 let owner_thread: ThreadId = mutex_get_owner(this, mutex_op)?.to_u32()?.into();
387 if owner_thread != active_thread {
388 this.eval_libc_i32("EBUSY")
390 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
391 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
393 this.eval_libc_i32("EBUSY")
394 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
395 match locked_count.checked_add(1) {
397 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
400 None => this.eval_libc_i32("EAGAIN"),
404 "called pthread_mutex_trylock on an unsupported type of mutex"
411 fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
412 let this = self.eval_context_mut();
414 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
415 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
416 let owner_thread: ThreadId = mutex_get_owner(this, mutex_op)?.to_u32()?.into();
418 if owner_thread != this.get_active_thread()? {
419 throw_ub_format!("called pthread_mutex_unlock on a mutex owned by another thread");
420 } else if locked_count == 1 {
421 let blockset = mutex_get_or_create_blockset(this, mutex_op)?;
422 if let Some(new_owner) = this.unblock_some_thread(blockset)? {
423 // We have at least one thread waiting on this mutex. Transfer
425 mutex_set_owner(this, mutex_op, new_owner.to_u32_scalar())?;
427 // No thread is waiting on this mutex.
428 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
432 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
433 throw_ub_format!("unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked");
434 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
435 this.eval_libc_i32("EPERM")
436 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
437 match locked_count.checked_sub(1) {
439 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
443 // locked_count was already zero
444 this.eval_libc_i32("EPERM")
448 throw_ub_format!("called pthread_mutex_unlock on an unsupported type of mutex");
453 fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
454 let this = self.eval_context_mut();
456 if mutex_get_locked_count(this, mutex_op)?.to_u32()? != 0 {
457 throw_ub_format!("destroyed a locked mutex");
460 mutex_set_kind(this, mutex_op, ScalarMaybeUndef::Undef)?;
461 mutex_set_locked_count(this, mutex_op, ScalarMaybeUndef::Undef)?;
462 mutex_set_blockset(this, mutex_op, ScalarMaybeUndef::Undef)?;
467 fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
468 let this = self.eval_context_mut();
470 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
471 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
474 // The lock is locked by a writer.
475 assert_eq!(writers, 1);
476 let reader_blockset = rwlock_get_or_create_reader_blockset(this, rwlock_op)?;
477 this.block_active_thread(reader_blockset)?;
480 match readers.checked_add(1) {
481 Some(new_readers) => {
482 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
485 None => this.eval_libc_i32("EAGAIN"),
490 fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
491 let this = self.eval_context_mut();
493 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
494 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
496 this.eval_libc_i32("EBUSY")
498 match readers.checked_add(1) {
499 Some(new_readers) => {
500 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
503 None => this.eval_libc_i32("EAGAIN"),
508 fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
509 let this = self.eval_context_mut();
511 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
512 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
513 let writer_blockset = rwlock_get_or_create_writer_blockset(this, rwlock_op)?;
514 if readers != 0 || writers != 0 {
515 this.block_active_thread(writer_blockset)?;
517 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
522 fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
523 let this = self.eval_context_mut();
525 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
526 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
527 if readers != 0 || writers != 0 {
528 this.eval_libc_i32("EBUSY")
530 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
535 // FIXME: We should check that this lock was locked by the active thread.
536 fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
537 let this = self.eval_context_mut();
539 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
540 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
541 let writer_blockset = rwlock_get_or_create_writer_blockset(this, rwlock_op)?;
542 if let Some(new_readers) = readers.checked_sub(1) {
543 assert_eq!(writers, 0);
544 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
545 if new_readers == 0 {
546 if let Some(_writer) = this.unblock_some_thread(writer_blockset)? {
547 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
551 } else if writers != 0 {
552 let reader_blockset = rwlock_get_or_create_reader_blockset(this, rwlock_op)?;
553 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?;
554 if let Some(_writer) = this.unblock_some_thread(writer_blockset)? {
555 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
558 while let Some(_reader) = this.unblock_some_thread(reader_blockset)? {
561 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(readers))?
565 throw_ub_format!("unlocked an rwlock that was not locked");
    /// Shim for `pthread_rwlock_destroy`: destroying a held rwlock is UB;
    /// otherwise poison the fields with `Undef` so later use is detected.
    /// NOTE(review): the tail of this method (its success return and closing
    /// brace) lies beyond the visible portion of the file.
    fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();
        // Reject destruction while any reader or writer still holds the lock.
        if rwlock_get_readers(this, rwlock_op)?.to_u32()? != 0
            || rwlock_get_writers(this, rwlock_op)?.to_u32()? != 0
            throw_ub_format!("destroyed a locked rwlock");
        // Poison every emulated field so a use-after-destroy reads Undef.
        rwlock_set_readers(this, rwlock_op, ScalarMaybeUndef::Undef)?;
        rwlock_set_writers(this, rwlock_op, ScalarMaybeUndef::Undef)?;
        rwlock_set_reader_blockset(this, rwlock_op, ScalarMaybeUndef::Undef)?;
        rwlock_set_writer_blockset(this, rwlock_op, ScalarMaybeUndef::Undef)?;