1 use rustc_middle::ty::{TyKind, TypeAndMut};
2 use rustc_target::abi::{LayoutOf, Size};
4 use crate::stacked_borrows::Tag;
7 impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
8 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Emulates `pthread_mutexattr_init`: rejects a NULL attr pointer with
/// EINVAL, otherwise stores the platform's `PTHREAD_MUTEX_DEFAULT` kind
/// constant into the attribute object (see `mutexattr_set_kind` layout).
/// NOTE(review): closing braces and the success return are not visible in
/// this extract — assumed to follow the shown lines.
9 fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
10 let this = self.eval_context_mut();
// NULL attribute pointer is invalid input.
12 let attr = this.read_scalar(attr_op)?.not_undef()?;
13 if this.is_null(attr)? {
14 return this.eval_libc_i32("EINVAL");
// Initialize the kind field to the libc default mutex kind.
17 let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?;
18 mutexattr_set_kind(this, attr_op, default_kind)?;
/// Emulates `pthread_mutexattr_settype`: stores `kind_op` into the attribute
/// if it is one of the three supported libc mutex kinds (NORMAL, ERRORCHECK,
/// RECURSIVE); any other kind yields EINVAL. A NULL attr pointer also yields
/// EINVAL. NOTE(review): the `&mut self` parameter line, closing braces and
/// success/error returns are not visible in this extract.
23 fn pthread_mutexattr_settype(
25 attr_op: OpTy<'tcx, Tag>,
26 kind_op: OpTy<'tcx, Tag>,
27 ) -> InterpResult<'tcx, i32> {
28 let this = self.eval_context_mut();
// NULL attribute pointer is invalid input.
30 let attr = this.read_scalar(attr_op)?.not_undef()?;
31 if this.is_null(attr)? {
32 return this.eval_libc_i32("EINVAL");
// Only the three standard mutex kinds are accepted.
35 let kind = this.read_scalar(kind_op)?.not_undef()?;
36 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
37 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
38 || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?
40 mutexattr_set_kind(this, attr_op, kind)?;
// Unknown kind: report EINVAL (presumably returned below — line not shown).
42 let einval = this.eval_libc_i32("EINVAL")?;
/// Emulates `pthread_mutexattr_destroy`: rejects a NULL attr pointer with
/// EINVAL, otherwise marks the attribute's kind field as undefined so any
/// later use of the destroyed attribute is caught by the interpreter.
49 fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
50 let this = self.eval_context_mut();
52 let attr = this.read_scalar(attr_op)?.not_undef()?;
53 if this.is_null(attr)? {
54 return this.eval_libc_i32("EINVAL");
// Writing Undef "poisons" the field: reading it later is a Miri error.
57 mutexattr_set_kind(this, attr_op, ScalarMaybeUndef::Undef)?;
/// Emulates `pthread_mutex_init`: initializes the mutex's locked count to 0
/// and its kind either to `PTHREAD_MUTEX_DEFAULT` (when `attr_op` is NULL)
/// or to the kind stored in the given attribute. NULL mutex pointer yields
/// EINVAL. NOTE(review): the `&mut self` parameter line, `} else {` and
/// closing lines are not visible in this extract.
62 fn pthread_mutex_init(
64 mutex_op: OpTy<'tcx, Tag>,
65 attr_op: OpTy<'tcx, Tag>,
66 ) -> InterpResult<'tcx, i32> {
67 let this = self.eval_context_mut();
69 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
70 if this.is_null(mutex)? {
71 return this.eval_libc_i32("EINVAL");
// A NULL attr means "use default attributes", per POSIX.
74 let attr = this.read_scalar(attr_op)?.not_undef()?;
75 let kind = if this.is_null(attr)? {
76 this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
78 mutexattr_get_kind(this, attr_op)?.not_undef()?
// Freshly initialized mutex: unlocked, with the chosen kind.
81 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
82 mutex_set_kind(this, mutex_op, kind)?;
/// Emulates `pthread_mutex_lock` by mutex kind:
/// - NORMAL: lock if free; relocking deadlocks (reported as unsupported).
/// - ERRORCHECK: lock if free; relocking returns EDEADLK.
/// - RECURSIVE: increment the lock count; overflow returns EAGAIN.
/// - anything else: EINVAL (kind may be uninitialized/corrupt).
/// NOTE(review): closing braces, success returns and the `Some(new_count)`
/// match-arm head are not visible in this extract.
87 fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
88 let this = self.eval_context_mut();
90 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
91 if this.is_null(mutex)? {
92 return this.eval_libc_i32("EINVAL");
95 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
96 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
98 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
99 if locked_count == 0 {
100 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
// Single-threaded interpretation: relocking NORMAL can never succeed.
103 throw_unsup_format!("Deadlock due to locking a PTHREAD_MUTEX_NORMAL mutex twice");
105 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
106 if locked_count == 0 {
107 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
110 this.eval_libc_i32("EDEADLK")
112 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
// checked_add guards against wrapping the u32 recursion counter.
113 match locked_count.checked_add(1) {
115 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
118 None => this.eval_libc_i32("EAGAIN"),
121 this.eval_libc_i32("EINVAL")
/// Emulates `pthread_mutex_trylock` by mutex kind:
/// - NORMAL / ERRORCHECK: lock if free, otherwise EBUSY (never blocks).
/// - RECURSIVE: increment the lock count; overflow returns EAGAIN.
/// - anything else: EINVAL.
/// NOTE(review): closing braces, success returns and the `Some(new_count)`
/// match-arm head are not visible in this extract.
125 fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
126 let this = self.eval_context_mut();
128 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
129 if this.is_null(mutex)? {
130 return this.eval_libc_i32("EINVAL");
133 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
134 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
// NORMAL and ERRORCHECK behave identically for trylock.
136 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
137 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
139 if locked_count == 0 {
140 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
143 this.eval_libc_i32("EBUSY")
145 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
// checked_add guards against wrapping the u32 recursion counter.
146 match locked_count.checked_add(1) {
148 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
151 None => this.eval_libc_i32("EAGAIN"),
154 this.eval_libc_i32("EINVAL")
/// Emulates `pthread_mutex_unlock` by mutex kind:
/// - NORMAL: unlock if locked once; unlocking an unlocked mutex is UB-like
///   and reported as unsupported.
/// - ERRORCHECK: unlock if locked once, otherwise EPERM.
/// - RECURSIVE: decrement the lock count; if already zero, EPERM.
/// - anything else: EINVAL.
/// NOTE(review): closing braces, success returns and the `Some(new_count)`
/// match-arm head are not visible in this extract.
158 fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
159 let this = self.eval_context_mut();
161 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
162 if this.is_null(mutex)? {
163 return this.eval_libc_i32("EINVAL");
166 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
167 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
169 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
170 if locked_count == 1 {
171 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
175 "Attempted to unlock a PTHREAD_MUTEX_NORMAL mutex that was not locked"
178 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
179 if locked_count == 1 {
180 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
183 this.eval_libc_i32("EPERM")
185 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
// checked_sub: None means the mutex was not locked at all.
186 match locked_count.checked_sub(1) {
188 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
192 // locked_count was already zero
193 this.eval_libc_i32("EPERM")
197 this.eval_libc_i32("EINVAL")
/// Emulates `pthread_mutex_destroy`: NULL pointer yields EINVAL and a
/// still-locked mutex yields EBUSY; otherwise both fields are set to Undef
/// so any later use of the destroyed mutex is caught by the interpreter.
201 fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
202 let this = self.eval_context_mut();
204 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
205 if this.is_null(mutex)? {
206 return this.eval_libc_i32("EINVAL");
// Destroying a locked mutex is an error per POSIX.
209 if mutex_get_locked_count(this, mutex_op)?.to_u32()? != 0 {
210 return this.eval_libc_i32("EBUSY");
// Poison both fields: reading them after destroy is a Miri error.
213 mutex_set_kind(this, mutex_op, ScalarMaybeUndef::Undef)?;
214 mutex_set_locked_count(this, mutex_op, ScalarMaybeUndef::Undef)?;
/// Emulates `pthread_rwlock_rdlock`: read-locking while write-locked is a
/// guaranteed deadlock in single-threaded execution (reported as
/// unsupported); otherwise the reader count is incremented, with EAGAIN on
/// u32 overflow. NOTE(review): the writers!=0 check heading the deadlock
/// message and several closing/return lines are not visible in this extract.
219 fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
220 let this = self.eval_context_mut();
222 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
223 if this.is_null(rwlock)? {
224 return this.eval_libc_i32("EINVAL");
227 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
228 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
231 "Deadlock due to read-locking a pthreads read-write lock while it is already write-locked"
// checked_add guards against wrapping the u32 reader counter.
234 match readers.checked_add(1) {
235 Some(new_readers) => {
236 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
239 None => this.eval_libc_i32("EAGAIN"),
/// Emulates `pthread_rwlock_tryrdlock`: returns EBUSY when write-locked
/// instead of deadlocking; otherwise increments the reader count, with
/// EAGAIN on u32 overflow. NOTE(review): the writers!=0 condition heading
/// the EBUSY branch and several closing/return lines are not visible in
/// this extract.
244 fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
245 let this = self.eval_context_mut();
247 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
248 if this.is_null(rwlock)? {
249 return this.eval_libc_i32("EINVAL");
252 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
253 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
255 this.eval_libc_i32("EBUSY")
// checked_add guards against wrapping the u32 reader counter.
257 match readers.checked_add(1) {
258 Some(new_readers) => {
259 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
262 None => this.eval_libc_i32("EAGAIN"),
/// Emulates `pthread_rwlock_wrlock`: write-locking while read- or
/// write-locked is a guaranteed deadlock in single-threaded execution
/// (reported as unsupported); otherwise the writer count is set to 1.
/// NOTE(review): the readers!=0 condition heading the first deadlock
/// message and the closing/return lines are not visible in this extract.
267 fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
268 let this = self.eval_context_mut();
270 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
271 if this.is_null(rwlock)? {
272 return this.eval_libc_i32("EINVAL");
275 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
276 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
279 "Deadlock due to write-locking a pthreads read-write lock while it is already read-locked"
281 } else if writers != 0 {
283 "Deadlock due to write-locking a pthreads read-write lock while it is already write-locked"
// Lock is free: take the write lock (writer count is 0 or 1).
286 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
/// Emulates `pthread_rwlock_trywrlock`: returns EBUSY when the lock is held
/// in either mode instead of deadlocking; otherwise sets the writer count
/// to 1. NOTE(review): the closing/return lines are not visible in this
/// extract.
291 fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
292 let this = self.eval_context_mut();
294 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
295 if this.is_null(rwlock)? {
296 return this.eval_libc_i32("EINVAL");
299 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
300 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
// Any existing holder (reader or writer) makes trywrlock fail fast.
301 if readers != 0 || writers != 0 {
302 this.eval_libc_i32("EBUSY")
304 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
/// Emulates `pthread_rwlock_unlock`: prefers releasing a read lock
/// (decrementing the reader count) when one is held, then a write lock
/// (resetting writers to 0); unlocking an unheld lock returns EPERM.
/// NOTE(review): the closing/return lines are not visible in this extract.
309 fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
310 let this = self.eval_context_mut();
312 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
313 if this.is_null(rwlock)? {
314 return this.eval_libc_i32("EINVAL");
317 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
318 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
// checked_sub: Some only when at least one read lock is held.
319 if let Some(new_readers) = readers.checked_sub(1) {
320 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
322 } else if writers != 0 {
323 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?;
326 this.eval_libc_i32("EPERM")
/// Emulates `pthread_rwlock_destroy`: NULL pointer yields EINVAL; a lock
/// still held by readers or a writer yields EBUSY; otherwise both counters
/// are set to Undef so later use of the destroyed lock is caught by the
/// interpreter.
330 fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
331 let this = self.eval_context_mut();
333 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
334 if this.is_null(rwlock)? {
335 return this.eval_libc_i32("EINVAL");
// Destroying a held lock is an error per POSIX.
338 if rwlock_get_readers(this, rwlock_op)?.to_u32()? != 0 {
339 return this.eval_libc_i32("EBUSY");
341 if rwlock_get_writers(this, rwlock_op)?.to_u32()? != 0 {
342 return this.eval_libc_i32("EBUSY");
// Poison both counters: reading them after destroy is a Miri error.
345 rwlock_set_readers(this, rwlock_op, ScalarMaybeUndef::Undef)?;
346 rwlock_set_writers(this, rwlock_op, ScalarMaybeUndef::Undef)?;
// Panics unless `operand` is a raw pointer whose pointee layout is at least
// `min_size` bytes, so the fixed-offset reads/writes below stay in bounds.
// NOTE(review): the `min_size` parameter declaration (original line 355) is
// not visible in this extract but is used on line 362.
352 fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(
353 ecx: &MiriEvalContext<'mir, 'tcx>,
354 operand: OpTy<'tcx, Tag>,
356 ) -> InterpResult<'tcx, ()> {
// These helpers are only ever called on pthread object pointers, so a
// non-pointer argument is a shim bug, not a program error.
357 let target_ty = match operand.layout.ty.kind {
358 TyKind::RawPtr(TypeAndMut { ty, mutbl: _ }) => ty,
359 _ => panic!("Argument to pthread function was not a raw pointer"),
361 let target_layout = ecx.layout_of(target_ty)?;
362 assert!(target_layout.size.bytes() >= min_size);
366 // pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform
367 // memory layout: store an i32 in the first four bytes equal to the
368 // corresponding libc mutex kind constant (i.e. PTHREAD_MUTEX_NORMAL)
// Reads the mutex kind (an i32) stored at offset 0 of a
// pthread_mutexattr_t; may be Undef if the attribute was never initialized.
370 fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
371 ecx: &MiriEvalContext<'mir, 'tcx>,
372 attr_op: OpTy<'tcx, Tag>,
373 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
374 // Ensure that the following read at an offset to the attr pointer is within bounds
375 assert_ptr_target_min_size(ecx, attr_op, 4)?;
376 let attr_place = ecx.deref_operand(attr_op)?;
377 let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
// Kind lives in the first four bytes (offset 0) of the attribute object.
378 let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?;
379 ecx.read_scalar(kind_place.into())
// Writes the mutex kind into offset 0 of a pthread_mutexattr_t; accepts
// `ScalarMaybeUndef::Undef` so destroy can poison the field.
382 fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
383 ecx: &mut MiriEvalContext<'mir, 'tcx>,
384 attr_op: OpTy<'tcx, Tag>,
385 kind: impl Into<ScalarMaybeUndef<Tag>>,
386 ) -> InterpResult<'tcx, ()> {
387 // Ensure that the following write at an offset to the attr pointer is within bounds
388 assert_ptr_target_min_size(ecx, attr_op, 4)?;
389 let attr_place = ecx.deref_operand(attr_op)?;
390 let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
// Kind lives in the first four bytes (offset 0) of the attribute object.
391 let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?;
392 ecx.write_scalar(kind.into(), kind_place.into())
395 // pthread_mutex_t is between 24 and 48 bytes, depending on the platform
397 // bytes 0-3: reserved for signature on macOS
398 // bytes 4-7: count of how many times this mutex has been locked, as a u32
399 // bytes 12-15: mutex kind, as an i32
400 // (the kind should be at this offset for compatibility with the static
401 // initializer macro)
// Reads the u32 lock count stored at bytes 4-7 of a pthread_mutex_t (see
// layout comment above); may be Undef for an uninitialized/destroyed mutex.
403 fn mutex_get_locked_count<'mir, 'tcx: 'mir>(
404 ecx: &MiriEvalContext<'mir, 'tcx>,
405 mutex_op: OpTy<'tcx, Tag>,
406 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
407 // Ensure that the following read at an offset to the mutex pointer is within bounds
// 16 covers the highest field touched by these helpers (kind at 12-15).
408 assert_ptr_target_min_size(ecx, mutex_op, 16)?;
409 let mutex_place = ecx.deref_operand(mutex_op)?;
410 let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
// Offset 4: bytes 0-3 are reserved for the macOS signature.
411 let locked_count_place =
412 mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
413 ecx.read_scalar(locked_count_place.into())
// Writes the u32 lock count into bytes 4-7 of a pthread_mutex_t; accepts
// `ScalarMaybeUndef::Undef` so destroy can poison the field.
416 fn mutex_set_locked_count<'mir, 'tcx: 'mir>(
417 ecx: &mut MiriEvalContext<'mir, 'tcx>,
418 mutex_op: OpTy<'tcx, Tag>,
419 locked_count: impl Into<ScalarMaybeUndef<Tag>>,
420 ) -> InterpResult<'tcx, ()> {
421 // Ensure that the following write at an offset to the mutex pointer is within bounds
422 assert_ptr_target_min_size(ecx, mutex_op, 16)?;
423 let mutex_place = ecx.deref_operand(mutex_op)?;
424 let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
// Offset 4: bytes 0-3 are reserved for the macOS signature.
425 let locked_count_place =
426 mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
427 ecx.write_scalar(locked_count.into(), locked_count_place.into())
// Reads the i32 mutex kind stored at bytes 12-15 of a pthread_mutex_t (the
// offset matches the platform static-initializer macro, per the layout
// comment above). NOTE(review): the `let kind_place =` binding head
// (original line 438) is not visible in this extract.
430 fn mutex_get_kind<'mir, 'tcx: 'mir>(
431 ecx: &MiriEvalContext<'mir, 'tcx>,
432 mutex_op: OpTy<'tcx, Tag>,
433 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
434 // Ensure that the following read at an offset to the mutex pointer is within bounds
435 assert_ptr_target_min_size(ecx, mutex_op, 16)?;
436 let mutex_place = ecx.deref_operand(mutex_op)?;
437 let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
439 mutex_place.offset(Size::from_bytes(12), MemPlaceMeta::None, i32_layout, ecx)?;
440 ecx.read_scalar(kind_place.into())
// Writes the i32 mutex kind into bytes 12-15 of a pthread_mutex_t; accepts
// `ScalarMaybeUndef::Undef` so destroy can poison the field.
// NOTE(review): the `let kind_place =` binding head (original line 452) is
// not visible in this extract.
443 fn mutex_set_kind<'mir, 'tcx: 'mir>(
444 ecx: &mut MiriEvalContext<'mir, 'tcx>,
445 mutex_op: OpTy<'tcx, Tag>,
446 kind: impl Into<ScalarMaybeUndef<Tag>>,
447 ) -> InterpResult<'tcx, ()> {
448 // Ensure that the following write at an offset to the mutex pointer is within bounds
449 assert_ptr_target_min_size(ecx, mutex_op, 16)?;
450 let mutex_place = ecx.deref_operand(mutex_op)?;
451 let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
453 mutex_place.offset(Size::from_bytes(12), MemPlaceMeta::None, i32_layout, ecx)?;
454 ecx.write_scalar(kind.into(), kind_place.into())
457 // pthread_rwlock_t is between 32 and 56 bytes, depending on the platform
459 // bytes 0-3: reserved for signature on macOS
460 // bytes 4-7: reader count, as a u32
461 // bytes 8-11: writer count, as a u32
// Reads the u32 reader count stored at bytes 4-7 of a pthread_rwlock_t
// (see layout comment above). NOTE(review): the `let readers_place =`
// binding head (original line 471) is not visible in this extract.
463 fn rwlock_get_readers<'mir, 'tcx: 'mir>(
464 ecx: &MiriEvalContext<'mir, 'tcx>,
465 rwlock_op: OpTy<'tcx, Tag>,
466 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
467 // Ensure that the following read at an offset to the rwlock pointer is within bounds
// 12 covers the highest field touched (writer count at bytes 8-11).
468 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
469 let rwlock_place = ecx.deref_operand(rwlock_op)?;
470 let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
472 rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
473 ecx.read_scalar(readers_place.into())
// Writes the u32 reader count into bytes 4-7 of a pthread_rwlock_t;
// accepts `ScalarMaybeUndef::Undef` so destroy can poison the field.
// NOTE(review): the `let readers_place =` binding head (original line 485)
// is not visible in this extract.
476 fn rwlock_set_readers<'mir, 'tcx: 'mir>(
477 ecx: &mut MiriEvalContext<'mir, 'tcx>,
478 rwlock_op: OpTy<'tcx, Tag>,
479 readers: impl Into<ScalarMaybeUndef<Tag>>,
480 ) -> InterpResult<'tcx, ()> {
481 // Ensure that the following write at an offset to the rwlock pointer is within bounds
482 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
483 let rwlock_place = ecx.deref_operand(rwlock_op)?;
484 let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
486 rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
487 ecx.write_scalar(readers.into(), readers_place.into())
// Reads the u32 writer count stored at bytes 8-11 of a pthread_rwlock_t
// (see layout comment above). NOTE(review): the `let writers_place =`
// binding head (original line 498) is not visible in this extract.
490 fn rwlock_get_writers<'mir, 'tcx: 'mir>(
491 ecx: &MiriEvalContext<'mir, 'tcx>,
492 rwlock_op: OpTy<'tcx, Tag>,
493 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
494 // Ensure that the following read at an offset to the rwlock pointer is within bounds
495 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
496 let rwlock_place = ecx.deref_operand(rwlock_op)?;
497 let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
499 rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?;
500 ecx.read_scalar(writers_place.into())
// Writes the u32 writer count into bytes 8-11 of a pthread_rwlock_t;
// accepts `ScalarMaybeUndef::Undef` so destroy can poison the field.
// NOTE(review): the `let writers_place =` binding head (original line 512)
// is not visible in this extract.
503 fn rwlock_set_writers<'mir, 'tcx: 'mir>(
504 ecx: &mut MiriEvalContext<'mir, 'tcx>,
505 rwlock_op: OpTy<'tcx, Tag>,
506 writers: impl Into<ScalarMaybeUndef<Tag>>,
507 ) -> InterpResult<'tcx, ()> {
508 // Ensure that the following write at an offset to the rwlock pointer is within bounds
509 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
510 let rwlock_place = ecx.deref_operand(rwlock_op)?;
511 let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
513 rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?;
514 ecx.write_scalar(writers.into(), writers_place.into())