use rustc_middle::ty::{TyKind, TypeAndMut};
use rustc_target::abi::{LayoutOf, Size};

use crate::stacked_borrows::Tag;
use crate::*;
7 impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
8 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
9 fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
10 let this = self.eval_context_mut();
12 let attr = this.read_scalar(attr_op)?.not_undef()?;
13 if this.is_null(attr)? {
14 return this.eval_libc_i32("EINVAL");
17 let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?;
18 mutexattr_set_kind(this, attr_op, default_kind)?;
23 fn pthread_mutexattr_settype(
25 attr_op: OpTy<'tcx, Tag>,
26 kind_op: OpTy<'tcx, Tag>,
27 ) -> InterpResult<'tcx, i32> {
28 let this = self.eval_context_mut();
30 let attr = this.read_scalar(attr_op)?.not_undef()?;
31 if this.is_null(attr)? {
32 return this.eval_libc_i32("EINVAL");
35 let kind = this.read_scalar(kind_op)?.not_undef()?;
36 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
37 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
38 || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?
40 mutexattr_set_kind(this, attr_op, kind)?;
42 let einval = this.eval_libc_i32("EINVAL")?;
49 fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
50 let this = self.eval_context_mut();
52 let attr = this.read_scalar(attr_op)?.not_undef()?;
53 if this.is_null(attr)? {
54 return this.eval_libc_i32("EINVAL");
57 mutexattr_set_kind(this, attr_op, ScalarMaybeUndef::Undef)?;
62 fn pthread_mutex_init(
64 mutex_op: OpTy<'tcx, Tag>,
65 attr_op: OpTy<'tcx, Tag>,
66 ) -> InterpResult<'tcx, i32> {
67 let this = self.eval_context_mut();
69 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
70 if this.is_null(mutex)? {
71 return this.eval_libc_i32("EINVAL");
74 let attr = this.read_scalar(attr_op)?.not_undef()?;
75 let kind = if this.is_null(attr)? {
76 this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
78 mutexattr_get_kind(this, attr_op)?.not_undef()?
81 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
82 mutex_set_kind(this, mutex_op, kind)?;
87 fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
88 let this = self.eval_context_mut();
90 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
91 if this.is_null(mutex)? {
92 return this.eval_libc_i32("EINVAL");
95 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
96 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
98 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
99 if locked_count == 0 {
100 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
103 throw_unsup_format!("Deadlock due to locking a PTHREAD_MUTEX_NORMAL mutex twice");
105 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
106 if locked_count == 0 {
107 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
110 this.eval_libc_i32("EDEADLK")
112 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
113 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(locked_count + 1))?;
116 this.eval_libc_i32("EINVAL")
120 fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
121 let this = self.eval_context_mut();
123 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
124 if this.is_null(mutex)? {
125 return this.eval_libc_i32("EINVAL");
128 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
129 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
131 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
132 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
134 if locked_count == 0 {
135 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
138 this.eval_libc_i32("EBUSY")
140 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
141 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(locked_count + 1))?;
144 this.eval_libc_i32("EINVAL")
148 fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
149 let this = self.eval_context_mut();
151 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
152 if this.is_null(mutex)? {
153 return this.eval_libc_i32("EINVAL");
156 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
157 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
159 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
160 if locked_count == 1 {
161 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
165 "Attempted to unlock a PTHREAD_MUTEX_NORMAL mutex that was not locked"
168 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
169 if locked_count == 1 {
170 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
173 this.eval_libc_i32("EPERM")
175 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
176 if locked_count > 0 {
177 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(locked_count - 1))?;
180 this.eval_libc_i32("EPERM")
183 this.eval_libc_i32("EINVAL")
187 fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
188 let this = self.eval_context_mut();
190 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
191 if this.is_null(mutex)? {
192 return this.eval_libc_i32("EINVAL");
195 if mutex_get_locked_count(this, mutex_op)?.to_u32()? != 0 {
196 return this.eval_libc_i32("EBUSY");
199 mutex_set_kind(this, mutex_op, ScalarMaybeUndef::Undef)?;
200 mutex_set_locked_count(this, mutex_op, ScalarMaybeUndef::Undef)?;
205 fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
206 let this = self.eval_context_mut();
208 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
209 if this.is_null(rwlock)? {
210 return this.eval_libc_i32("EINVAL");
213 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
214 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
217 "Deadlock due to read-locking a pthreads read-write lock while it is already write-locked"
220 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(readers + 1))?;
225 fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
226 let this = self.eval_context_mut();
228 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
229 if this.is_null(rwlock)? {
230 return this.eval_libc_i32("EINVAL");
233 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
234 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
236 this.eval_libc_i32("EBUSY")
238 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(readers + 1))?;
243 fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
244 let this = self.eval_context_mut();
246 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
247 if this.is_null(rwlock)? {
248 return this.eval_libc_i32("EINVAL");
251 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
252 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
255 "Deadlock due to write-locking a pthreads read-write lock while it is already read-locked"
257 } else if writers != 0 {
259 "Deadlock due to write-locking a pthreads read-write lock while it is already write-locked"
262 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
267 fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
268 let this = self.eval_context_mut();
270 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
271 if this.is_null(rwlock)? {
272 return this.eval_libc_i32("EINVAL");
275 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
276 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
277 if readers != 0 || writers != 0 {
278 this.eval_libc_i32("EBUSY")
280 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
285 fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
286 let this = self.eval_context_mut();
288 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
289 if this.is_null(rwlock)? {
290 return this.eval_libc_i32("EINVAL");
293 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
294 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
296 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(readers - 1))?;
298 } else if writers != 0 {
299 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?;
302 this.eval_libc_i32("EPERM")
306 fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
307 let this = self.eval_context_mut();
309 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
310 if this.is_null(rwlock)? {
311 return this.eval_libc_i32("EINVAL");
314 if rwlock_get_readers(this, rwlock_op)?.to_u32()? != 0 {
315 return this.eval_libc_i32("EBUSY");
317 if rwlock_get_writers(this, rwlock_op)?.to_u32()? != 0 {
318 return this.eval_libc_i32("EBUSY");
321 rwlock_set_readers(this, rwlock_op, ScalarMaybeUndef::Undef)?;
322 rwlock_set_writers(this, rwlock_op, ScalarMaybeUndef::Undef)?;
328 fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(
329 ecx: &MiriEvalContext<'mir, 'tcx>,
330 operand: OpTy<'tcx, Tag>,
332 ) -> InterpResult<'tcx, ()> {
333 let target_ty = match operand.layout.ty.kind {
334 TyKind::RawPtr(TypeAndMut { ty, mutbl: _ }) => ty,
335 _ => panic!("Argument to pthread function was not a raw pointer"),
337 let target_layout = ecx.layout_of(target_ty)?;
338 assert!(target_layout.size.bytes() >= min_size);
// pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.
// Our chosen memory layout: store an i32 in the first four bytes equal to the
// corresponding libc mutex kind constant (i.e. PTHREAD_MUTEX_NORMAL).
346 fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
347 ecx: &MiriEvalContext<'mir, 'tcx>,
348 attr_op: OpTy<'tcx, Tag>,
349 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
350 // Ensure that the following read at an offset to the attr pointer is within bounds
351 assert_ptr_target_min_size(ecx, attr_op, 4)?;
352 let attr_place = ecx.deref_operand(attr_op)?;
353 let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
354 let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?;
355 ecx.read_scalar(kind_place.into())
358 fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
359 ecx: &mut MiriEvalContext<'mir, 'tcx>,
360 attr_op: OpTy<'tcx, Tag>,
361 kind: impl Into<ScalarMaybeUndef<Tag>>,
362 ) -> InterpResult<'tcx, ()> {
363 // Ensure that the following write at an offset to the attr pointer is within bounds
364 assert_ptr_target_min_size(ecx, attr_op, 4)?;
365 let attr_place = ecx.deref_operand(attr_op)?;
366 let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
367 let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?;
368 ecx.write_scalar(kind.into(), kind_place.into())
// pthread_mutex_t is between 24 and 48 bytes, depending on the platform.
// Our chosen memory layout:
// bytes 0-3: reserved for signature on macOS
// bytes 4-7: count of how many times this mutex has been locked, as a u32
// bytes 12-15: mutex kind, as an i32
// (the kind should be at this offset for compatibility with the static
// initializer macro)
379 fn mutex_get_locked_count<'mir, 'tcx: 'mir>(
380 ecx: &MiriEvalContext<'mir, 'tcx>,
381 mutex_op: OpTy<'tcx, Tag>,
382 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
383 // Ensure that the following read at an offset to the mutex pointer is within bounds
384 assert_ptr_target_min_size(ecx, mutex_op, 16)?;
385 let mutex_place = ecx.deref_operand(mutex_op)?;
386 let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
387 let locked_count_place =
388 mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
389 ecx.read_scalar(locked_count_place.into())
392 fn mutex_set_locked_count<'mir, 'tcx: 'mir>(
393 ecx: &mut MiriEvalContext<'mir, 'tcx>,
394 mutex_op: OpTy<'tcx, Tag>,
395 locked_count: impl Into<ScalarMaybeUndef<Tag>>,
396 ) -> InterpResult<'tcx, ()> {
397 // Ensure that the following write at an offset to the mutex pointer is within bounds
398 assert_ptr_target_min_size(ecx, mutex_op, 16)?;
399 let mutex_place = ecx.deref_operand(mutex_op)?;
400 let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
401 let locked_count_place =
402 mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
403 ecx.write_scalar(locked_count.into(), locked_count_place.into())
406 fn mutex_get_kind<'mir, 'tcx: 'mir>(
407 ecx: &MiriEvalContext<'mir, 'tcx>,
408 mutex_op: OpTy<'tcx, Tag>,
409 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
410 // Ensure that the following read at an offset to the mutex pointer is within bounds
411 assert_ptr_target_min_size(ecx, mutex_op, 16)?;
412 let mutex_place = ecx.deref_operand(mutex_op)?;
413 let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
415 mutex_place.offset(Size::from_bytes(12), MemPlaceMeta::None, i32_layout, ecx)?;
416 ecx.read_scalar(kind_place.into())
419 fn mutex_set_kind<'mir, 'tcx: 'mir>(
420 ecx: &mut MiriEvalContext<'mir, 'tcx>,
421 mutex_op: OpTy<'tcx, Tag>,
422 kind: impl Into<ScalarMaybeUndef<Tag>>,
423 ) -> InterpResult<'tcx, ()> {
424 // Ensure that the following write at an offset to the mutex pointer is within bounds
425 assert_ptr_target_min_size(ecx, mutex_op, 16)?;
426 let mutex_place = ecx.deref_operand(mutex_op)?;
427 let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
429 mutex_place.offset(Size::from_bytes(12), MemPlaceMeta::None, i32_layout, ecx)?;
430 ecx.write_scalar(kind.into(), kind_place.into())
// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
// Our chosen memory layout:
// bytes 0-3: reserved for signature on macOS
// bytes 4-7: reader count, as a u32
// bytes 8-11: writer count, as a u32
439 fn rwlock_get_readers<'mir, 'tcx: 'mir>(
440 ecx: &MiriEvalContext<'mir, 'tcx>,
441 rwlock_op: OpTy<'tcx, Tag>,
442 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
443 // Ensure that the following read at an offset to the rwlock pointer is within bounds
444 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
445 let rwlock_place = ecx.deref_operand(rwlock_op)?;
446 let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
448 rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
449 ecx.read_scalar(readers_place.into())
452 fn rwlock_set_readers<'mir, 'tcx: 'mir>(
453 ecx: &mut MiriEvalContext<'mir, 'tcx>,
454 rwlock_op: OpTy<'tcx, Tag>,
455 readers: impl Into<ScalarMaybeUndef<Tag>>,
456 ) -> InterpResult<'tcx, ()> {
457 // Ensure that the following write at an offset to the rwlock pointer is within bounds
458 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
459 let rwlock_place = ecx.deref_operand(rwlock_op)?;
460 let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
462 rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
463 ecx.write_scalar(readers.into(), readers_place.into())
466 fn rwlock_get_writers<'mir, 'tcx: 'mir>(
467 ecx: &MiriEvalContext<'mir, 'tcx>,
468 rwlock_op: OpTy<'tcx, Tag>,
469 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
470 // Ensure that the following read at an offset to the rwlock pointer is within bounds
471 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
472 let rwlock_place = ecx.deref_operand(rwlock_op)?;
473 let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
475 rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?;
476 ecx.read_scalar(writers_place.into())
479 fn rwlock_set_writers<'mir, 'tcx: 'mir>(
480 ecx: &mut MiriEvalContext<'mir, 'tcx>,
481 rwlock_op: OpTy<'tcx, Tag>,
482 writers: impl Into<ScalarMaybeUndef<Tag>>,
483 ) -> InterpResult<'tcx, ()> {
484 // Ensure that the following write at an offset to the rwlock pointer is within bounds
485 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
486 let rwlock_place = ecx.deref_operand(rwlock_op)?;
487 let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
489 rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?;
490 ecx.write_scalar(writers.into(), writers_place.into())