use rustc_middle::ty::{TyKind, TypeAndMut};
use rustc_target::abi::{LayoutOf, Size};

use crate::stacked_borrows::Tag;
use crate::*;

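/// Check that the pointer-typed `operand` points to a target type of at least
/// `min_size` bytes, so that the offset reads and writes performed by the
/// helpers below stay within bounds.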
fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    operand: OpTy<'tcx, Tag>,
    min_size: u64,
) -> InterpResult<'tcx, ()> {
    let target_ty = match operand.layout.ty.kind {
        TyKind::RawPtr(TypeAndMut { ty, mutbl: _ }) => ty,
        _ => panic!("Argument to pthread function was not a raw pointer"),
    };
    let target_layout = ecx.layout_of(target_ty)?;
    assert!(target_layout.size.bytes() >= min_size);
    Ok(())
}

// pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.

// Our chosen memory layout: store an i32 in the first four bytes equal to the
// corresponding libc mutex kind constant (i.e. PTHREAD_MUTEX_NORMAL).
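//
// Illustrative example (not part of the emulation itself): after a call like
// `pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)`, bytes 0-3 of the
// attribute object hold the i32 value of PTHREAD_MUTEX_RECURSIVE, and
// `mutexattr_get_kind` below reads that same value back.
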
fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    attr_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
    // Ensure that the following read at an offset to the attr pointer is within bounds
    assert_ptr_target_min_size(ecx, attr_op, 4)?;
    let attr_place = ecx.deref_operand(attr_op)?;
    let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
    let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?;
    ecx.read_scalar(kind_place.into())
}

fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    attr_op: OpTy<'tcx, Tag>,
    kind: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
    // Ensure that the following write at an offset to the attr pointer is within bounds
    assert_ptr_target_min_size(ecx, attr_op, 4)?;
    let attr_place = ecx.deref_operand(attr_op)?;
    let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
    let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?;
    ecx.write_scalar(kind.into(), kind_place.into())
}

// pthread_mutex_t is between 24 and 48 bytes, depending on the platform.

// Our chosen memory layout:
// bytes 0-3: reserved for signature on macOS
// (need to avoid this because it is set by static initializer macros)
// bytes 4-7: count of how many times this mutex has been locked, as a u32
// bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
// (the kind has to be at its offset for compatibility with static initializer macros)
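//
// For example, the 16-byte (64-bit targets) or 12-byte (32-bit targets) kind offset
// used below is presumably chosen to line up with where glibc's pthread_mutex_t keeps
// its kind field, so a mutex created by a static initializer macro still reports the
// kind we expect. Treat the glibc detail as an assumption, not something this code checks.
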
fn mutex_get_locked_count<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
    // Ensure that the following read at an offset to the mutex pointer is within bounds
    assert_ptr_target_min_size(ecx, mutex_op, 20)?;
    let mutex_place = ecx.deref_operand(mutex_op)?;
    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
    let locked_count_place =
        mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
    ecx.read_scalar(locked_count_place.into())
}

fn mutex_set_locked_count<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
    locked_count: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
    // Ensure that the following write at an offset to the mutex pointer is within bounds
    assert_ptr_target_min_size(ecx, mutex_op, 20)?;
    let mutex_place = ecx.deref_operand(mutex_op)?;
    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
    let locked_count_place =
        mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
    ecx.write_scalar(locked_count.into(), locked_count_place.into())
}

fn mutex_get_kind<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
    // Ensure that the following read at an offset to the mutex pointer is within bounds
    assert_ptr_target_min_size(ecx, mutex_op, 20)?;
    let mutex_place = ecx.deref_operand(mutex_op)?;
    let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
    let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
    let kind_place =
        mutex_place.offset(Size::from_bytes(kind_offset), MemPlaceMeta::None, i32_layout, ecx)?;
    ecx.read_scalar(kind_place.into())
}

fn mutex_set_kind<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
    kind: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
    // Ensure that the following write at an offset to the mutex pointer is within bounds
    assert_ptr_target_min_size(ecx, mutex_op, 20)?;
    let mutex_place = ecx.deref_operand(mutex_op)?;
    let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
    let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
    let kind_place =
        mutex_place.offset(Size::from_bytes(kind_offset), MemPlaceMeta::None, i32_layout, ecx)?;
    ecx.write_scalar(kind.into(), kind_place.into())
}

// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.

// Our chosen memory layout:
// bytes 0-3: reserved for signature on macOS
// (need to avoid this because it is set by static initializer macros)
// bytes 4-7: reader count, as a u32
// bytes 8-11: writer count, as a u32
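//
// Illustrative example: a lock that is read-locked twice holds 2 in bytes 4-7 and
// 0 in bytes 8-11, while a write-locked one holds 0 readers and 1 writer.
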
fn rwlock_get_readers<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
    // Ensure that the following read at an offset to the rwlock pointer is within bounds
    assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
    let rwlock_place = ecx.deref_operand(rwlock_op)?;
    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
    let readers_place =
        rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
    ecx.read_scalar(readers_place.into())
}

fn rwlock_set_readers<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
    readers: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
    // Ensure that the following write at an offset to the rwlock pointer is within bounds
    assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
    let rwlock_place = ecx.deref_operand(rwlock_op)?;
    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
    let readers_place =
        rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
    ecx.write_scalar(readers.into(), readers_place.into())
}

fn rwlock_get_writers<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
    // Ensure that the following read at an offset to the rwlock pointer is within bounds
    assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
    let rwlock_place = ecx.deref_operand(rwlock_op)?;
    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
    let writers_place =
        rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?;
    ecx.read_scalar(writers_place.into())
}

fn rwlock_set_writers<'mir, 'tcx: 'mir>(
    ecx: &mut MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
    writers: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
    // Ensure that the following write at an offset to the rwlock pointer is within bounds
    assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
    let rwlock_place = ecx.deref_operand(rwlock_op)?;
    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
    let writers_place =
        rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?;
    ecx.write_scalar(writers.into(), writers_place.into())
}

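// The trait below contains the pthread shims themselves; Miri dispatches calls to the
// corresponding C functions here (e.g. from its foreign-function handling). Each shim
// returns the i32 the C function would return: 0 on success, or an errno-style code
// such as EINVAL/EBUSY/EPERM on failure.
//
// Illustrative guest code that these shims back when run under Miri (a sketch using
// the `libc` crate, not code from this module):
//
//     let mut m: libc::pthread_mutex_t = unsafe { std::mem::zeroed() };
//     unsafe {
//         libc::pthread_mutex_init(&mut m, std::ptr::null());
//         libc::pthread_mutex_lock(&mut m);
//         libc::pthread_mutex_unlock(&mut m);
//         libc::pthread_mutex_destroy(&mut m);
//     }
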
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let attr = this.read_scalar(attr_op)?.not_undef()?;
        if this.is_null(attr)? {
            return this.eval_libc_i32("EINVAL");
        }

        let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?;
        mutexattr_set_kind(this, attr_op, default_kind)?;

        Ok(0)
    }

    fn pthread_mutexattr_settype(
        &mut self,
        attr_op: OpTy<'tcx, Tag>,
        kind_op: OpTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let attr = this.read_scalar(attr_op)?.not_undef()?;
        if this.is_null(attr)? {
            return this.eval_libc_i32("EINVAL");
        }

        let kind = this.read_scalar(kind_op)?.not_undef()?;
        if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
            || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
            || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?
        {
            mutexattr_set_kind(this, attr_op, kind)?;
        } else {
            let einval = this.eval_libc_i32("EINVAL")?;
            return Ok(einval);
        }

        Ok(0)
    }

    fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let attr = this.read_scalar(attr_op)?.not_undef()?;
        if this.is_null(attr)? {
            return this.eval_libc_i32("EINVAL");
        }

        mutexattr_set_kind(this, attr_op, ScalarMaybeUndef::Undef)?;

        Ok(0)
    }

    fn pthread_mutex_init(
        &mut self,
        mutex_op: OpTy<'tcx, Tag>,
        attr_op: OpTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let mutex = this.read_scalar(mutex_op)?.not_undef()?;
        if this.is_null(mutex)? {
            return this.eval_libc_i32("EINVAL");
        }

        let attr = this.read_scalar(attr_op)?.not_undef()?;
        let kind = if this.is_null(attr)? {
            this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
        } else {
            mutexattr_get_kind(this, attr_op)?.not_undef()?
        };

        mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
        mutex_set_kind(this, mutex_op, kind)?;

        Ok(0)
    }

    fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let mutex = this.read_scalar(mutex_op)?.not_undef()?;
        if this.is_null(mutex)? {
            return this.eval_libc_i32("EINVAL");
        }

        let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
        let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;

        if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
            if locked_count == 0 {
                mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
                Ok(0)
            } else {
                throw_machine_stop!(TerminationInfo::Deadlock);
            }
        } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
            if locked_count == 0 {
                mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
                Ok(0)
            } else {
                this.eval_libc_i32("EDEADLK")
            }
        } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
            match locked_count.checked_add(1) {
                Some(new_count) => {
                    mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
                    Ok(0)
                }
                None => this.eval_libc_i32("EAGAIN"),
            }
        } else {
            this.eval_libc_i32("EINVAL")
        }
    }

    fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let mutex = this.read_scalar(mutex_op)?.not_undef()?;
        if this.is_null(mutex)? {
            return this.eval_libc_i32("EINVAL");
        }

        let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
        let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;

        if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
            || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
        {
            if locked_count == 0 {
                mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
                Ok(0)
            } else {
                this.eval_libc_i32("EBUSY")
            }
        } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
            match locked_count.checked_add(1) {
                Some(new_count) => {
                    mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
                    Ok(0)
                }
                None => this.eval_libc_i32("EAGAIN"),
            }
        } else {
            this.eval_libc_i32("EINVAL")
        }
    }

    fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let mutex = this.read_scalar(mutex_op)?.not_undef()?;
        if this.is_null(mutex)? {
            return this.eval_libc_i32("EINVAL");
        }

        let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
        let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;

        if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
            if locked_count != 0 {
                mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
                Ok(0)
            } else {
                throw_ub_format!(
                    "Attempted to unlock a PTHREAD_MUTEX_NORMAL mutex that was not locked"
                );
            }
        } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
            if locked_count != 0 {
                mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
                Ok(0)
            } else {
                this.eval_libc_i32("EPERM")
            }
        } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
            match locked_count.checked_sub(1) {
                Some(new_count) => {
                    mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
                    Ok(0)
                }
                None => {
                    // locked_count was already zero
                    this.eval_libc_i32("EPERM")
                }
            }
        } else {
            this.eval_libc_i32("EINVAL")
        }
    }

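    // Note on the recursive case above: each successful lock/trylock of a
    // PTHREAD_MUTEX_RECURSIVE mutex increments locked_count and each unlock decrements
    // it, so e.g. locking twice requires unlocking twice before the mutex is free again.
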
    fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let mutex = this.read_scalar(mutex_op)?.not_undef()?;
        if this.is_null(mutex)? {
            return this.eval_libc_i32("EINVAL");
        }

        if mutex_get_locked_count(this, mutex_op)?.to_u32()? != 0 {
            return this.eval_libc_i32("EBUSY");
        }

        mutex_set_kind(this, mutex_op, ScalarMaybeUndef::Undef)?;
        mutex_set_locked_count(this, mutex_op, ScalarMaybeUndef::Undef)?;

        Ok(0)
    }

    fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
        if this.is_null(rwlock)? {
            return this.eval_libc_i32("EINVAL");
        }

        let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
        let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
        if writers != 0 {
            throw_ub_format!(
                "Deadlock due to read-locking a pthreads read-write lock while it is already write-locked"
            );
        } else {
            match readers.checked_add(1) {
                Some(new_readers) => {
                    rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
                    Ok(0)
                }
                None => this.eval_libc_i32("EAGAIN"),
            }
        }
    }

    fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
        if this.is_null(rwlock)? {
            return this.eval_libc_i32("EINVAL");
        }

        let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
        let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
        if writers != 0 {
            this.eval_libc_i32("EBUSY")
        } else {
            match readers.checked_add(1) {
                Some(new_readers) => {
                    rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
                    Ok(0)
                }
                None => this.eval_libc_i32("EAGAIN"),
            }
        }
    }

    fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
        if this.is_null(rwlock)? {
            return this.eval_libc_i32("EINVAL");
        }

        let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
        let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
        if readers != 0 {
            throw_ub_format!(
                "Deadlock due to write-locking a pthreads read-write lock while it is already read-locked"
            );
        } else if writers != 0 {
            throw_ub_format!(
                "Deadlock due to write-locking a pthreads read-write lock while it is already write-locked"
            );
        } else {
            rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
            Ok(0)
        }
    }

    fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
        if this.is_null(rwlock)? {
            return this.eval_libc_i32("EINVAL");
        }

        let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
        let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
        if readers != 0 || writers != 0 {
            this.eval_libc_i32("EBUSY")
        } else {
            rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
            Ok(0)
        }
    }

    fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
        if this.is_null(rwlock)? {
            return this.eval_libc_i32("EINVAL");
        }

        let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
        let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
        if let Some(new_readers) = readers.checked_sub(1) {
            rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
            Ok(0)
        } else if writers != 0 {
            rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?;
            Ok(0)
        } else {
            this.eval_libc_i32("EPERM")
        }
    }

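    // Note on the unlock above: the emulation does not track which kind of lock the
    // caller holds, so it releases a read lock whenever the reader count is nonzero and
    // only otherwise clears the write lock; unlocking an unlocked rwlock yields EPERM.
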
    fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
        if this.is_null(rwlock)? {
            return this.eval_libc_i32("EINVAL");
        }

        if rwlock_get_readers(this, rwlock_op)?.to_u32()? != 0 {
            return this.eval_libc_i32("EBUSY");
        }
        if rwlock_get_writers(this, rwlock_op)?.to_u32()? != 0 {
            return this.eval_libc_i32("EBUSY");
        }

        rwlock_set_readers(this, rwlock_op, ScalarMaybeUndef::Undef)?;
        rwlock_set_writers(this, rwlock_op, ScalarMaybeUndef::Undef)?;

        Ok(0)
    }
}