1 use rustc_middle::ty::{TyKind, TypeAndMut};
2 use rustc_target::abi::{LayoutOf, Size};
4 use crate::stacked_borrows::Tag;
7 fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(
8 ecx: &MiriEvalContext<'mir, 'tcx>,
9 operand: OpTy<'tcx, Tag>,
11 ) -> InterpResult<'tcx, ()> {
12 let target_ty = match operand.layout.ty.kind {
13 TyKind::RawPtr(TypeAndMut { ty, mutbl: _ }) => ty,
14 _ => panic!("Argument to pthread function was not a raw pointer"),
16 let target_layout = ecx.layout_of(target_ty)?;
17 assert!(target_layout.size.bytes() >= min_size);
21 // pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.
23 // Our chosen memory layout for emulation (does not have to match the platform layout!):
24 // store an i32 in the first four bytes equal to the corresponding libc mutex kind constant
25 // (e.g. PTHREAD_MUTEX_NORMAL).
27 fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
28 ecx: &MiriEvalContext<'mir, 'tcx>,
29 attr_op: OpTy<'tcx, Tag>,
30 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
31 // Ensure that the following read at an offset to the attr pointer is within bounds
32 assert_ptr_target_min_size(ecx, attr_op, 4)?;
33 let attr_place = ecx.deref_operand(attr_op)?;
34 let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.i32_layout()?, ecx)?;
35 ecx.read_scalar(kind_place.into())
38 fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
39 ecx: &mut MiriEvalContext<'mir, 'tcx>,
40 attr_op: OpTy<'tcx, Tag>,
41 kind: impl Into<ScalarMaybeUndef<Tag>>,
42 ) -> InterpResult<'tcx, ()> {
43 // Ensure that the following write at an offset to the attr pointer is within bounds
44 assert_ptr_target_min_size(ecx, attr_op, 4)?;
45 let attr_place = ecx.deref_operand(attr_op)?;
46 let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.i32_layout()?, ecx)?;
47 ecx.write_scalar(kind.into(), kind_place.into())
50 // pthread_mutex_t is between 24 and 48 bytes, depending on the platform.
52 // Our chosen memory layout for the emulated mutex (does not have to match the platform layout!):
53 // bytes 0-3: reserved for signature on macOS
54 // (need to avoid this because it is set by static initializer macros)
55 // bytes 4-7: count of how many times this mutex has been locked, as a u32
56 // bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
57 // (the kind has to be at its offset for compatibility with static initializer macros)
59 fn mutex_get_locked_count<'mir, 'tcx: 'mir>(
60 ecx: &MiriEvalContext<'mir, 'tcx>,
61 mutex_op: OpTy<'tcx, Tag>,
62 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
63 // Ensure that the following read at an offset to the mutex pointer is within bounds
64 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
65 let mutex_place = ecx.deref_operand(mutex_op)?;
66 let locked_count_place =
67 mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
68 ecx.read_scalar(locked_count_place.into())
71 fn mutex_set_locked_count<'mir, 'tcx: 'mir>(
72 ecx: &mut MiriEvalContext<'mir, 'tcx>,
73 mutex_op: OpTy<'tcx, Tag>,
74 locked_count: impl Into<ScalarMaybeUndef<Tag>>,
75 ) -> InterpResult<'tcx, ()> {
76 // Ensure that the following write at an offset to the mutex pointer is within bounds
77 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
78 let mutex_place = ecx.deref_operand(mutex_op)?;
79 let locked_count_place =
80 mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
81 ecx.write_scalar(locked_count.into(), locked_count_place.into())
84 fn mutex_get_kind<'mir, 'tcx: 'mir>(
85 ecx: &mut MiriEvalContext<'mir, 'tcx>,
86 mutex_op: OpTy<'tcx, Tag>,
87 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
88 // Ensure that the following read at an offset to the mutex pointer is within bounds
89 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
90 let mutex_place = ecx.deref_operand(mutex_op)?;
91 let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
92 let kind_place = mutex_place.offset(
93 Size::from_bytes(kind_offset),
98 ecx.read_scalar(kind_place.into())
101 fn mutex_set_kind<'mir, 'tcx: 'mir>(
102 ecx: &mut MiriEvalContext<'mir, 'tcx>,
103 mutex_op: OpTy<'tcx, Tag>,
104 kind: impl Into<ScalarMaybeUndef<Tag>>,
105 ) -> InterpResult<'tcx, ()> {
106 // Ensure that the following write at an offset to the mutex pointer is within bounds
107 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
108 let mutex_place = ecx.deref_operand(mutex_op)?;
109 let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
110 let kind_place = mutex_place.offset(
111 Size::from_bytes(kind_offset),
116 ecx.write_scalar(kind.into(), kind_place.into())
119 // pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
121 // Our chosen memory layout for the emulated rwlock (does not have to match the platform layout!):
122 // bytes 0-3: reserved for signature on macOS
123 // (need to avoid this because it is set by static initializer macros)
124 // bytes 4-7: reader count, as a u32
125 // bytes 8-11: writer count, as a u32
127 fn rwlock_get_readers<'mir, 'tcx: 'mir>(
128 ecx: &MiriEvalContext<'mir, 'tcx>,
129 rwlock_op: OpTy<'tcx, Tag>,
130 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
131 // Ensure that the following read at an offset to the rwlock pointer is within bounds
132 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
133 let rwlock_place = ecx.deref_operand(rwlock_op)?;
135 rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
136 ecx.read_scalar(readers_place.into())
139 fn rwlock_set_readers<'mir, 'tcx: 'mir>(
140 ecx: &mut MiriEvalContext<'mir, 'tcx>,
141 rwlock_op: OpTy<'tcx, Tag>,
142 readers: impl Into<ScalarMaybeUndef<Tag>>,
143 ) -> InterpResult<'tcx, ()> {
144 // Ensure that the following write at an offset to the rwlock pointer is within bounds
145 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
146 let rwlock_place = ecx.deref_operand(rwlock_op)?;
148 rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
149 ecx.write_scalar(readers.into(), readers_place.into())
152 fn rwlock_get_writers<'mir, 'tcx: 'mir>(
153 ecx: &MiriEvalContext<'mir, 'tcx>,
154 rwlock_op: OpTy<'tcx, Tag>,
155 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
156 // Ensure that the following read at an offset to the rwlock pointer is within bounds
157 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
158 let rwlock_place = ecx.deref_operand(rwlock_op)?;
160 rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
161 ecx.read_scalar(writers_place.into())
164 fn rwlock_set_writers<'mir, 'tcx: 'mir>(
165 ecx: &mut MiriEvalContext<'mir, 'tcx>,
166 rwlock_op: OpTy<'tcx, Tag>,
167 writers: impl Into<ScalarMaybeUndef<Tag>>,
168 ) -> InterpResult<'tcx, ()> {
169 // Ensure that the following write at an offset to the rwlock pointer is within bounds
170 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
171 let rwlock_place = ecx.deref_operand(rwlock_op)?;
173 rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
174 ecx.write_scalar(writers.into(), writers_place.into())
// Blanket impl: every method below has a default body, so this empty impl
// makes all the pthread shims available on Miri's evaluation context.
177 impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
// Shims for the pthreads mutex/rwlock API, operating on the emulated memory
// layouts documented above. Since Miri (at this point) is single-threaded,
// "blocking" situations are reported as deadlocks instead of waiting.
//
// NOTE(review): this excerpt elides a number of original lines (closing
// braces, `Ok(0)` success returns, `} else {` arms, error construction).
// Code lines are kept byte-identical to the excerpt; only comments are added.
// Comments about elided control flow are inferences — confirm against the
// full file.
178 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
// Initialize a mutexattr: EINVAL for NULL, else store PTHREAD_MUTEX_DEFAULT.
179 fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
180 let this = self.eval_context_mut();
182 let attr = this.read_scalar(attr_op)?.not_undef()?;
183 if this.is_null(attr)? {
184 return this.eval_libc_i32("EINVAL");
// (brace closing the null check and the `Ok(0)` return are elided here)
187 let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?;
188 mutexattr_set_kind(this, attr_op, default_kind)?;
// Set the mutex kind on an attr: EINVAL for NULL attr; only the three
// standard kinds (NORMAL / ERRORCHECK / RECURSIVE) are accepted; any other
// value presumably yields the `einval` built below — elided lines, confirm.
193 fn pthread_mutexattr_settype(
195 attr_op: OpTy<'tcx, Tag>,
196 kind_op: OpTy<'tcx, Tag>,
197 ) -> InterpResult<'tcx, i32> {
198 let this = self.eval_context_mut();
200 let attr = this.read_scalar(attr_op)?.not_undef()?;
201 if this.is_null(attr)? {
202 return this.eval_libc_i32("EINVAL");
205 let kind = this.read_scalar(kind_op)?.not_undef()?;
206 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
207 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
208 || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?
210 mutexattr_set_kind(this, attr_op, kind)?;
212 let einval = this.eval_libc_i32("EINVAL")?;
// Destroy an attr: EINVAL for NULL; poison the kind field with Undef so a
// later use of the destroyed attr is caught by the interpreter.
219 fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
220 let this = self.eval_context_mut();
222 let attr = this.read_scalar(attr_op)?.not_undef()?;
223 if this.is_null(attr)? {
224 return this.eval_libc_i32("EINVAL");
227 mutexattr_set_kind(this, attr_op, ScalarMaybeUndef::Undef)?;
// Initialize a mutex: EINVAL for NULL mutex; kind comes from the attr, or
// PTHREAD_MUTEX_DEFAULT when attr is NULL; lock counter starts at 0.
232 fn pthread_mutex_init(
234 mutex_op: OpTy<'tcx, Tag>,
235 attr_op: OpTy<'tcx, Tag>,
236 ) -> InterpResult<'tcx, i32> {
237 let this = self.eval_context_mut();
239 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
240 if this.is_null(mutex)? {
241 return this.eval_libc_i32("EINVAL");
244 let attr = this.read_scalar(attr_op)?.not_undef()?;
245 let kind = if this.is_null(attr)? {
246 this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
// (else-arm line elided: read the kind out of the attr instead)
248 mutexattr_get_kind(this, attr_op)?.not_undef()?
251 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
252 mutex_set_kind(this, mutex_op, kind)?;
// Lock: NORMAL re-lock is a guaranteed deadlock (we are single-threaded, so
// we stop the machine); ERRORCHECK re-lock returns EDEADLK; RECURSIVE
// increments the counter, EAGAIN on u32 overflow; unknown kind -> EINVAL.
257 fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
258 let this = self.eval_context_mut();
260 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
261 if this.is_null(mutex)? {
262 return this.eval_libc_i32("EINVAL");
265 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
266 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
268 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
269 if locked_count == 0 {
270 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
// (else-arm: the mutex is already held, so locking again can never succeed)
273 throw_machine_stop!(TerminationInfo::Deadlock);
275 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
276 if locked_count == 0 {
277 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
280 this.eval_libc_i32("EDEADLK")
282 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
283 match locked_count.checked_add(1) {
// (Some(new_count) arm header elided)
285 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
288 None => this.eval_libc_i32("EAGAIN"),
// (final else: kind is not one of the three known constants)
291 this.eval_libc_i32("EINVAL")
// Trylock: same as lock, except an already-held NORMAL/ERRORCHECK mutex
// reports EBUSY instead of deadlocking.
295 fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
296 let this = self.eval_context_mut();
298 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
299 if this.is_null(mutex)? {
300 return this.eval_libc_i32("EINVAL");
303 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
304 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
306 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
307 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
309 if locked_count == 0 {
310 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
313 this.eval_libc_i32("EBUSY")
315 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
316 match locked_count.checked_add(1) {
318 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
321 None => this.eval_libc_i32("EAGAIN"),
324 this.eval_libc_i32("EINVAL")
// Unlock: NORMAL unlock-of-unlocked is undefined behavior, reported as an
// interpreter error; ERRORCHECK returns EPERM; RECURSIVE decrements,
// EPERM when the count is already zero; unknown kind -> EINVAL.
328 fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
329 let this = self.eval_context_mut();
331 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
332 if this.is_null(mutex)? {
333 return this.eval_libc_i32("EINVAL");
336 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
337 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
339 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
340 if locked_count != 0 {
341 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
// (the error-raising wrapper around this message is elided)
345 "Attempted to unlock a PTHREAD_MUTEX_NORMAL mutex that was not locked"
348 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
349 if locked_count != 0 {
350 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
353 this.eval_libc_i32("EPERM")
355 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
356 match locked_count.checked_sub(1) {
358 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
362 // locked_count was already zero
363 this.eval_libc_i32("EPERM")
367 this.eval_libc_i32("EINVAL")
// Destroy a mutex: EBUSY while locked; otherwise poison kind and counter
// with Undef so use-after-destroy is caught.
371 fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
372 let this = self.eval_context_mut();
374 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
375 if this.is_null(mutex)? {
376 return this.eval_libc_i32("EINVAL");
379 if mutex_get_locked_count(this, mutex_op)?.to_u32()? != 0 {
380 return this.eval_libc_i32("EBUSY");
383 mutex_set_kind(this, mutex_op, ScalarMaybeUndef::Undef)?;
384 mutex_set_locked_count(this, mutex_op, ScalarMaybeUndef::Undef)?;
// Read-lock: write-locked means guaranteed deadlock (single-threaded);
// otherwise increment the reader count, EAGAIN on overflow.
389 fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
390 let this = self.eval_context_mut();
392 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
393 if this.is_null(rwlock)? {
394 return this.eval_libc_i32("EINVAL");
397 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
398 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
// (the `if writers != 0` guard and error wrapper around this message are elided)
401 "Deadlock due to read-locking a pthreads read-write lock while it is already write-locked"
404 match readers.checked_add(1) {
405 Some(new_readers) => {
406 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
409 None => this.eval_libc_i32("EAGAIN"),
// Try-read-lock: EBUSY instead of deadlock when write-locked.
414 fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
415 let this = self.eval_context_mut();
417 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
418 if this.is_null(rwlock)? {
419 return this.eval_libc_i32("EINVAL");
422 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
423 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
// (guard `if writers != 0` elided)
425 this.eval_libc_i32("EBUSY")
427 match readers.checked_add(1) {
428 Some(new_readers) => {
429 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
432 None => this.eval_libc_i32("EAGAIN"),
// Write-lock: any existing reader or writer means guaranteed deadlock;
// otherwise record a single writer.
437 fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
438 let this = self.eval_context_mut();
440 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
441 if this.is_null(rwlock)? {
442 return this.eval_libc_i32("EINVAL");
445 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
446 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
// (guard `if readers != 0` and error wrapper elided)
449 "Deadlock due to write-locking a pthreads read-write lock while it is already read-locked"
451 } else if writers != 0 {
453 "Deadlock due to write-locking a pthreads read-write lock while it is already write-locked"
456 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
// Try-write-lock: EBUSY when anybody holds the lock, else writers := 1.
461 fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
462 let this = self.eval_context_mut();
464 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
465 if this.is_null(rwlock)? {
466 return this.eval_libc_i32("EINVAL");
469 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
470 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
471 if readers != 0 || writers != 0 {
472 this.eval_libc_i32("EBUSY")
474 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
// Unlock: release one reader if any are held; otherwise release the writer
// if held; unlocking an unheld rwlock yields EPERM.
479 fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
480 let this = self.eval_context_mut();
482 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
483 if this.is_null(rwlock)? {
484 return this.eval_libc_i32("EINVAL");
487 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
488 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
489 if let Some(new_readers) = readers.checked_sub(1) {
490 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
492 } else if writers != 0 {
493 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?;
496 this.eval_libc_i32("EPERM")
// Destroy an rwlock: EBUSY while any reader or writer holds it; otherwise
// poison both counters with Undef. (Trailing lines run past this excerpt.)
500 fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
501 let this = self.eval_context_mut();
503 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
504 if this.is_null(rwlock)? {
505 return this.eval_libc_i32("EINVAL");
508 if rwlock_get_readers(this, rwlock_op)?.to_u32()? != 0 {
509 return this.eval_libc_i32("EBUSY");
511 if rwlock_get_writers(this, rwlock_op)?.to_u32()? != 0 {
512 return this.eval_libc_i32("EBUSY");
515 rwlock_set_readers(this, rwlock_op, ScalarMaybeUndef::Undef)?;
516 rwlock_set_writers(this, rwlock_op, ScalarMaybeUndef::Undef)?;