use rustc_middle::ty::{TyKind, TypeAndMut};
use rustc_target::abi::{LayoutOf, Size};

use crate::stacked_borrows::Tag;
use crate::*;
7 fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(
8 ecx: &MiriEvalContext<'mir, 'tcx>,
9 operand: OpTy<'tcx, Tag>,
11 ) -> InterpResult<'tcx, ()> {
12 let target_ty = match operand.layout.ty.kind {
13 TyKind::RawPtr(TypeAndMut { ty, mutbl: _ }) => ty,
14 _ => panic!("Argument to pthread function was not a raw pointer"),
16 let target_layout = ecx.layout_of(target_ty)?;
17 assert!(target_layout.size.bytes() >= min_size);
// pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.

// Our chosen memory layout for emulation (does not have to match the platform layout!):
// store an i32 in the first four bytes equal to the corresponding libc mutex kind constant
// (e.g. PTHREAD_MUTEX_NORMAL).
27 fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
28 ecx: &MiriEvalContext<'mir, 'tcx>,
29 attr_op: OpTy<'tcx, Tag>,
30 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
31 // Ensure that the following read at an offset to the attr pointer is within bounds
32 assert_ptr_target_min_size(ecx, attr_op, 4)?;
33 let attr_place = ecx.deref_operand(attr_op)?;
34 let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.i32_layout()?, ecx)?;
35 ecx.read_scalar(kind_place.into())
38 fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
39 ecx: &mut MiriEvalContext<'mir, 'tcx>,
40 attr_op: OpTy<'tcx, Tag>,
41 kind: impl Into<ScalarMaybeUndef<Tag>>,
42 ) -> InterpResult<'tcx, ()> {
43 // Ensure that the following write at an offset to the attr pointer is within bounds
44 assert_ptr_target_min_size(ecx, attr_op, 4)?;
45 let attr_place = ecx.deref_operand(attr_op)?;
46 let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.i32_layout()?, ecx)?;
47 ecx.write_scalar(kind.into(), kind_place.into())
// pthread_mutex_t is between 24 and 48 bytes, depending on the platform.

// Our chosen memory layout for the emulated mutex (does not have to match the platform layout!):
// bytes 0-3: reserved for signature on macOS
// (need to avoid this because it is set by static initializer macros)
// bytes 4-7: count of how many times this mutex has been locked, as a u32
// bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
// (the kind has to be at its offset for compatibility with static initializer macros)
59 fn mutex_get_locked_count<'mir, 'tcx: 'mir>(
60 ecx: &MiriEvalContext<'mir, 'tcx>,
61 mutex_op: OpTy<'tcx, Tag>,
62 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
63 // Ensure that the following read at an offset to the mutex pointer is within bounds
64 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
65 let mutex_place = ecx.deref_operand(mutex_op)?;
66 let locked_count_place =
67 mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
68 ecx.read_scalar(locked_count_place.into())
71 fn mutex_set_locked_count<'mir, 'tcx: 'mir>(
72 ecx: &mut MiriEvalContext<'mir, 'tcx>,
73 mutex_op: OpTy<'tcx, Tag>,
74 locked_count: impl Into<ScalarMaybeUndef<Tag>>,
75 ) -> InterpResult<'tcx, ()> {
76 // Ensure that the following write at an offset to the mutex pointer is within bounds
77 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
78 let mutex_place = ecx.deref_operand(mutex_op)?;
79 let locked_count_place =
80 mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
81 ecx.write_scalar(locked_count.into(), locked_count_place.into())
84 fn mutex_get_kind<'mir, 'tcx: 'mir>(
85 ecx: &mut MiriEvalContext<'mir, 'tcx>,
86 mutex_op: OpTy<'tcx, Tag>,
87 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
88 // Ensure that the following read at an offset to the mutex pointer is within bounds
89 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
90 let mutex_place = ecx.deref_operand(mutex_op)?;
91 let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
92 let kind_place = mutex_place.offset(
93 Size::from_bytes(kind_offset),
98 ecx.read_scalar(kind_place.into())
101 fn mutex_set_kind<'mir, 'tcx: 'mir>(
102 ecx: &mut MiriEvalContext<'mir, 'tcx>,
103 mutex_op: OpTy<'tcx, Tag>,
104 kind: impl Into<ScalarMaybeUndef<Tag>>,
105 ) -> InterpResult<'tcx, ()> {
106 // Ensure that the following write at an offset to the mutex pointer is within bounds
107 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
108 let mutex_place = ecx.deref_operand(mutex_op)?;
109 let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
110 let kind_place = mutex_place.offset(
111 Size::from_bytes(kind_offset),
116 ecx.write_scalar(kind.into(), kind_place.into())
// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.

// Our chosen memory layout for the emulated rwlock (does not have to match the platform layout!):
// bytes 0-3: reserved for signature on macOS
// (need to avoid this because it is set by static initializer macros)
// bytes 4-7: reader count, as a u32
// bytes 8-11: writer count, as a u32
127 fn rwlock_get_readers<'mir, 'tcx: 'mir>(
128 ecx: &MiriEvalContext<'mir, 'tcx>,
129 rwlock_op: OpTy<'tcx, Tag>,
130 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
131 // Ensure that the following read at an offset to the rwlock pointer is within bounds
132 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
133 let rwlock_place = ecx.deref_operand(rwlock_op)?;
135 rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
136 ecx.read_scalar(readers_place.into())
139 fn rwlock_set_readers<'mir, 'tcx: 'mir>(
140 ecx: &mut MiriEvalContext<'mir, 'tcx>,
141 rwlock_op: OpTy<'tcx, Tag>,
142 readers: impl Into<ScalarMaybeUndef<Tag>>,
143 ) -> InterpResult<'tcx, ()> {
144 // Ensure that the following write at an offset to the rwlock pointer is within bounds
145 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
146 let rwlock_place = ecx.deref_operand(rwlock_op)?;
148 rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
149 ecx.write_scalar(readers.into(), readers_place.into())
152 fn rwlock_get_writers<'mir, 'tcx: 'mir>(
153 ecx: &MiriEvalContext<'mir, 'tcx>,
154 rwlock_op: OpTy<'tcx, Tag>,
155 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
156 // Ensure that the following read at an offset to the rwlock pointer is within bounds
157 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
158 let rwlock_place = ecx.deref_operand(rwlock_op)?;
160 rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
161 ecx.read_scalar(writers_place.into())
164 fn rwlock_set_writers<'mir, 'tcx: 'mir>(
165 ecx: &mut MiriEvalContext<'mir, 'tcx>,
166 rwlock_op: OpTy<'tcx, Tag>,
167 writers: impl Into<ScalarMaybeUndef<Tag>>,
168 ) -> InterpResult<'tcx, ()> {
169 // Ensure that the following write at an offset to the rwlock pointer is within bounds
170 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
171 let rwlock_place = ecx.deref_operand(rwlock_op)?;
173 rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
174 ecx.write_scalar(writers.into(), writers_place.into())
177 impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
178 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
179 fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
180 let this = self.eval_context_mut();
182 let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?;
183 mutexattr_set_kind(this, attr_op, default_kind)?;
188 fn pthread_mutexattr_settype(
190 attr_op: OpTy<'tcx, Tag>,
191 kind_op: OpTy<'tcx, Tag>,
192 ) -> InterpResult<'tcx, i32> {
193 let this = self.eval_context_mut();
195 let kind = this.read_scalar(kind_op)?.not_undef()?;
196 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
197 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
198 || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?
200 mutexattr_set_kind(this, attr_op, kind)?;
202 let einval = this.eval_libc_i32("EINVAL")?;
209 fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
210 let this = self.eval_context_mut();
212 mutexattr_set_kind(this, attr_op, ScalarMaybeUndef::Undef)?;
217 fn pthread_mutex_init(
219 mutex_op: OpTy<'tcx, Tag>,
220 attr_op: OpTy<'tcx, Tag>,
221 ) -> InterpResult<'tcx, i32> {
222 let this = self.eval_context_mut();
224 let attr = this.read_scalar(attr_op)?.not_undef()?;
225 let kind = if this.is_null(attr)? {
226 this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
228 mutexattr_get_kind(this, attr_op)?.not_undef()?
231 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
232 mutex_set_kind(this, mutex_op, kind)?;
237 fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
238 let this = self.eval_context_mut();
240 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
241 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
243 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
244 if locked_count == 0 {
245 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
248 throw_machine_stop!(TerminationInfo::Deadlock);
250 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
251 if locked_count == 0 {
252 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
255 this.eval_libc_i32("EDEADLK")
257 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
258 match locked_count.checked_add(1) {
260 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
263 None => this.eval_libc_i32("EAGAIN"),
266 this.eval_libc_i32("EINVAL")
270 fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
271 let this = self.eval_context_mut();
273 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
274 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
276 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
277 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
279 if locked_count == 0 {
280 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
283 this.eval_libc_i32("EBUSY")
285 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
286 match locked_count.checked_add(1) {
288 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
291 None => this.eval_libc_i32("EAGAIN"),
294 this.eval_libc_i32("EINVAL")
298 fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
299 let this = self.eval_context_mut();
301 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
302 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
304 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
305 if locked_count != 0 {
306 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
310 "Attempted to unlock a PTHREAD_MUTEX_NORMAL mutex that was not locked"
313 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
314 if locked_count != 0 {
315 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
318 this.eval_libc_i32("EPERM")
320 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
321 match locked_count.checked_sub(1) {
323 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
327 // locked_count was already zero
328 this.eval_libc_i32("EPERM")
332 this.eval_libc_i32("EINVAL")
336 fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
337 let this = self.eval_context_mut();
339 if mutex_get_locked_count(this, mutex_op)?.to_u32()? != 0 {
340 return this.eval_libc_i32("EBUSY");
343 mutex_set_kind(this, mutex_op, ScalarMaybeUndef::Undef)?;
344 mutex_set_locked_count(this, mutex_op, ScalarMaybeUndef::Undef)?;
349 fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
350 let this = self.eval_context_mut();
352 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
353 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
355 throw_machine_stop!(TerminationInfo::Deadlock);
357 match readers.checked_add(1) {
358 Some(new_readers) => {
359 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
362 None => this.eval_libc_i32("EAGAIN"),
367 fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
368 let this = self.eval_context_mut();
370 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
371 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
373 this.eval_libc_i32("EBUSY")
375 match readers.checked_add(1) {
376 Some(new_readers) => {
377 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
380 None => this.eval_libc_i32("EAGAIN"),
385 fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
386 let this = self.eval_context_mut();
388 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
389 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
391 throw_machine_stop!(TerminationInfo::Deadlock);
392 } else if writers != 0 {
393 throw_machine_stop!(TerminationInfo::Deadlock);
395 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
400 fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
401 let this = self.eval_context_mut();
403 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
404 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
405 if readers != 0 || writers != 0 {
406 this.eval_libc_i32("EBUSY")
408 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
413 fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
414 let this = self.eval_context_mut();
416 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
417 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
418 if let Some(new_readers) = readers.checked_sub(1) {
419 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
421 } else if writers != 0 {
422 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?;
425 this.eval_libc_i32("EPERM")
429 fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
430 let this = self.eval_context_mut();
432 if rwlock_get_readers(this, rwlock_op)?.to_u32()? != 0 {
433 return this.eval_libc_i32("EBUSY");
435 if rwlock_get_writers(this, rwlock_op)?.to_u32()? != 0 {
436 return this.eval_libc_i32("EBUSY");
439 rwlock_set_readers(this, rwlock_op, ScalarMaybeUndef::Undef)?;
440 rwlock_set_writers(this, rwlock_op, ScalarMaybeUndef::Undef)?;