1 use rustc_middle::ty::{TyKind, TypeAndMut};
2 use rustc_target::abi::{LayoutOf, Size};
4 use crate::stacked_borrows::Tag;
7 fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(
8 ecx: &MiriEvalContext<'mir, 'tcx>,
9 operand: OpTy<'tcx, Tag>,
11 ) -> InterpResult<'tcx, ()> {
12 let target_ty = match operand.layout.ty.kind {
13 TyKind::RawPtr(TypeAndMut { ty, mutbl: _ }) => ty,
14 _ => panic!("Argument to pthread function was not a raw pointer"),
16 let target_layout = ecx.layout_of(target_ty)?;
17 assert!(target_layout.size.bytes() >= min_size);
21 // pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.
23 // Our chosen memory layout for emulation (does not have to match the platform layout!):
24 // store an i32 in the first four bytes equal to the corresponding libc mutex kind constant
25 // (e.g. PTHREAD_MUTEX_NORMAL).
27 fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
28 ecx: &MiriEvalContext<'mir, 'tcx>,
29 attr_op: OpTy<'tcx, Tag>,
30 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
31 // Ensure that the following read at an offset to the attr pointer is within bounds
32 assert_ptr_target_min_size(ecx, attr_op, 4)?;
33 let attr_place = ecx.deref_operand(attr_op)?;
35 attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.machine.layouts.i32, ecx)?;
36 ecx.read_scalar(kind_place.into())
39 fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
40 ecx: &mut MiriEvalContext<'mir, 'tcx>,
41 attr_op: OpTy<'tcx, Tag>,
42 kind: impl Into<ScalarMaybeUndef<Tag>>,
43 ) -> InterpResult<'tcx, ()> {
44 // Ensure that the following write at an offset to the attr pointer is within bounds
45 assert_ptr_target_min_size(ecx, attr_op, 4)?;
46 let attr_place = ecx.deref_operand(attr_op)?;
48 attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.machine.layouts.i32, ecx)?;
49 ecx.write_scalar(kind.into(), kind_place.into())
52 // pthread_mutex_t is between 24 and 48 bytes, depending on the platform.
54 // Our chosen memory layout for the emulated mutex (does not have to match the platform layout!):
55 // bytes 0-3: reserved for signature on macOS
56 // (need to avoid this because it is set by static initializer macros)
57 // bytes 4-7: count of how many times this mutex has been locked, as a u32
58 // bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
59 // (the kind has to be at its offset for compatibility with static initializer macros)
61 fn mutex_get_locked_count<'mir, 'tcx: 'mir>(
62 ecx: &MiriEvalContext<'mir, 'tcx>,
63 mutex_op: OpTy<'tcx, Tag>,
64 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
65 // Ensure that the following read at an offset to the mutex pointer is within bounds
66 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
67 let mutex_place = ecx.deref_operand(mutex_op)?;
68 let locked_count_place = mutex_place.offset(
71 ecx.machine.layouts.u32,
74 ecx.read_scalar(locked_count_place.into())
77 fn mutex_set_locked_count<'mir, 'tcx: 'mir>(
78 ecx: &mut MiriEvalContext<'mir, 'tcx>,
79 mutex_op: OpTy<'tcx, Tag>,
80 locked_count: impl Into<ScalarMaybeUndef<Tag>>,
81 ) -> InterpResult<'tcx, ()> {
82 // Ensure that the following write at an offset to the mutex pointer is within bounds
83 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
84 let mutex_place = ecx.deref_operand(mutex_op)?;
85 let locked_count_place = mutex_place.offset(
88 ecx.machine.layouts.u32,
91 ecx.write_scalar(locked_count.into(), locked_count_place.into())
94 fn mutex_get_kind<'mir, 'tcx: 'mir>(
95 ecx: &mut MiriEvalContext<'mir, 'tcx>,
96 mutex_op: OpTy<'tcx, Tag>,
97 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
98 // Ensure that the following read at an offset to the mutex pointer is within bounds
99 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
100 let mutex_place = ecx.deref_operand(mutex_op)?;
101 let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
102 let kind_place = mutex_place.offset(
103 Size::from_bytes(kind_offset),
105 ecx.machine.layouts.i32,
108 ecx.read_scalar(kind_place.into())
111 fn mutex_set_kind<'mir, 'tcx: 'mir>(
112 ecx: &mut MiriEvalContext<'mir, 'tcx>,
113 mutex_op: OpTy<'tcx, Tag>,
114 kind: impl Into<ScalarMaybeUndef<Tag>>,
115 ) -> InterpResult<'tcx, ()> {
116 // Ensure that the following write at an offset to the mutex pointer is within bounds
117 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
118 let mutex_place = ecx.deref_operand(mutex_op)?;
119 let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
120 let kind_place = mutex_place.offset(
121 Size::from_bytes(kind_offset),
123 ecx.machine.layouts.i32,
126 ecx.write_scalar(kind.into(), kind_place.into())
129 // pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
131 // Our chosen memory layout for the emulated rwlock (does not have to match the platform layout!):
132 // bytes 0-3: reserved for signature on macOS
133 // (need to avoid this because it is set by static initializer macros)
134 // bytes 4-7: reader count, as a u32
135 // bytes 8-11: writer count, as a u32
137 fn rwlock_get_readers<'mir, 'tcx: 'mir>(
138 ecx: &MiriEvalContext<'mir, 'tcx>,
139 rwlock_op: OpTy<'tcx, Tag>,
140 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
141 // Ensure that the following read at an offset to the rwlock pointer is within bounds
142 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
143 let rwlock_place = ecx.deref_operand(rwlock_op)?;
144 let readers_place = rwlock_place.offset(
147 ecx.machine.layouts.u32,
150 ecx.read_scalar(readers_place.into())
153 fn rwlock_set_readers<'mir, 'tcx: 'mir>(
154 ecx: &mut MiriEvalContext<'mir, 'tcx>,
155 rwlock_op: OpTy<'tcx, Tag>,
156 readers: impl Into<ScalarMaybeUndef<Tag>>,
157 ) -> InterpResult<'tcx, ()> {
158 // Ensure that the following write at an offset to the rwlock pointer is within bounds
159 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
160 let rwlock_place = ecx.deref_operand(rwlock_op)?;
161 let readers_place = rwlock_place.offset(
164 ecx.machine.layouts.u32,
167 ecx.write_scalar(readers.into(), readers_place.into())
170 fn rwlock_get_writers<'mir, 'tcx: 'mir>(
171 ecx: &MiriEvalContext<'mir, 'tcx>,
172 rwlock_op: OpTy<'tcx, Tag>,
173 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
174 // Ensure that the following read at an offset to the rwlock pointer is within bounds
175 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
176 let rwlock_place = ecx.deref_operand(rwlock_op)?;
177 let writers_place = rwlock_place.offset(
180 ecx.machine.layouts.u32,
183 ecx.read_scalar(writers_place.into())
186 fn rwlock_set_writers<'mir, 'tcx: 'mir>(
187 ecx: &mut MiriEvalContext<'mir, 'tcx>,
188 rwlock_op: OpTy<'tcx, Tag>,
189 writers: impl Into<ScalarMaybeUndef<Tag>>,
190 ) -> InterpResult<'tcx, ()> {
191 // Ensure that the following write at an offset to the rwlock pointer is within bounds
192 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
193 let rwlock_place = ecx.deref_operand(rwlock_op)?;
194 let writers_place = rwlock_place.offset(
197 ecx.machine.layouts.u32,
200 ecx.write_scalar(writers.into(), writers_place.into())
// Hook the shim trait below into the Miri evaluation context.
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
// Shims for the pthread mutex/rwlock API. Each method returns the i32 result
// code the corresponding libc function would return (0 on success).
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
205 fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
206 let this = self.eval_context_mut();
208 let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?;
209 mutexattr_set_kind(this, attr_op, default_kind)?;
214 fn pthread_mutexattr_settype(
216 attr_op: OpTy<'tcx, Tag>,
217 kind_op: OpTy<'tcx, Tag>,
218 ) -> InterpResult<'tcx, i32> {
219 let this = self.eval_context_mut();
221 let kind = this.read_scalar(kind_op)?.not_undef()?;
222 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
223 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
224 || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?
226 mutexattr_set_kind(this, attr_op, kind)?;
228 let einval = this.eval_libc_i32("EINVAL")?;
235 fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
236 let this = self.eval_context_mut();
238 mutexattr_set_kind(this, attr_op, ScalarMaybeUndef::Undef)?;
243 fn pthread_mutex_init(
245 mutex_op: OpTy<'tcx, Tag>,
246 attr_op: OpTy<'tcx, Tag>,
247 ) -> InterpResult<'tcx, i32> {
248 let this = self.eval_context_mut();
250 let attr = this.read_scalar(attr_op)?.not_undef()?;
251 let kind = if this.is_null(attr)? {
252 this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
254 mutexattr_get_kind(this, attr_op)?.not_undef()?
257 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
258 mutex_set_kind(this, mutex_op, kind)?;
263 fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
264 let this = self.eval_context_mut();
266 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
267 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
269 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
270 if locked_count == 0 {
271 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
274 throw_machine_stop!(TerminationInfo::Deadlock);
276 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
277 if locked_count == 0 {
278 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
281 this.eval_libc_i32("EDEADLK")
283 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
284 match locked_count.checked_add(1) {
286 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
289 None => this.eval_libc_i32("EAGAIN"),
292 throw_ub_format!("called pthread_mutex_lock on an unsupported type of mutex");
296 fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
297 let this = self.eval_context_mut();
299 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
300 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
302 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
303 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
305 if locked_count == 0 {
306 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
309 this.eval_libc_i32("EBUSY")
311 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
312 match locked_count.checked_add(1) {
314 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
317 None => this.eval_libc_i32("EAGAIN"),
320 throw_ub_format!("called pthread_mutex_trylock on an unsupported type of mutex");
324 fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
325 let this = self.eval_context_mut();
327 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
328 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
330 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
331 if locked_count != 0 {
332 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
335 throw_ub_format!("unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked");
337 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
338 if locked_count != 0 {
339 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
342 this.eval_libc_i32("EPERM")
344 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
345 match locked_count.checked_sub(1) {
347 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
351 // locked_count was already zero
352 this.eval_libc_i32("EPERM")
356 throw_ub_format!("called pthread_mutex_unlock on an unsupported type of mutex");
360 fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
361 let this = self.eval_context_mut();
363 if mutex_get_locked_count(this, mutex_op)?.to_u32()? != 0 {
364 throw_ub_format!("destroyed a locked mutex");
367 mutex_set_kind(this, mutex_op, ScalarMaybeUndef::Undef)?;
368 mutex_set_locked_count(this, mutex_op, ScalarMaybeUndef::Undef)?;
373 fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
374 let this = self.eval_context_mut();
376 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
377 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
379 throw_machine_stop!(TerminationInfo::Deadlock);
381 match readers.checked_add(1) {
382 Some(new_readers) => {
383 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
386 None => this.eval_libc_i32("EAGAIN"),
391 fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
392 let this = self.eval_context_mut();
394 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
395 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
397 this.eval_libc_i32("EBUSY")
399 match readers.checked_add(1) {
400 Some(new_readers) => {
401 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
404 None => this.eval_libc_i32("EAGAIN"),
409 fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
410 let this = self.eval_context_mut();
412 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
413 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
415 throw_machine_stop!(TerminationInfo::Deadlock);
416 } else if writers != 0 {
417 throw_machine_stop!(TerminationInfo::Deadlock);
419 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
424 fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
425 let this = self.eval_context_mut();
427 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
428 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
429 if readers != 0 || writers != 0 {
430 this.eval_libc_i32("EBUSY")
432 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
437 fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
438 let this = self.eval_context_mut();
440 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
441 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
442 if let Some(new_readers) = readers.checked_sub(1) {
443 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
445 } else if writers != 0 {
446 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?;
449 throw_ub_format!("unlocked an rwlock that was not locked");
    /// Shim for `pthread_rwlock_destroy`: destroying a locked rwlock is UB;
    /// otherwise both counters are poisoned with `Undef` so later use of the
    /// destroyed lock is detected.
    // NOTE(review): this method's tail (return value and closing brace) lies
    // past the end of this chunk; the code below is kept exactly as-is.
    fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();
        if rwlock_get_readers(this, rwlock_op)?.to_u32()? != 0
        || rwlock_get_writers(this, rwlock_op)?.to_u32()? != 0
        throw_ub_format!("destroyed a locked rwlock");
        rwlock_set_readers(this, rwlock_op, ScalarMaybeUndef::Undef)?;
        rwlock_set_writers(this, rwlock_op, ScalarMaybeUndef::Undef)?;