use rustc_middle::ty::{TyKind, TypeAndMut};
use rustc_target::abi::{LayoutOf, Size};

use crate::stacked_borrows::Tag;
use crate::*;
7 fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(
8 ecx: &MiriEvalContext<'mir, 'tcx>,
9 operand: OpTy<'tcx, Tag>,
11 ) -> InterpResult<'tcx, ()> {
12 let target_ty = match operand.layout.ty.kind {
13 TyKind::RawPtr(TypeAndMut { ty, mutbl: _ }) => ty,
14 _ => panic!("Argument to pthread function was not a raw pointer"),
16 let target_layout = ecx.layout_of(target_ty)?;
17 assert!(target_layout.size.bytes() >= min_size);
21 // pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.
23 // Our chosen memory layout: store an i32 in the first four bytes equal to the
24 // corresponding libc mutex kind constant (i.e. PTHREAD_MUTEX_NORMAL)
26 fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
27 ecx: &MiriEvalContext<'mir, 'tcx>,
28 attr_op: OpTy<'tcx, Tag>,
29 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
30 // Ensure that the following read at an offset to the attr pointer is within bounds
31 assert_ptr_target_min_size(ecx, attr_op, 4)?;
32 let attr_place = ecx.deref_operand(attr_op)?;
33 let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.i32_layout()?, ecx)?;
34 ecx.read_scalar(kind_place.into())
37 fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
38 ecx: &mut MiriEvalContext<'mir, 'tcx>,
39 attr_op: OpTy<'tcx, Tag>,
40 kind: impl Into<ScalarMaybeUndef<Tag>>,
41 ) -> InterpResult<'tcx, ()> {
42 // Ensure that the following write at an offset to the attr pointer is within bounds
43 assert_ptr_target_min_size(ecx, attr_op, 4)?;
44 let attr_place = ecx.deref_operand(attr_op)?;
45 let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.i32_layout()?, ecx)?;
46 ecx.write_scalar(kind.into(), kind_place.into())
49 // pthread_mutex_t is between 24 and 48 bytes, depending on the platform.
51 // Our chosen memory layout:
52 // bytes 0-3: reserved for signature on macOS
53 // (need to avoid this because it is set by static initializer macros)
54 // bytes 4-7: count of how many times this mutex has been locked, as a u32
55 // bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
56 // (the kind has to be at its offset for compatibility with static initializer macros)
58 fn mutex_get_locked_count<'mir, 'tcx: 'mir>(
59 ecx: &MiriEvalContext<'mir, 'tcx>,
60 mutex_op: OpTy<'tcx, Tag>,
61 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
62 // Ensure that the following read at an offset to the mutex pointer is within bounds
63 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
64 let mutex_place = ecx.deref_operand(mutex_op)?;
65 let locked_count_place =
66 mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
67 ecx.read_scalar(locked_count_place.into())
70 fn mutex_set_locked_count<'mir, 'tcx: 'mir>(
71 ecx: &mut MiriEvalContext<'mir, 'tcx>,
72 mutex_op: OpTy<'tcx, Tag>,
73 locked_count: impl Into<ScalarMaybeUndef<Tag>>,
74 ) -> InterpResult<'tcx, ()> {
75 // Ensure that the following write at an offset to the mutex pointer is within bounds
76 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
77 let mutex_place = ecx.deref_operand(mutex_op)?;
78 let locked_count_place =
79 mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
80 ecx.write_scalar(locked_count.into(), locked_count_place.into())
83 fn mutex_get_kind<'mir, 'tcx: 'mir>(
84 ecx: &mut MiriEvalContext<'mir, 'tcx>,
85 mutex_op: OpTy<'tcx, Tag>,
86 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
87 // Ensure that the following read at an offset to the mutex pointer is within bounds
88 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
89 let mutex_place = ecx.deref_operand(mutex_op)?;
90 let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
91 let kind_place = mutex_place.offset(
92 Size::from_bytes(kind_offset),
97 ecx.read_scalar(kind_place.into())
100 fn mutex_set_kind<'mir, 'tcx: 'mir>(
101 ecx: &mut MiriEvalContext<'mir, 'tcx>,
102 mutex_op: OpTy<'tcx, Tag>,
103 kind: impl Into<ScalarMaybeUndef<Tag>>,
104 ) -> InterpResult<'tcx, ()> {
105 // Ensure that the following write at an offset to the mutex pointer is within bounds
106 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
107 let mutex_place = ecx.deref_operand(mutex_op)?;
108 let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
109 let kind_place = mutex_place.offset(
110 Size::from_bytes(kind_offset),
115 ecx.write_scalar(kind.into(), kind_place.into())
118 // pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
120 // Our chosen memory layout:
121 // bytes 0-3: reserved for signature on macOS
122 // (need to avoid this because it is set by static initializer macros)
123 // bytes 4-7: reader count, as a u32
124 // bytes 8-11: writer count, as a u32
126 fn rwlock_get_readers<'mir, 'tcx: 'mir>(
127 ecx: &MiriEvalContext<'mir, 'tcx>,
128 rwlock_op: OpTy<'tcx, Tag>,
129 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
130 // Ensure that the following read at an offset to the rwlock pointer is within bounds
131 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
132 let rwlock_place = ecx.deref_operand(rwlock_op)?;
134 rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
135 ecx.read_scalar(readers_place.into())
138 fn rwlock_set_readers<'mir, 'tcx: 'mir>(
139 ecx: &mut MiriEvalContext<'mir, 'tcx>,
140 rwlock_op: OpTy<'tcx, Tag>,
141 readers: impl Into<ScalarMaybeUndef<Tag>>,
142 ) -> InterpResult<'tcx, ()> {
143 // Ensure that the following write at an offset to the rwlock pointer is within bounds
144 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
145 let rwlock_place = ecx.deref_operand(rwlock_op)?;
147 rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
148 ecx.write_scalar(readers.into(), readers_place.into())
151 fn rwlock_get_writers<'mir, 'tcx: 'mir>(
152 ecx: &MiriEvalContext<'mir, 'tcx>,
153 rwlock_op: OpTy<'tcx, Tag>,
154 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
155 // Ensure that the following read at an offset to the rwlock pointer is within bounds
156 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
157 let rwlock_place = ecx.deref_operand(rwlock_op)?;
159 rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
160 ecx.read_scalar(writers_place.into())
163 fn rwlock_set_writers<'mir, 'tcx: 'mir>(
164 ecx: &mut MiriEvalContext<'mir, 'tcx>,
165 rwlock_op: OpTy<'tcx, Tag>,
166 writers: impl Into<ScalarMaybeUndef<Tag>>,
167 ) -> InterpResult<'tcx, ()> {
168 // Ensure that the following write at an offset to the rwlock pointer is within bounds
169 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
170 let rwlock_place = ecx.deref_operand(rwlock_op)?;
172 rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
173 ecx.write_scalar(writers.into(), writers_place.into())
176 impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
177 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
178 fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
179 let this = self.eval_context_mut();
181 let attr = this.read_scalar(attr_op)?.not_undef()?;
182 if this.is_null(attr)? {
183 return this.eval_libc_i32("EINVAL");
186 let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?;
187 mutexattr_set_kind(this, attr_op, default_kind)?;
192 fn pthread_mutexattr_settype(
194 attr_op: OpTy<'tcx, Tag>,
195 kind_op: OpTy<'tcx, Tag>,
196 ) -> InterpResult<'tcx, i32> {
197 let this = self.eval_context_mut();
199 let attr = this.read_scalar(attr_op)?.not_undef()?;
200 if this.is_null(attr)? {
201 return this.eval_libc_i32("EINVAL");
204 let kind = this.read_scalar(kind_op)?.not_undef()?;
205 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
206 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
207 || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?
209 mutexattr_set_kind(this, attr_op, kind)?;
211 let einval = this.eval_libc_i32("EINVAL")?;
218 fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
219 let this = self.eval_context_mut();
221 let attr = this.read_scalar(attr_op)?.not_undef()?;
222 if this.is_null(attr)? {
223 return this.eval_libc_i32("EINVAL");
226 mutexattr_set_kind(this, attr_op, ScalarMaybeUndef::Undef)?;
231 fn pthread_mutex_init(
233 mutex_op: OpTy<'tcx, Tag>,
234 attr_op: OpTy<'tcx, Tag>,
235 ) -> InterpResult<'tcx, i32> {
236 let this = self.eval_context_mut();
238 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
239 if this.is_null(mutex)? {
240 return this.eval_libc_i32("EINVAL");
243 let attr = this.read_scalar(attr_op)?.not_undef()?;
244 let kind = if this.is_null(attr)? {
245 this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
247 mutexattr_get_kind(this, attr_op)?.not_undef()?
250 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
251 mutex_set_kind(this, mutex_op, kind)?;
256 fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
257 let this = self.eval_context_mut();
259 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
260 if this.is_null(mutex)? {
261 return this.eval_libc_i32("EINVAL");
264 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
265 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
267 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
268 if locked_count == 0 {
269 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
272 throw_machine_stop!(TerminationInfo::Deadlock);
274 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
275 if locked_count == 0 {
276 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
279 this.eval_libc_i32("EDEADLK")
281 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
282 match locked_count.checked_add(1) {
284 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
287 None => this.eval_libc_i32("EAGAIN"),
290 this.eval_libc_i32("EINVAL")
294 fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
295 let this = self.eval_context_mut();
297 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
298 if this.is_null(mutex)? {
299 return this.eval_libc_i32("EINVAL");
302 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
303 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
305 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
306 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
308 if locked_count == 0 {
309 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
312 this.eval_libc_i32("EBUSY")
314 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
315 match locked_count.checked_add(1) {
317 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
320 None => this.eval_libc_i32("EAGAIN"),
323 this.eval_libc_i32("EINVAL")
327 fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
328 let this = self.eval_context_mut();
330 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
331 if this.is_null(mutex)? {
332 return this.eval_libc_i32("EINVAL");
335 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
336 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
338 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
339 if locked_count != 0 {
340 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
344 "Attempted to unlock a PTHREAD_MUTEX_NORMAL mutex that was not locked"
347 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
348 if locked_count != 0 {
349 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
352 this.eval_libc_i32("EPERM")
354 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
355 match locked_count.checked_sub(1) {
357 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
361 // locked_count was already zero
362 this.eval_libc_i32("EPERM")
366 this.eval_libc_i32("EINVAL")
370 fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
371 let this = self.eval_context_mut();
373 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
374 if this.is_null(mutex)? {
375 return this.eval_libc_i32("EINVAL");
378 if mutex_get_locked_count(this, mutex_op)?.to_u32()? != 0 {
379 return this.eval_libc_i32("EBUSY");
382 mutex_set_kind(this, mutex_op, ScalarMaybeUndef::Undef)?;
383 mutex_set_locked_count(this, mutex_op, ScalarMaybeUndef::Undef)?;
388 fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
389 let this = self.eval_context_mut();
391 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
392 if this.is_null(rwlock)? {
393 return this.eval_libc_i32("EINVAL");
396 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
397 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
400 "Deadlock due to read-locking a pthreads read-write lock while it is already write-locked"
403 match readers.checked_add(1) {
404 Some(new_readers) => {
405 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
408 None => this.eval_libc_i32("EAGAIN"),
413 fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
414 let this = self.eval_context_mut();
416 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
417 if this.is_null(rwlock)? {
418 return this.eval_libc_i32("EINVAL");
421 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
422 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
424 this.eval_libc_i32("EBUSY")
426 match readers.checked_add(1) {
427 Some(new_readers) => {
428 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
431 None => this.eval_libc_i32("EAGAIN"),
436 fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
437 let this = self.eval_context_mut();
439 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
440 if this.is_null(rwlock)? {
441 return this.eval_libc_i32("EINVAL");
444 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
445 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
448 "Deadlock due to write-locking a pthreads read-write lock while it is already read-locked"
450 } else if writers != 0 {
452 "Deadlock due to write-locking a pthreads read-write lock while it is already write-locked"
455 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
460 fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
461 let this = self.eval_context_mut();
463 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
464 if this.is_null(rwlock)? {
465 return this.eval_libc_i32("EINVAL");
468 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
469 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
470 if readers != 0 || writers != 0 {
471 this.eval_libc_i32("EBUSY")
473 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
478 fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
479 let this = self.eval_context_mut();
481 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
482 if this.is_null(rwlock)? {
483 return this.eval_libc_i32("EINVAL");
486 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
487 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
488 if let Some(new_readers) = readers.checked_sub(1) {
489 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
491 } else if writers != 0 {
492 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?;
495 this.eval_libc_i32("EPERM")
499 fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
500 let this = self.eval_context_mut();
502 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
503 if this.is_null(rwlock)? {
504 return this.eval_libc_i32("EINVAL");
507 if rwlock_get_readers(this, rwlock_op)?.to_u32()? != 0 {
508 return this.eval_libc_i32("EBUSY");
510 if rwlock_get_writers(this, rwlock_op)?.to_u32()? != 0 {
511 return this.eval_libc_i32("EBUSY");
514 rwlock_set_readers(this, rwlock_op, ScalarMaybeUndef::Undef)?;
515 rwlock_set_writers(this, rwlock_op, ScalarMaybeUndef::Undef)?;