1 use std::sync::atomic::{AtomicU64, Ordering};
3 use rustc_middle::ty::{TyKind, TypeAndMut};
4 use rustc_target::abi::{FieldsShape, LayoutOf, Size};
6 use crate::stacked_borrows::Tag;
// Blanket impl: makes all `EvalContextExt` shim methods below available on the
// Miri evaluation context (all methods have default bodies in the trait).
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
10 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
11 fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
12 let this = self.eval_context_mut();
14 let attr = this.read_scalar(attr_op)?.not_undef()?;
15 if this.is_null(attr)? {
16 return this.eval_libc_i32("EINVAL");
19 let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?;
20 mutexattr_set_kind(this, attr_op, default_kind)?;
25 fn pthread_mutexattr_settype(
27 attr_op: OpTy<'tcx, Tag>,
28 kind_op: OpTy<'tcx, Tag>,
29 ) -> InterpResult<'tcx, i32> {
30 let this = self.eval_context_mut();
32 let attr = this.read_scalar(attr_op)?.not_undef()?;
33 if this.is_null(attr)? {
34 return this.eval_libc_i32("EINVAL");
37 let kind = this.read_scalar(kind_op)?.not_undef()?;
38 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
39 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
40 || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")?
42 mutexattr_set_kind(this, attr_op, kind)?;
44 let einval = this.eval_libc_i32("EINVAL")?;
51 fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
52 let this = self.eval_context_mut();
54 let attr = this.read_scalar(attr_op)?.not_undef()?;
55 if this.is_null(attr)? {
56 return this.eval_libc_i32("EINVAL");
59 mutexattr_set_kind(this, attr_op, ScalarMaybeUndef::Undef)?;
64 fn pthread_mutex_init(
66 mutex_op: OpTy<'tcx, Tag>,
67 attr_op: OpTy<'tcx, Tag>,
68 ) -> InterpResult<'tcx, i32> {
69 let this = self.eval_context_mut();
71 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
72 if this.is_null(mutex)? {
73 return this.eval_libc_i32("EINVAL");
76 let attr = this.read_scalar(attr_op)?.not_undef()?;
77 let kind = if this.is_null(attr)? {
78 this.eval_libc("PTHREAD_MUTEX_DEFAULT")?
80 mutexattr_get_kind(this, attr_op)?.not_undef()?
83 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
84 mutex_set_kind(this, mutex_op, kind)?;
89 fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
90 let this = self.eval_context_mut();
92 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
93 if this.is_null(mutex)? {
94 return this.eval_libc_i32("EINVAL");
97 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
98 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
100 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
101 if locked_count == 0 {
102 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
105 throw_unsup_format!("Deadlock due to locking a PTHREAD_MUTEX_NORMAL mutex twice");
107 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
108 if locked_count == 0 {
109 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
112 this.eval_libc_i32("EDEADLK")
114 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
115 match locked_count.checked_add(1) {
117 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
120 None => this.eval_libc_i32("EAGAIN"),
123 this.eval_libc_i32("EINVAL")
127 fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
128 let this = self.eval_context_mut();
130 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
131 if this.is_null(mutex)? {
132 return this.eval_libc_i32("EINVAL");
135 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
136 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
138 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
139 || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
141 if locked_count == 0 {
142 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
145 this.eval_libc_i32("EBUSY")
147 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
148 match locked_count.checked_add(1) {
150 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
153 None => this.eval_libc_i32("EAGAIN"),
156 this.eval_libc_i32("EINVAL")
160 fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
161 let this = self.eval_context_mut();
163 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
164 if this.is_null(mutex)? {
165 return this.eval_libc_i32("EINVAL");
168 let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
169 let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
171 if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
172 if locked_count != 0 {
173 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
177 "Attempted to unlock a PTHREAD_MUTEX_NORMAL mutex that was not locked"
180 } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
181 if locked_count != 0 {
182 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
185 this.eval_libc_i32("EPERM")
187 } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
188 match locked_count.checked_sub(1) {
190 mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
194 // locked_count was already zero
195 this.eval_libc_i32("EPERM")
199 this.eval_libc_i32("EINVAL")
203 fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
204 let this = self.eval_context_mut();
206 let mutex = this.read_scalar(mutex_op)?.not_undef()?;
207 if this.is_null(mutex)? {
208 return this.eval_libc_i32("EINVAL");
211 if mutex_get_locked_count(this, mutex_op)?.to_u32()? != 0 {
212 return this.eval_libc_i32("EBUSY");
215 mutex_set_kind(this, mutex_op, ScalarMaybeUndef::Undef)?;
216 mutex_set_locked_count(this, mutex_op, ScalarMaybeUndef::Undef)?;
221 fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
222 let this = self.eval_context_mut();
224 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
225 if this.is_null(rwlock)? {
226 return this.eval_libc_i32("EINVAL");
229 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
230 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
233 "Deadlock due to read-locking a pthreads read-write lock while it is already write-locked"
236 match readers.checked_add(1) {
237 Some(new_readers) => {
238 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
241 None => this.eval_libc_i32("EAGAIN"),
246 fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
247 let this = self.eval_context_mut();
249 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
250 if this.is_null(rwlock)? {
251 return this.eval_libc_i32("EINVAL");
254 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
255 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
257 this.eval_libc_i32("EBUSY")
259 match readers.checked_add(1) {
260 Some(new_readers) => {
261 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
264 None => this.eval_libc_i32("EAGAIN"),
269 fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
270 let this = self.eval_context_mut();
272 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
273 if this.is_null(rwlock)? {
274 return this.eval_libc_i32("EINVAL");
277 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
278 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
281 "Deadlock due to write-locking a pthreads read-write lock while it is already read-locked"
283 } else if writers != 0 {
285 "Deadlock due to write-locking a pthreads read-write lock while it is already write-locked"
288 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
293 fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
294 let this = self.eval_context_mut();
296 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
297 if this.is_null(rwlock)? {
298 return this.eval_libc_i32("EINVAL");
301 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
302 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
303 if readers != 0 || writers != 0 {
304 this.eval_libc_i32("EBUSY")
306 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
311 fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
312 let this = self.eval_context_mut();
314 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
315 if this.is_null(rwlock)? {
316 return this.eval_libc_i32("EINVAL");
319 let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
320 let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
321 if let Some(new_readers) = readers.checked_sub(1) {
322 rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
324 } else if writers != 0 {
325 rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?;
328 this.eval_libc_i32("EPERM")
332 fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
333 let this = self.eval_context_mut();
335 let rwlock = this.read_scalar(rwlock_op)?.not_undef()?;
336 if this.is_null(rwlock)? {
337 return this.eval_libc_i32("EINVAL");
340 if rwlock_get_readers(this, rwlock_op)?.to_u32()? != 0 {
341 return this.eval_libc_i32("EBUSY");
343 if rwlock_get_writers(this, rwlock_op)?.to_u32()? != 0 {
344 return this.eval_libc_i32("EBUSY");
347 rwlock_set_readers(this, rwlock_op, ScalarMaybeUndef::Undef)?;
348 rwlock_set_writers(this, rwlock_op, ScalarMaybeUndef::Undef)?;
354 fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(
355 ecx: &MiriEvalContext<'mir, 'tcx>,
356 operand: OpTy<'tcx, Tag>,
358 ) -> InterpResult<'tcx, ()> {
359 let target_ty = match operand.layout.ty.kind {
360 TyKind::RawPtr(TypeAndMut { ty, mutbl: _ }) => ty,
361 _ => panic!("Argument to pthread function was not a raw pointer"),
363 let target_layout = ecx.layout_of(target_ty)?;
364 assert!(target_layout.size.bytes() >= min_size);
368 // pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.
370 // Our chosen memory layout: store an i32 in the first four bytes equal to the
371 // corresponding libc mutex kind constant (i.e. PTHREAD_MUTEX_NORMAL)
373 fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
374 ecx: &MiriEvalContext<'mir, 'tcx>,
375 attr_op: OpTy<'tcx, Tag>,
376 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
377 // Ensure that the following read at an offset to the attr pointer is within bounds
378 assert_ptr_target_min_size(ecx, attr_op, 4)?;
379 let attr_place = ecx.deref_operand(attr_op)?;
380 let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
381 let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?;
382 ecx.read_scalar(kind_place.into())
385 fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
386 ecx: &mut MiriEvalContext<'mir, 'tcx>,
387 attr_op: OpTy<'tcx, Tag>,
388 kind: impl Into<ScalarMaybeUndef<Tag>>,
389 ) -> InterpResult<'tcx, ()> {
390 // Ensure that the following write at an offset to the attr pointer is within bounds
391 assert_ptr_target_min_size(ecx, attr_op, 4)?;
392 let attr_place = ecx.deref_operand(attr_op)?;
393 let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
394 let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?;
395 ecx.write_scalar(kind.into(), kind_place.into())
398 // pthread_mutex_t is between 24 and 48 bytes, depending on the platform.
400 // Our chosen memory layout:
401 // bytes 0-3: reserved for signature on macOS
402 // (need to avoid this because it is set by static initializer macros)
403 // bytes 4-7: count of how many times this mutex has been locked, as a u32
404 // bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
405 // (the kind has to be at its offset for compatibility with static initializer macros)
// Memoization cache for `libc_mutex_kind_offset` below. 0 means "not yet
// computed" (0 is never a valid offset: bytes 0-3 are reserved, see layout
// comment above).
static LIBC_MUTEX_KIND_OFFSET_CACHE: AtomicU64 = AtomicU64::new(0);
409 fn libc_mutex_kind_offset<'mir, 'tcx: 'mir>(
410 ecx: &mut MiriEvalContext<'mir, 'tcx>,
411 ) -> InterpResult<'tcx, u64> {
412 // Check if this offset has already been found and memoized
413 let cached_value = LIBC_MUTEX_KIND_OFFSET_CACHE.load(Ordering::Relaxed);
414 if cached_value != 0 {
415 return Ok(cached_value);
418 // This function infers the offset of the `kind` field of libc's pthread_mutex_t
419 // C struct by examining the array inside libc::PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP.
420 // At time of writing, it is always all zero bytes except for a one byte at one of
421 // four positions, depending on the target OS's C struct layout and the endianness of the
422 // target architecture. This offset will then be used in getters and setters below, so that
423 // mutexes created from static initializers can be emulated with the correct behavior.
424 let initializer_path = ["libc", "PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP"];
425 let initializer_instance = ecx.resolve_path(&initializer_path);
426 let initializer_cid = GlobalId { instance: initializer_instance, promoted: None };
427 let initializer_const_val = ecx.const_eval_raw(initializer_cid)?;
428 let array_mplacety = ecx.mplace_field(initializer_const_val, 0)?;
429 let array_length = match array_mplacety.layout.fields {
430 FieldsShape::Array { count, .. } => count,
431 _ => bug!("Couldn't get array length from type {:?}", array_mplacety.layout.ty),
434 let kind_offset = if array_length < 20 {
435 bug!("libc::PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP array was shorter than expected");
436 } else if ecx.read_scalar(ecx.mplace_field(array_mplacety, 16)?.into())?.to_u8()? != 0 {
437 // for little-endian architectures
439 } else if ecx.read_scalar(ecx.mplace_field(array_mplacety, 19)?.into())?.to_u8()? != 0 {
440 // for big-endian architectures
441 // (note that the i32 spans bytes 16 through 19, so the offset of the kind field is 16)
443 } else if ecx.read_scalar(ecx.mplace_field(array_mplacety, 12)?.into())?.to_u8()? != 0 {
444 // for little-endian architectures
446 } else if ecx.read_scalar(ecx.mplace_field(array_mplacety, 15)?.into())?.to_u8()? != 0 {
447 // for big-endian architectures
448 // (note that the i32 spans bytes 12 through 15, so the offset of the kind field is 12)
451 bug!("Couldn't determine offset of `kind` in pthread_mutex_t");
454 // Save offset to memoization cache for future calls
455 LIBC_MUTEX_KIND_OFFSET_CACHE.store(kind_offset, Ordering::Relaxed);
459 fn mutex_get_locked_count<'mir, 'tcx: 'mir>(
460 ecx: &MiriEvalContext<'mir, 'tcx>,
461 mutex_op: OpTy<'tcx, Tag>,
462 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
463 // Ensure that the following read at an offset to the mutex pointer is within bounds
464 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
465 let mutex_place = ecx.deref_operand(mutex_op)?;
466 let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
467 let locked_count_place =
468 mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
469 ecx.read_scalar(locked_count_place.into())
472 fn mutex_set_locked_count<'mir, 'tcx: 'mir>(
473 ecx: &mut MiriEvalContext<'mir, 'tcx>,
474 mutex_op: OpTy<'tcx, Tag>,
475 locked_count: impl Into<ScalarMaybeUndef<Tag>>,
476 ) -> InterpResult<'tcx, ()> {
477 // Ensure that the following write at an offset to the mutex pointer is within bounds
478 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
479 let mutex_place = ecx.deref_operand(mutex_op)?;
480 let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
481 let locked_count_place =
482 mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
483 ecx.write_scalar(locked_count.into(), locked_count_place.into())
486 fn mutex_get_kind<'mir, 'tcx: 'mir>(
487 ecx: &mut MiriEvalContext<'mir, 'tcx>,
488 mutex_op: OpTy<'tcx, Tag>,
489 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
490 // Ensure that the following read at an offset to the mutex pointer is within bounds
491 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
492 let mutex_place = ecx.deref_operand(mutex_op)?;
493 let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
494 let kind_place = mutex_place.offset(
495 Size::from_bytes(libc_mutex_kind_offset(ecx)?),
500 ecx.read_scalar(kind_place.into())
503 fn mutex_set_kind<'mir, 'tcx: 'mir>(
504 ecx: &mut MiriEvalContext<'mir, 'tcx>,
505 mutex_op: OpTy<'tcx, Tag>,
506 kind: impl Into<ScalarMaybeUndef<Tag>>,
507 ) -> InterpResult<'tcx, ()> {
508 // Ensure that the following write at an offset to the mutex pointer is within bounds
509 assert_ptr_target_min_size(ecx, mutex_op, 20)?;
510 let mutex_place = ecx.deref_operand(mutex_op)?;
511 let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
512 let kind_place = mutex_place.offset(
513 Size::from_bytes(libc_mutex_kind_offset(ecx)?),
518 ecx.write_scalar(kind.into(), kind_place.into())
521 // pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
523 // Our chosen memory layout:
524 // bytes 0-3: reserved for signature on macOS
525 // (need to avoid this because it is set by static initializer macros)
526 // bytes 4-7: reader count, as a u32
527 // bytes 8-11: writer count, as a u32
529 fn rwlock_get_readers<'mir, 'tcx: 'mir>(
530 ecx: &MiriEvalContext<'mir, 'tcx>,
531 rwlock_op: OpTy<'tcx, Tag>,
532 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
533 // Ensure that the following read at an offset to the rwlock pointer is within bounds
534 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
535 let rwlock_place = ecx.deref_operand(rwlock_op)?;
536 let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
538 rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
539 ecx.read_scalar(readers_place.into())
542 fn rwlock_set_readers<'mir, 'tcx: 'mir>(
543 ecx: &mut MiriEvalContext<'mir, 'tcx>,
544 rwlock_op: OpTy<'tcx, Tag>,
545 readers: impl Into<ScalarMaybeUndef<Tag>>,
546 ) -> InterpResult<'tcx, ()> {
547 // Ensure that the following write at an offset to the rwlock pointer is within bounds
548 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
549 let rwlock_place = ecx.deref_operand(rwlock_op)?;
550 let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
552 rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
553 ecx.write_scalar(readers.into(), readers_place.into())
556 fn rwlock_get_writers<'mir, 'tcx: 'mir>(
557 ecx: &MiriEvalContext<'mir, 'tcx>,
558 rwlock_op: OpTy<'tcx, Tag>,
559 ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
560 // Ensure that the following read at an offset to the rwlock pointer is within bounds
561 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
562 let rwlock_place = ecx.deref_operand(rwlock_op)?;
563 let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
565 rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?;
566 ecx.read_scalar(writers_place.into())
569 fn rwlock_set_writers<'mir, 'tcx: 'mir>(
570 ecx: &mut MiriEvalContext<'mir, 'tcx>,
571 rwlock_op: OpTy<'tcx, Tag>,
572 writers: impl Into<ScalarMaybeUndef<Tag>>,
573 ) -> InterpResult<'tcx, ()> {
574 // Ensure that the following write at an offset to the rwlock pointer is within bounds
575 assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
576 let rwlock_place = ecx.deref_operand(rwlock_op)?;
577 let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
579 rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?;
580 ecx.write_scalar(writers.into(), writers_place.into())