Identify suspended write locks by their stack frame and an "abstract" (untyped, fully evaluated) lvalue instead of by a `DynamicLifetime`. Due to subtyping, the same lock can show up under different lifetimes, and distinct write locks can share a lifetime, so lifetimes are not a reliable key. Validation now threads the abstract lvalue through `suspend_write_lock` and `recover_write_lock` so that suspension and recovery agree on which lock is meant. This makes a new compile-fail test pass.
}
for (field_index, operand) in operands.iter().enumerate() {
let value = self.eval_operand(operand)?;
- let field_dest = self.lvalue_field(dest, field_index, dest_ty, value.ty)?;
+ let field_dest = self.lvalue_field(dest, mir::Field::new(field_index), dest_ty, value.ty)?;
self.write_value(value, field_dest)?;
}
Ok(())
/// ensures this Value is not a ByRef
pub(super) fn follow_by_ref_value(
- &mut self,
+ &self,
value: Value,
ty: Ty<'tcx>,
) -> EvalResult<'tcx, Value> {
}
pub fn value_to_primval(
- &mut self,
+ &self,
ValTy { value, ty } : ValTy<'tcx>,
) -> EvalResult<'tcx, PrimVal> {
match self.follow_by_ref_value(value, ty)? {
pub fn lvalue_field(
&mut self,
base: Lvalue,
- field_index: usize,
+ field: mir::Field,
base_ty: Ty<'tcx>,
field_ty: Ty<'tcx>,
) -> EvalResult<'tcx, Lvalue> {
- let base_layout = self.type_layout(base_ty)?;
use rustc::ty::layout::Layout::*;
+
+ let base_layout = self.type_layout(base_ty)?;
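+ // The layout lookups below still use a raw index; extract it from the typed field.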
+ let field_index = field.index();
let (offset, packed) = match *base_layout {
Univariant { ref variant, .. } => (variant.offsets[field_index], variant.packed),
use rustc::mir::ProjectionElem::*;
let (ptr, extra) = match *proj_elem {
Field(field, field_ty) => {
- return self.lvalue_field(base, field.index(), base_ty, field_ty);
+ return self.lvalue_field(base, field, base_ty, field_ty);
}
Downcast(_, variant) => {
use rustc::middle::region;
use super::{EvalResult, EvalErrorKind, PrimVal, Pointer, EvalContext, DynamicLifetime, Machine,
- RangeMap};
+ RangeMap, AbsLvalue};
////////////////////////////////////////////////////////////////////////////////
// Locks
/// Information about a lock that is currently held.
#[derive(Clone, Debug)]
-struct LockInfo {
+struct LockInfo<'tcx> {
- /// Stores for which lifetimes (of the original write lock) we got
- /// which suspensions.
+ /// Stores, for each suspended write lock (identified by a `WriteLockId`),
+ /// the regions for which it got suspended.
- suspended: HashMap<DynamicLifetime, Vec<region::Scope>>,
+ suspended: HashMap<WriteLockId<'tcx>, Vec<region::Scope>>,
/// The current state of the lock that's actually effective.
active: Lock,
}
+/// Write locks are identified by a stack frame and an "abstract" (untyped) lvalue.
+/// It may be tempting to use the lifetime as identifier, but that does not work
+/// for two reasons:
+/// * First of all, due to subtyping, the same lock may be referred to with different
+/// lifetimes.
+/// * Secondly, different write locks may actually have the same lifetime. See `test2`
+/// in `run-pass/many_shr_bor.rs`.
+/// The Id is "captured" when the lock is first suspended; at that point, the borrow checker
+/// considers the path frozen and hence the Id remains stable.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct WriteLockId<'tcx> {
+ frame: usize,
+ path: AbsLvalue<'tcx>,
+}
+
#[derive(Clone, Debug, PartialEq)]
pub enum Lock {
NoLock,
}
use self::Lock::*;
-impl Default for LockInfo {
+impl<'tcx> Default for LockInfo<'tcx> {
fn default() -> Self {
LockInfo::new(NoLock)
}
}
-impl LockInfo {
- fn new(lock: Lock) -> LockInfo {
+impl<'tcx> LockInfo<'tcx> {
+ fn new(lock: Lock) -> LockInfo<'tcx> {
LockInfo {
suspended: HashMap::new(),
active: lock,
}
#[derive(Debug)]
-pub struct Allocation<M> {
+pub struct Allocation<'tcx, M> {
/// The actual bytes of the allocation.
/// Note that the bytes of a pointer represent the offset of the pointer
pub bytes: Vec<u8>,
/// Helps guarantee that stack allocations aren't deallocated via `rust_deallocate`
pub kind: MemoryKind<M>,
/// Memory regions that are locked by some function
- locks: RangeMap<LockInfo>,
+ locks: RangeMap<LockInfo<'tcx>>,
}
-impl<M> Allocation<M> {
- fn check_locks<'tcx>(
+impl<'tcx, M> Allocation<'tcx, M> {
+ fn check_locks(
&self,
frame: Option<usize>,
offset: u64,
len: u64,
access: AccessKind,
- ) -> Result<(), LockInfo> {
+ ) -> Result<(), LockInfo<'tcx>> {
if len == 0 {
return Ok(());
}
pub data: M::MemoryData,
/// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations).
- alloc_map: HashMap<u64, Allocation<M::MemoryKinds>>,
+ alloc_map: HashMap<u64, Allocation<'tcx, M::MemoryKinds>>,
/// The AllocId to assign to the next new regular allocation. Always incremented, never gets smaller.
next_alloc_id: u64,
- /// Release or suspend a write lock of the given lifetime prematurely.
+ /// Prematurely release or suspend the write lock that the current frame holds for the given lvalue.
/// When releasing, if there is a read lock or someone else's write lock, that's an error.
- /// We *do* accept relasing a NoLock, as this can happen when a local is first acquired and later force_allocate'd.
+ /// If no lock is held, that's fine. This can happen when e.g. a local is initialized
+ /// from a constant, and then suspended.
/// When suspending, the same cases are fine; we just register an additional suspension.
pub(crate) fn suspend_write_lock(
&mut self,
ptr: MemoryPointer,
len: u64,
- lock_region: Option<region::Scope>,
+ lock_path: &AbsLvalue<'tcx>,
suspend: Option<region::Scope>,
) -> EvalResult<'tcx> {
assert!(len > 0);
let cur_frame = self.cur_frame;
- let lock_lft = DynamicLifetime {
- frame: cur_frame,
- region: lock_region,
- };
let alloc = self.get_mut_unchecked(ptr.alloc_id)?;
'locks: for lock in alloc.locks.iter_mut(ptr.offset, len) {
let is_our_lock = match lock.active {
- WriteLock(lft) => lft == lock_lft,
+ WriteLock(lft) =>
+ // Double-check that we are holding the lock.
+ // (Due to subtyping, checking the region would not make any sense.)
+ lft.frame == cur_frame,
ReadLock(_) | NoLock => false,
};
if is_our_lock {
- trace!("Releasing {:?} at {:?}", lock.active, lock_lft);
+ trace!("Releasing {:?}", lock.active);
// Disable the lock
lock.active = NoLock;
} else {
trace!(
- "Not touching {:?} at {:?} as its not our lock",
+ "Not touching {:?} as it is not our lock",
lock.active,
- lock_lft
);
}
- match suspend {
- Some(suspend_region) => {
- trace!("Adding suspension to {:?} at {:?}", lock.active, lock_lft);
- // We just released this lock, so add a new suspension.
- // FIXME: Really, if there ever already is a suspension when is_our_lock, or if there is no suspension when !is_our_lock, something is amiss.
- // But this model is not good enough yet to prevent that.
- lock.suspended
- .entry(lock_lft)
- .or_insert_with(|| Vec::new())
- .push(suspend_region);
+ // Check if we want to register a suspension
+ if let Some(suspend_region) = suspend {
+ let lock_id = WriteLockId {
+ frame: cur_frame,
+ path: lock_path.clone(),
+ };
+ trace!("Adding suspension to {:?}", lock_id);
+ let mut new_suspension = false;
+ lock.suspended
+ .entry(lock_id)
+ // Remember whether we added a new suspension or not
+ .or_insert_with(|| { new_suspension = true; Vec::new() })
+ .push(suspend_region);
+ // If the suspension is new, we should have owned this.
+ // If there already was a suspension, we should NOT have owned this.
+ if new_suspension == is_our_lock {
+ // All is well
+ continue 'locks;
}
- None => {
- // Make sure we did not try to release someone else's lock.
- if !is_our_lock && lock.active != NoLock {
- return err!(InvalidMemoryLockRelease {
- ptr,
- len,
- frame: cur_frame,
- lock: lock.active.clone(),
- });
- }
+ } else {
+ if is_our_lock {
+ // All is well; we already released the lock above.
+ continue 'locks;
}
}
+ // If we get here, releasing this is an error except for NoLock.
+ if lock.active != NoLock {
+ return err!(InvalidMemoryLockRelease {
+ ptr,
+ len,
+ frame: cur_frame,
+ lock: lock.active.clone(),
+ });
+ }
}
Ok(())
&mut self,
ptr: MemoryPointer,
len: u64,
+ lock_path: &AbsLvalue<'tcx>,
lock_region: Option<region::Scope>,
suspended_region: region::Scope,
) -> EvalResult<'tcx> {
assert!(len > 0);
let cur_frame = self.cur_frame;
- let lock_lft = DynamicLifetime {
+ let lock_id = WriteLockId {
frame: cur_frame,
- region: lock_region,
+ path: lock_path.clone(),
};
let alloc = self.get_mut_unchecked(ptr.alloc_id)?;
for lock in alloc.locks.iter_mut(ptr.offset, len) {
// Check if we have a suspension here
- let (got_the_lock, remove_suspension) = match lock.suspended.get_mut(&lock_lft) {
+ let (got_the_lock, remove_suspension) = match lock.suspended.get_mut(&lock_id) {
None => {
trace!("No suspension around, we can just acquire");
(true, false)
}
Some(suspensions) => {
- trace!("Found suspension of {:?}, removing it", lock_lft);
+ trace!("Found suspension of {:?}, removing it", lock_id);
// That's us! Remove suspension (it should be in there). The same suspension can
// occur multiple times (when there are multiple shared borrows of this that have the same
// lifetime); only remove one of them.
if remove_suspension {
// with NLL, we could do that up in the match above...
assert!(got_the_lock);
- lock.suspended.remove(&lock_lft);
+ lock.suspended.remove(&lock_id);
}
if got_the_lock {
match lock.active {
ref mut active @ NoLock => {
- *active = WriteLock(lock_lft);
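+ // The active lock still carries a DynamicLifetime so it can be released
+ // automatically when `lock_region` ends; only the suspension bookkeeping
+ // is keyed by the abstract lvalue.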
+ *active = WriteLock(
+ DynamicLifetime {
+ frame: cur_frame,
+ region: lock_region,
+ }
+ );
}
_ => {
return err!(MemoryAcquireConflict {
if lock_ended {
lock.active = NoLock;
}
- // Also clean up suspended write locks
- lock.suspended.retain(|lft, _suspensions| !has_ended(lft));
+ // Also clean up suspended write locks when the function returns
+ if ending_region.is_none() {
+ lock.suspended.retain(|id, _suspensions| id.frame != cur_frame);
+ }
}
// Clean up the map
alloc.locks.retain(|lock| match lock.active {
/// Allocation accessors
impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
- pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation<M::MemoryKinds>> {
+ pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation<'tcx, M::MemoryKinds>> {
match id.into_alloc_id_kind() {
AllocIdKind::Function(_) => err!(DerefFunctionPointer),
AllocIdKind::Runtime(id) => {
fn get_mut_unchecked(
&mut self,
id: AllocId,
- ) -> EvalResult<'tcx, &mut Allocation<M::MemoryKinds>> {
+ ) -> EvalResult<'tcx, &mut Allocation<'tcx, M::MemoryKinds>> {
match id.into_alloc_id_kind() {
AllocIdKind::Function(_) => err!(DerefFunctionPointer),
AllocIdKind::Runtime(id) => {
}
}
- fn get_mut(&mut self, id: AllocId) -> EvalResult<'tcx, &mut Allocation<M::MemoryKinds>> {
+ fn get_mut(&mut self, id: AllocId) -> EvalResult<'tcx, &mut Allocation<'tcx, M::MemoryKinds>> {
let alloc = self.get_mut_unchecked(id)?;
if alloc.mutable == Mutability::Mutable {
Ok(alloc)
pub use self::machine::Machine;
-pub use self::validation::ValidationQuery;
+pub use self::validation::{ValidationQuery, AbsLvalue};
-use rustc::hir::Mutability;
+use rustc::hir::{self, Mutability};
use rustc::hir::Mutability::*;
use rustc::mir::{self, ValidationOp, ValidationOperand};
use rustc::ty::{self, Ty, TypeFoldable, TyCtxt};
use rustc::infer::InferCtxt;
use rustc::traits::Reveal;
use rustc::middle::region;
+use rustc_data_structures::indexed_vec::Idx;
use super::{EvalError, EvalResult, EvalErrorKind, EvalContext, DynamicLifetime, AccessKind, Value,
- Lvalue, LvalueExtra, Machine};
+ Lvalue, LvalueExtra, Machine, ValTy};
-pub type ValidationQuery<'tcx> = ValidationOperand<'tcx, Lvalue>;
+pub type ValidationQuery<'tcx> = ValidationOperand<'tcx, (AbsLvalue<'tcx>, Lvalue)>;
#[derive(Copy, Clone, Debug, PartialEq)]
enum ValidationMode {
}
}
-// Validity checks
+// Abstract lvalues
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub enum AbsLvalue<'tcx> {
+ Local(mir::Local),
+ Static(hir::def_id::DefId),
+ Projection(Box<AbsLvalueProjection<'tcx>>),
+}
+
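+// In an abstract lvalue, indices are fully evaluated `u64`s and element types are erased to `()`;
+// this is what lets `AbsLvalue` derive `Eq` and `Hash` so it can serve as part of a `WriteLockId`.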
+type AbsLvalueProjection<'tcx> = mir::Projection<'tcx, AbsLvalue<'tcx>, u64, ()>;
+type AbsLvalueElem<'tcx> = mir::ProjectionElem<'tcx, u64, ()>;
+
+impl<'tcx> AbsLvalue<'tcx> {
+ pub fn field(self, f: mir::Field) -> AbsLvalue<'tcx> {
+ self.elem(mir::ProjectionElem::Field(f, ()))
+ }
+
+ pub fn deref(self) -> AbsLvalue<'tcx> {
+ self.elem(mir::ProjectionElem::Deref)
+ }
+
+ pub fn downcast(self, adt_def: &'tcx ty::AdtDef, variant_index: usize) -> AbsLvalue<'tcx> {
+ self.elem(mir::ProjectionElem::Downcast(adt_def, variant_index))
+ }
+
+ pub fn index(self, index: u64) -> AbsLvalue<'tcx> {
+ self.elem(mir::ProjectionElem::Index(index))
+ }
+
+ fn elem(self, elem: AbsLvalueElem<'tcx>) -> AbsLvalue<'tcx> {
+ AbsLvalue::Projection(Box::new(AbsLvalueProjection {
+ base: self,
+ elem,
+ }))
+ }
+}
+
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
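+ /// Compute the abstract form of a single lvalue projection: erase the type information
+ /// and evaluate any index operand down to a concrete value.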
+ fn abstract_lvalue_projection(&self, proj: &mir::LvalueProjection<'tcx>) -> EvalResult<'tcx, AbsLvalueProjection<'tcx>> {
+ use self::mir::ProjectionElem::*;
+
+ let elem = match proj.elem {
+ Deref => Deref,
+ Field(f, _) => Field(f, ()),
+ Index(v) => {
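+ // Read the index local and reduce it to a concrete `u64`; the abstract lvalue
+ // records the value itself rather than the local it came from.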
+ let value = self.frame().get_local(v)?;
+ let ty = self.tcx.types.usize;
+ let n = self.value_to_primval(ValTy { value, ty })?.to_u64()?;
+ Index(n)
+ },
+ ConstantIndex { offset, min_length, from_end } =>
+ ConstantIndex { offset, min_length, from_end },
+ Subslice { from, to } =>
+ Subslice { from, to },
+ Downcast(adt, sz) => Downcast(adt, sz),
+ };
+ Ok(AbsLvalueProjection {
+ base: self.abstract_lvalue(&proj.base)?,
+ elem
+ })
+ }
+
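+ /// Compute the "abstract" (untyped, fully evaluated) version of an MIR lvalue.
+ /// This is what `WriteLockId` uses to identify a write lock.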
+ fn abstract_lvalue(&self, lval: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, AbsLvalue<'tcx>> {
+ Ok(match lval {
+ &mir::Lvalue::Local(l) => AbsLvalue::Local(l),
+ &mir::Lvalue::Static(ref s) => AbsLvalue::Static(s.def_id),
+ &mir::Lvalue::Projection(ref p) =>
+ AbsLvalue::Projection(Box::new(self.abstract_lvalue_projection(&*p)?)),
+ })
+ }
+
+ // Validity checks
pub(crate) fn validation_op(
&mut self,
op: ValidationOp,
// We need to monomorphize ty *without* erasing lifetimes
let ty = operand.ty.subst(self.tcx, self.substs());
let lval = self.eval_lvalue(&operand.lval)?;
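+ // Also compute the abstract lvalue; it is carried through validation so that suspending
+ // and later recovering a write lock agree on the lock's identity.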
+ let abs_lval = self.abstract_lvalue(&operand.lval)?;
let query = ValidationQuery {
- lval,
+ lval: (abs_lval, lval),
ty,
re: operand.re,
mutbl: operand.mutbl,
mode: ValidationMode,
) -> EvalResult<'tcx> {
// TODO: Maybe take visibility/privacy into account.
- for (idx, field) in variant.fields.iter().enumerate() {
- let field_ty = field.ty(self.tcx, subst);
- let field_lvalue = self.lvalue_field(query.lval, idx, query.ty, field_ty)?;
+ for (idx, field_def) in variant.fields.iter().enumerate() {
+ let field_ty = field_def.ty(self.tcx, subst);
+ let field = mir::Field::new(idx);
+ let field_lvalue = self.lvalue_field(query.lval.1, field, query.ty, field_ty)?;
self.validate(
ValidationQuery {
- lval: field_lvalue,
+ lval: (query.lval.0.clone().field(field), field_lvalue),
ty: field_ty,
..query
},
fn validate_ptr(
&mut self,
val: Value,
+ abs_lval: AbsLvalue<'tcx>,
pointee_ty: Ty<'tcx>,
re: Option<region::Scope>,
mutbl: Mutability,
let pointee_lvalue = self.val_to_lvalue(val, pointee_ty)?;
self.validate(
ValidationQuery {
- lval: pointee_lvalue,
+ lval: (abs_lval.deref(), pointee_lvalue),
ty: pointee_ty,
re,
mutbl,
// HACK: For now, bail out if we hit a dead local during recovery (can happen because sometimes we have
// StorageDead before EndRegion due to https://github.com/rust-lang/rust/issues/43481).
// TODO: We should rather fix the MIR.
- match query.lval {
+ match query.lval.1 {
Lvalue::Local { frame, local } => {
let res = self.stack[frame].get_local(local);
match (res, mode) {
// Tracking the same state for locals not backed by memory would just duplicate too
// much machinery.
// FIXME: We ignore alignment.
- let (ptr, extra) = self.force_allocation(query.lval)?.to_ptr_extra_aligned();
+ let (ptr, extra) = self.force_allocation(query.lval.1)?.to_ptr_extra_aligned();
// Determine the size
// FIXME: Can we reuse size_and_align_of_dst for Lvalues?
let len = match self.type_size(query.ty)? {
self.memory.recover_write_lock(
ptr,
len,
+ &query.lval.0,
query.re,
ending_ce,
)?
self.memory.suspend_write_lock(
ptr,
len,
- query.re,
+ &query.lval.0,
suspended_ce,
)?
}
ty: pointee_ty,
mutbl,
}) => {
- let val = self.read_lvalue(query.lval)?;
+ let val = self.read_lvalue(query.lval.1)?;
// Sharing restricts our context
if mutbl == MutImmutable {
query.mutbl = MutImmutable;
_ => {}
}
}
- self.validate_ptr(val, pointee_ty, query.re, query.mutbl, mode)
+ self.validate_ptr(val, query.lval.0, pointee_ty, query.re, query.mutbl, mode)
}
TyAdt(adt, _) if adt.is_box() => {
- let val = self.read_lvalue(query.lval)?;
- self.validate_ptr(val, query.ty.boxed_ty(), query.re, query.mutbl, mode)
+ let val = self.read_lvalue(query.lval.1)?;
+ self.validate_ptr(val, query.lval.0, query.ty.boxed_ty(), query.re, query.mutbl, mode)
}
TyFnPtr(_sig) => {
- let ptr = self.read_lvalue(query.lval)?
+ let ptr = self.read_lvalue(query.lval.1)?
.into_ptr(&self.memory)?
.to_ptr()?;
self.memory.get_fn(ptr)?;
// Compound types
TySlice(elem_ty) => {
- let len = match query.lval {
+ let len = match query.lval.1 {
Lvalue::Ptr { extra: LvalueExtra::Length(len), .. } => len,
_ => {
bug!(
}
};
for i in 0..len {
- let inner_lvalue = self.lvalue_index(query.lval, query.ty, i)?;
+ let inner_lvalue = self.lvalue_index(query.lval.1, query.ty, i)?;
self.validate(
ValidationQuery {
- lval: inner_lvalue,
+ lval: (query.lval.0.clone().index(i), inner_lvalue),
ty: elem_ty,
..query
},
TyArray(elem_ty, len) => {
let len = len.val.to_const_int().unwrap().to_u64().unwrap();
for i in 0..len {
- let inner_lvalue = self.lvalue_index(query.lval, query.ty, i as u64)?;
+ let inner_lvalue = self.lvalue_index(query.lval.1, query.ty, i as u64)?;
self.validate(
ValidationQuery {
- lval: inner_lvalue,
+ lval: (query.lval.0.clone().index(i as u64), inner_lvalue),
ty: elem_ty,
..query
},
}
TyDynamic(_data, _region) => {
// Check that this is a valid vtable
- let vtable = match query.lval {
+ let vtable = match query.lval.1 {
Lvalue::Ptr { extra: LvalueExtra::Vtable(vtable), .. } => vtable,
_ => {
bug!(
match adt.adt_kind() {
AdtKind::Enum => {
// TODO: Can we get the discriminant without forcing an allocation?
- let ptr = self.force_allocation(query.lval)?.to_ptr()?;
+ let ptr = self.force_allocation(query.lval.1)?.to_ptr()?;
let discr = self.read_discriminant_value(ptr, query.ty)?;
// Get variant index for discriminant
if variant.fields.len() > 0 {
// Downcast to this variant, if needed
let lval = if adt.variants.len() > 1 {
- self.eval_lvalue_projection(
- query.lval,
- query.ty,
- &mir::ProjectionElem::Downcast(adt, variant_idx),
- )?
+ (
+ query.lval.0.downcast(adt, variant_idx),
+ self.eval_lvalue_projection(
+ query.lval.1,
+ query.ty,
+ &mir::ProjectionElem::Downcast(adt, variant_idx),
+ )?,
+ )
} else {
query.lval
};
}
TyTuple(ref types, _) => {
for (idx, field_ty) in types.iter().enumerate() {
- let field_lvalue = self.lvalue_field(query.lval, idx, query.ty, field_ty)?;
+ let field = mir::Field::new(idx);
+ let field_lvalue = self.lvalue_field(query.lval.1, field, query.ty, field_ty)?;
self.validate(
ValidationQuery {
- lval: field_lvalue,
+ lval: (query.lval.0.clone().field(field), field_lvalue),
ty: field_ty,
..query
},
}
TyClosure(def_id, ref closure_substs) => {
for (idx, field_ty) in closure_substs.upvar_tys(def_id, self.tcx).enumerate() {
- let field_lvalue = self.lvalue_field(query.lval, idx, query.ty, field_ty)?;
+ let field = mir::Field::new(idx);
+ let field_lvalue = self.lvalue_field(query.lval.1, field, query.ty, field_ty)?;
self.validate(
ValidationQuery {
- lval: field_lvalue,
+ lval: (query.lval.0.clone().field(field), field_lvalue),
ty: field_ty,
..query
},
--- /dev/null
+// Make sure validation can handle many overlapping shared borrows for different parts of a data structure
+#![allow(unused_variables)]
+use std::cell::RefCell;
+
+fn evil(x: *mut i32) {
+ unsafe { *x = 0; } //~ ERROR: in conflict with lock WriteLock
+}
+
+fn test(r: &mut RefCell<i32>) {
+ let x = &*r; // releasing write lock, first suspension recorded
+ let mut x_ref = x.borrow_mut();
+ let x_inner : &mut i32 = &mut *x_ref; // new inner write lock, with same lifetime as outer lock
+ {
+ let x_inner_shr = &*x_inner; // releasing inner write lock, recording suspension
+ let y = &*r; // second suspension for the outer write lock
+ let x_inner_shr2 = &*x_inner; // 2nd suspension for inner write lock
+ }
+ // If the two locks are mixed up, here we should have a write lock, but we do not.
+ evil(x_inner as *mut _);
+}
+
+fn main() {
+ test(&mut RefCell::new(0));
+}
-// Make sure validation can handle many overlapping shared borrows for difference parts of a data structure
+// Make sure validation can handle many overlapping shared borrows for different parts of a data structure
#![allow(unused_variables)]
use std::cell::RefCell;
fn test2(r: &mut RefCell<i32>) {
let x = &*r; // releasing write lock, first suspension recorded
let mut x_ref = x.borrow_mut();
- let x_inner : &mut i32 = &mut *x_ref;
+ let x_inner : &mut i32 = &mut *x_ref; // new inner write lock, with same lifetime as outer lock
let x_inner_shr = &*x_inner; // releasing inner write lock, recording suspension
let y = &*r; // second suspension for the outer write lock
let x_inner_shr2 = &*x_inner; // 2nd suspension for inner write lock