use std::cell::RefCell;
use std::collections::HashSet;
use std::fmt;
use std::num::NonZeroU64;
use std::rc::Rc;

use rustc::ty::{self, layout::Size};
use rustc::hir::{Mutability, MutMutable, MutImmutable};
use rustc::mir::RetagKind;

use crate::{
    EvalResult, InterpError, MiriEvalContext, HelpersEvalContextExt, Evaluator, MutValueVisitor,
    MemoryKind, MiriMemoryKind, RangeMap, Allocation, AllocationExtra,
    Pointer, Immediate, ImmTy, PlaceTy, MPlaceTy,
};
pub type PtrId = NonZeroU64;
pub type CallId = u64;
/// Tracking pointer provenance
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Tag {
    Tagged(PtrId),
    Untagged,
}

impl fmt::Display for Tag {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Tag::Tagged(id) => write!(f, "{}", id),
            Tag::Untagged => write!(f, "<untagged>"),
        }
    }
}
/// Indicates which permission is granted (by this item to some pointers)
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Permission {
    /// Grants unique mutable access.
    Unique,
    /// Grants shared mutable access.
    SharedReadWrite,
    /// Grants shared read-only access.
    SharedReadOnly,
}
/// An item in the per-location borrow stack.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Item {
    /// Grants the given permission for pointers with this tag.
    Permission(Permission, Tag),
    /// A barrier, tracking the function it belongs to by its index on the call stack.
    FnBarrier(CallId),
}

impl fmt::Display for Item {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Item::Permission(perm, tag) => write!(f, "[{:?} for {}]", perm, tag),
            Item::FnBarrier(call) => write!(f, "[barrier {}]", call),
        }
    }
}
/// Extra per-location state.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Stack {
    /// Used *mostly* as a stack; never empty.
    /// We sometimes push into the middle but never remove from the middle.
    /// The same tag may occur multiple times, e.g. from a two-phase borrow.
    /// Invariants:
    /// * Above a `SharedReadOnly` there can only be barriers and more `SharedReadOnly`.
    borrows: Vec<Item>,
}
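// For illustration, one possible stack for a single location, bottom to top
// (a sketch, not produced by this code as-is):
//
//     [Unique for 1]                    // the original `&mut` / local
//     [SharedReadWrite for <untagged>]  // a raw pointer cast from it
//     [Unique for 2]                    // a `&mut` reborrowed through the raw pointer
//     [SharedReadOnly for 3]            // a `&` of that; must stay at the top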
/// Extra per-allocation state.
#[derive(Clone, Debug)]
pub struct Stacks {
    // Even reading memory can have effects on the stack, so we need a `RefCell` here.
    stacks: RefCell<RangeMap<Stack>>,
    // Pointer to global state.
    global: MemoryState,
}
/// Extra global state, available to the memory access hooks.
pub struct GlobalState {
    next_ptr_id: PtrId,
    next_call_id: CallId,
    active_calls: HashSet<CallId>,
}
pub type MemoryState = Rc<RefCell<GlobalState>>;
/// Indicates which kind of access is being performed.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum AccessKind {
    Read,
    Write { dealloc: bool },
}
102 // "Fake" constructors
104 fn write() -> AccessKind {
105 AccessKind::Write { dealloc: false }
108 fn dealloc() -> AccessKind {
109 AccessKind::Write { dealloc: true }
impl fmt::Display for AccessKind {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            AccessKind::Read => write!(f, "read"),
            AccessKind::Write { dealloc: false } => write!(f, "write"),
            AccessKind::Write { dealloc: true } => write!(f, "deallocation"),
        }
    }
}
/// Indicates which kind of reference is being created.
/// Used by `reborrow` to compute which permissions to grant to the
/// new pointer.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum RefKind {
    /// `&mut`.
    Mutable,
    /// `&` with or without interior mutability.
    Shared { frozen: bool },
    /// `*` (raw pointer).
    Raw,
}
impl fmt::Display for RefKind {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            RefKind::Mutable => write!(f, "mutable"),
            RefKind::Shared { frozen: true } => write!(f, "shared (frozen)"),
            RefKind::Shared { frozen: false } => write!(f, "shared (mutable)"),
            RefKind::Raw => write!(f, "raw"),
        }
    }
}
/// Utilities for initialization and ID generation.
impl Default for GlobalState {
    fn default() -> Self {
        GlobalState {
            next_ptr_id: NonZeroU64::new(1).unwrap(),
            next_call_id: 0,
            active_calls: HashSet::default(),
        }
    }
}

impl GlobalState {
    pub fn new_ptr(&mut self) -> PtrId {
        let id = self.next_ptr_id;
        self.next_ptr_id = NonZeroU64::new(id.get() + 1).unwrap();
        id
    }

    pub fn new_call(&mut self) -> CallId {
        let id = self.next_call_id;
        trace!("new_call: Assigning ID {}", id);
        self.active_calls.insert(id);
        self.next_call_id = id + 1;
        id
    }

    pub fn end_call(&mut self, id: CallId) {
        assert!(self.active_calls.remove(&id));
    }

    fn is_active(&self, id: CallId) -> bool {
        self.active_calls.contains(&id)
    }
}
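// Illustrative lifecycle of a call ID as used for barriers:
//
//     let id = global.new_call();   // on function entry: ID becomes active
//     // ... `FnBarrier(id)` items now protect everything below them ...
//     global.end_call(id);          // on function exit: barriers for `id` turn inert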
// # Stacked Borrows Core Begin

/// We need to make at least the following things true:
///
/// U1: After creating a `Uniq`, it is at the top.
/// U2: If the top is `Uniq`, accesses must be through that `Uniq` or remove it.
/// U3: If an access happens with a `Uniq`, it requires the `Uniq` to be in the stack.
///
/// F1: After creating a `&`, the parts outside `UnsafeCell` have our `SharedReadOnly` on top.
/// F2: If a write access happens, it pops the `SharedReadOnly`. This has two pieces:
/// F2a: If a write happens granted by an item below our `SharedReadOnly`, the `SharedReadOnly`
///      gets popped.
/// F2b: No `SharedReadWrite` or `Unique` will ever be added on top of our `SharedReadOnly`.
/// F3: If an access happens with an `&` outside `UnsafeCell`,
///     it requires the `SharedReadOnly` to still be in the stack.
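// For illustration (assuming raw-pointer retagging is enabled), a program
// these rules reject via U2:
//
//     let x = &mut 0u8;          // stack: [Unique for 1]
//     let raw = x as *mut u8;    // adds [SharedReadWrite for <untagged>] on top
//     let y = &mut *x;           // reborrow of `x` acts like a write: pops the raw item
//     unsafe { *raw = 5 };       // ERROR: no item grants write access to <untagged>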
impl Default for Tag {
    fn default() -> Tag {
        Tag::Untagged
    }
}
/// Core relations on `Permission` define which accesses are allowed:
/// On every access, we try to find a *granting* item, and then we remove all
/// *incompatible* items above it.
impl Permission {
    /// This defines, for a given permission, whether it permits the given kind of access.
    fn grants(self, access: AccessKind) -> bool {
        match (self, access) {
            // Unique and SharedReadWrite allow any kind of access.
            (Permission::Unique, _) |
            (Permission::SharedReadWrite, _) =>
                true,
            // SharedReadOnly only permits read access.
            (Permission::SharedReadOnly, AccessKind::Read) =>
                true,
            (Permission::SharedReadOnly, AccessKind::Write { .. }) =>
                false,
        }
    }
    /// This defines, for a given permission, which other permissions it can tolerate "above" itself
    /// for which kinds of accesses.
    /// If true, then `other` is allowed to remain on top of `self` when `access` happens.
    fn compatible_with(self, access: AccessKind, other: Permission) -> bool {
        use self::Permission::*;

        match (self, access, other) {
            // Some cases are impossible.
            (SharedReadOnly, _, SharedReadWrite) |
            (SharedReadOnly, _, Unique) =>
                bug!("There can never be a SharedReadWrite or a Unique on top of a SharedReadOnly"),
            // When `other` is `SharedReadOnly`, that is NEVER compatible with write accesses.
            // This makes sure read-only pointers become invalid on write accesses (ensures F2a).
            (_, AccessKind::Write { .. }, SharedReadOnly) =>
                false,
            // When `other` is `Unique`, that is compatible with nothing.
            // This makes sure unique pointers become invalid on incompatible accesses (ensures U2).
            (_, _, Unique) =>
                false,
            // When we are unique and this is a write/dealloc, we tolerate nothing.
            // This makes sure we re-assert uniqueness ("being on top") on write accesses.
            // (This is particularly important so that when a new mutable ref gets created, it gets
            // pushed on top of the right item -- this behaves like a write and we assert uniqueness of the
            // pointer from which this comes, *if* it was a unique pointer.)
            (Unique, AccessKind::Write { .. }, _) =>
                false,
            // `SharedReadWrite` items can tolerate other `SharedReadWrite` items for any kind of access.
            (SharedReadWrite, _, SharedReadWrite) =>
                true,
            // Any item can tolerate read accesses for shared items.
            // This includes unique items! Reads from unique pointers do not invalidate
            // other pointers.
            (_, AccessKind::Read, SharedReadWrite) |
            (_, AccessKind::Read, SharedReadOnly) =>
                true,
        }
    }
}
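// A few consequences of `compatible_with`, for illustration:
//
//     SharedReadWrite.compatible_with(AccessKind::write(), SharedReadWrite) // true:
//         a write through one raw pointer keeps sibling raw pointers alive
//     SharedReadWrite.compatible_with(AccessKind::write(), SharedReadOnly)  // false:
//         writes pop any read-only item above (F2a)
//     Unique.compatible_with(AccessKind::Read, SharedReadWrite)             // true:
//         reads do not invalidate shared items above a Unique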
impl RefKind {
    /// Defines which kind of access the "parent" must grant to create this reference.
    fn access(self) -> AccessKind {
        match self {
            RefKind::Mutable | RefKind::Shared { frozen: false } => AccessKind::write(),
            RefKind::Raw | RefKind::Shared { frozen: true } => AccessKind::Read,
            // FIXME: Just requiring read-only access for raw means that a raw ptr might not be writeable
            // even when we think it should be! Think about this some more.
        }
    }
    /// This defines the new permission used when a pointer gets created: For raw pointers, whether these are read-only
    /// or read-write depends on the permission from which they derive.
    fn new_perm<'tcx>(self, derived_from: Permission) -> EvalResult<'tcx, Permission> {
        Ok(match (self, derived_from) {
            // Do not derive a writable safe pointer from a read-only pointer!
            (RefKind::Mutable, Permission::SharedReadOnly) =>
                return err!(MachineError(format!(
                    "deriving mutable reference from read-only pointer"
                ))),
            (RefKind::Shared { frozen: false }, Permission::SharedReadOnly) =>
                return err!(MachineError(format!(
                    "deriving shared reference with interior mutability from read-only pointer"
                ))),
            // Safe pointer cases.
            (RefKind::Mutable, _) => Permission::Unique,
            (RefKind::Shared { frozen: true }, _) => Permission::SharedReadOnly,
            (RefKind::Shared { frozen: false }, _) => Permission::SharedReadWrite,
            // Raw pointer cases.
            (RefKind::Raw, Permission::SharedReadOnly) => Permission::SharedReadOnly,
            (RefKind::Raw, _) => Permission::SharedReadWrite,
        })
    }
}
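// For illustration, how `new_perm` maps reference kinds to permissions
// (the write-capable parents being `Unique` and `SharedReadWrite`):
//
//     mutable            from write-capable parent  -> Unique
//     shared (frozen)    from any parent            -> SharedReadOnly
//     shared (mutable)   from write-capable parent  -> SharedReadWrite
//     raw                from SharedReadOnly        -> SharedReadOnly (read-only raw)
//     raw                from write-capable parent  -> SharedReadWrite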
/// Core per-location operations: access, create.
impl Stack {
    /// Find the item granting the given kind of access to the given tag, and where that item is in the stack.
    fn find_granting(&self, access: AccessKind, tag: Tag) -> Option<(usize, Permission)> {
        self.borrows.iter()
            .enumerate() // we also need to know *where* in the stack
            .rev() // search top-to-bottom
            // Return permission of first item that grants access.
            // We require a permission with the right tag, ensuring U3 and F3.
            .filter_map(|(idx, item)| match item {
                &Item::Permission(perm, item_tag) if perm.grants(access) && tag == item_tag =>
                    Some((idx, perm)),
                _ => None,
            })
            .next()
    }
    /// Test if a memory `access` using pointer tagged `tag` is granted.
    /// If yes, return the index of the item that granted it.
    fn access<'tcx>(
        &mut self,
        access: AccessKind,
        tag: Tag,
        global: &GlobalState,
    ) -> EvalResult<'tcx, usize> {
        // Two main steps: Find granting item, remove all incompatible items above.
        // The second step is where barriers get implemented: they "protect" the items
        // below them, meaning that if we remove an item and then further up encounter a barrier,
        // we raise an error.
        // Afterwards we just do some post-processing for deallocation accesses.

        // Step 1: Find granting item.
        let (granting_idx, granting_perm) = self.find_granting(access, tag)
            .ok_or_else(|| InterpError::MachineError(format!(
                "no item granting {} access to tag {} found in borrow stack",
                access, tag,
            )))?;
        // Step 2: Remove everything incompatible above them.
        // Items below an active barrier however may not be removed, so we check that as well.
        // We do *not* maintain a stack discipline here. We could, in principle, decide to only
        // keep the items immediately above `granting_idx` that are compatible, and then pop the rest.
        // However, that kills off entire "branches" of pointer derivation too easily:
        // in `let raw = &mut *x as *mut _; let _val = *x;`, the second statement would pop the `Unique`
        // from the reborrow of the first statement, and subsequently also pop the `SharedReadWrite` for `raw`.

        // Implemented with indices because there does not seem to be a nice iterator and range-based
        // API for this.
        let mut cur = granting_idx + 1;
        let mut removed_item = None;
        while let Some(item) = self.borrows.get(cur) {
            match *item {
                Item::Permission(perm, _) => {
                    if granting_perm.compatible_with(access, perm) {
                        // Keep this, check next.
                        cur += 1;
                    } else {
                        // Aha! This is a bad one, remove it.
                        let item = self.borrows.remove(cur);
                        trace!("access: popping item {}", item);
                        removed_item = Some(item);
                    }
                }
                Item::FnBarrier(call) if !global.is_active(call) => {
                    // An inactive barrier, just get rid of it. (Housekeeping.)
                    self.borrows.remove(cur);
                }
                Item::FnBarrier(call) => {
                    // We hit an active barrier! If we have already removed an item,
                    // we got a problem! The barrier was supposed to protect this item.
                    if let Some(removed_item) = removed_item {
                        return err!(MachineError(format!(
                            "not granting {} access to tag {} because barrier ({}) protects incompatible item {}",
                            access, tag, call, removed_item
                        )));
                    }
                    // Keep this, check next.
                    cur += 1;
                }
            }
        }

        // If we got here, we found a matching item. Congratulations!
        // However, we are not done yet: If this access is deallocating, we must make sure
        // there are no active barriers remaining on the stack.
        if access == AccessKind::dealloc() {
            for &itm in self.borrows.iter().rev() {
                match itm {
                    Item::FnBarrier(call) if global.is_active(call) => {
                        return err!(MachineError(format!(
                            "deallocating with active barrier ({})", call
                        )));
                    }
                    _ => {}
                }
            }
        }

        // Done.
        Ok(granting_idx)
    }
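    // Barrier behavior, for illustration: in `fn f(x: &mut u8, raw: *mut u8)`,
    // retagging on function entry pushes a barrier above the `Unique` item for `x`.
    // A write through `raw` from inside `f` would first pop that `Unique` and then
    // hit the still-active barrier, so `access` reports an error instead of
    // silently invalidating `x`.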
    /// `reborrow` helper function.
    /// Grant `permission` to the new pointer tagged `tag`, added at `position` in the stack.
    fn grant(&mut self, perm: Permission, tag: Tag, position: usize) {
        // Simply add it to the "stack" -- this might add in the middle.
        // As an optimization, do nothing if the new item is identical to one of its neighbors.
        let item = Item::Permission(perm, tag);
        if self.borrows[position - 1] == item || self.borrows.get(position) == Some(&item) {
            // Optimization applies, done.
            trace!("reborrow: avoiding redundant item {}", item);
            return;
        }
        trace!("reborrow: pushing item {}", item);
        self.borrows.insert(position, item);
    }
    /// `reborrow` helper function.
    /// Add a barrier for the given call to the top of the stack.
    fn barrier(&mut self, call: CallId) {
        let itm = Item::FnBarrier(call);
        if *self.borrows.last().unwrap() == itm {
            // This is just an optimization, no functional change: Avoid stacking
            // multiple identical barriers on top of each other.
            // This can happen when a function receives several shared references
            // that overlap.
            trace!("reborrow: avoiding redundant extra barrier");
        } else {
            trace!("reborrow: pushing barrier for call {}", call);
            self.borrows.push(itm);
        }
    }
    /// `reborrow` helper function: test that the stack invariants are still maintained.
    fn test_invariants(&self) {
        let mut saw_shared_read_only = false;
        for item in self.borrows.iter() {
            match item {
                Item::Permission(Permission::SharedReadOnly, _) => {
                    saw_shared_read_only = true;
                }
                // Once we saw a `SharedReadOnly`, only barriers and more `SharedReadOnly` may follow.
                Item::Permission(perm, _) if saw_shared_read_only => {
                    panic!("Found {:?} on top of a SharedReadOnly!", perm);
                }
                _ => {}
            }
        }
    }
    /// Derive a new pointer from one with the given tag.
    fn reborrow<'tcx>(
        &mut self,
        derived_from: Tag,
        barrier: Option<CallId>,
        new_kind: RefKind,
        new_tag: Tag,
        global: &GlobalState,
    ) -> EvalResult<'tcx> {
        // Find the permission "from which we derive". To this end we first have to decide
        // if we derive from a permission that grants writes or just reads.
        let access = new_kind.access();
        // Now we figure out which item grants our parent (`derived_from`) permission.
        // We use that to determine (a) where to put the new item, and for raw pointers
        // (b) whether to grant read-only or read-write access.
        // FIXME: This handling of raw pointers is fragile, very fragile. What if we do
        // not get "the right one", like when there are multiple items granting `derived_from`
        // and we accidentally create a read-only pointer? This can happen for two-phase borrows
        // (then there's a `Unique` and a `SharedReadOnly` for the same tag), and for raw pointers
        // (which currently all are `Untagged`).
        let (derived_from_idx, derived_from_perm) = self.find_granting(access, derived_from)
            .ok_or_else(|| InterpError::MachineError(format!(
                "no item to reborrow as {} from tag {} found in borrow stack", new_kind, derived_from,
            )))?;
        // With this we can compute the permission for the new pointer.
        let new_perm = new_kind.new_perm(derived_from_perm)?;

        // We behave very differently for the "unsafe" case of a shared-read-write pointer
        // ("unsafe" because this also applies to shared references with interior mutability).
        // This is because such pointers may be reborrowed to unique pointers that actually
        // remain valid when their "parents" get further reborrows!
        // However, either way, we ensure that we insert the new item in a way that between
        // `derived_from` and the new one, there are only items *compatible with* `derived_from`.
        if new_perm == Permission::SharedReadWrite {
            // A very liberal reborrow because the new pointer does not expect any kind of aliasing guarantee.
            // Just insert new permission as child of old permission, and maintain everything else.
            // This inserts "as far down as possible", which is good because it makes this pointer as
            // long-lived as possible *and* we want all the items that are incompatible with this
            // to actually get removed from the stack. If we pushed a `SharedReadWrite` on top of
            // a `SharedReadOnly`, we'd violate the invariant that `SharedReadOnly` are at the top
            // and we'd allow write access without invalidating frozen shared references!
            // This ensures F2b for `SharedReadWrite` by adding the new item below any
            // potentially existing `SharedReadOnly`.
            self.grant(new_perm, new_tag, derived_from_idx + 1);

            // No barrier. They can rightfully alias with `&mut`.
            // FIXME: This means that the `dereferenceable` attribute on non-frozen shared references
            // is incorrect! They are dereferenceable when the function is called, but might become
            // non-dereferenceable during the course of execution.
            // Also see [1], [2].
            //
            // [1]: <https://internals.rust-lang.org/t/
            //       is-it-possible-to-be-memory-safe-with-deallocated-self/8457/8>,
            // [2]: <https://lists.llvm.org/pipermail/llvm-dev/2018-July/124555.html>
        } else {
            // A "safe" reborrow for a pointer that actually expects some aliasing guarantees.
            // Here, creating a reference actually counts as an access, and pops incompatible
            // stuff off the stack.
            // This ensures F2b for `Unique`, by removing offending `SharedReadOnly`.
            let check_idx = self.access(access, derived_from, global)?;
            assert_eq!(check_idx, derived_from_idx, "somehow we saw different items??");

            // We insert "as far up as possible": We know only compatible items are remaining
            // on top of `derived_from`, and we want the new item at the top so that we
            // get the strongest possible guarantees.
            // This ensures U1 and F1.
            self.grant(new_perm, new_tag, self.borrows.len());
        }

        // Now is a good time to add the barrier, protecting the item we just added.
        if let Some(call) = barrier {
            self.barrier(call);
        }

        // Make sure that after all this, the stack's invariant is still maintained.
        if cfg!(debug_assertions) {
            self.test_invariants();
        }

        Ok(())
    }
}
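// For illustration: the liberal `SharedReadWrite` insertion is what lets multiple
// shared references with interior mutability coexist. Both get `SharedReadWrite`
// items inserted as siblings right above the parent, so writes through one do not
// invalidate the other:
//
//     let cell = std::cell::Cell::new(0u8);
//     let r1 = &cell;
//     let r2 = &cell;
//     r1.set(1); // fine
//     r2.set(2); // still fine, r2's item was not popped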
/// Higher-level per-location operations: deref, access, reborrow.
impl Stacks {
    /// Creates new stack with initial tag.
    fn new(
        size: Size,
        tag: Tag,
        extra: MemoryState,
    ) -> Self {
        let item = Item::Permission(Permission::Unique, tag);
        let stack = Stack {
            borrows: vec![item],
        };
        Stacks {
            stacks: RefCell::new(RangeMap::new(size, stack)),
            global: extra,
        }
    }
    /// `ptr` got used, reflect that in the stack.
    fn access<'tcx>(
        &self,
        ptr: Pointer<Tag>,
        size: Size,
        kind: AccessKind,
    ) -> EvalResult<'tcx> {
        trace!("{} access of tag {}: {:?}, size {}", kind, ptr.tag, ptr, size.bytes());
        // Even reads can have a side-effect, by invalidating other references.
        // This is fundamentally necessary since `&mut` asserts that there
        // are no accesses through other references, not even reads.
        let global = self.global.borrow();
        let mut stacks = self.stacks.borrow_mut();
        for stack in stacks.iter_mut(ptr.offset, size) {
            stack.access(kind, ptr.tag, &*global)?;
        }
        Ok(())
    }
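    // For illustration: even a read can invalidate other pointers.
    //
    //     let x = &mut 0u8;
    //     let y = &mut *x;   // y: Unique above x's item
    //     let _val = *x;     // read with x's tag pops y's Unique (U2)
    //     *y = 1;            // ERROR: no item grants write access to y's tag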
    /// Reborrow the given pointer to the new tag for the given kind of reference.
    /// This works on `&self` because we might encounter references to constant memory.
    fn reborrow<'tcx>(
        &self,
        ptr: Pointer<Tag>,
        size: Size,
        barrier: Option<CallId>,
        new_kind: RefKind,
        new_tag: Tag,
    ) -> EvalResult<'tcx> {
        trace!(
            "{} reborrow for tag {} to {}: {:?}, size {}",
            new_kind, ptr.tag, new_tag, ptr, size.bytes(),
        );
        let global = self.global.borrow();
        let mut stacks = self.stacks.borrow_mut();
        for stack in stacks.iter_mut(ptr.offset, size) {
            stack.reborrow(ptr.tag, barrier, new_kind, new_tag, &*global)?;
        }
        Ok(())
    }
}
// # Stacked Borrows Core End

// Glue code to connect with Miri Machine Hooks
pub fn new_allocation(
    size: Size,
    extra: &MemoryState,
    kind: MemoryKind<MiriMemoryKind>,
) -> (Tag, Stacks) {
    let tag = match kind {
        MemoryKind::Stack => {
            // New unique borrow. This `Uniq` is not accessible by the program,
            // so it will only ever be used when using the local directly (i.e.,
            // not through a pointer). That is, whenever we directly use a local, this will pop
            // everything else off the stack, invalidating all previous pointers,
            // and in particular, *all* raw pointers. This subsumes the explicit
            // `reset` which the blog post [1] says to perform when accessing a local.
            //
            // [1]: <https://www.ralfj.de/blog/2018/08/07/stacked-borrows.html>
            Tag::Tagged(extra.borrow_mut().new_ptr())
        }
        _ => {
            // Everything else is handled like a raw pointer for now.
            Tag::Untagged
        }
    };
    let stack = Stacks::new(size, tag, Rc::clone(extra));
    (tag, stack)
}
impl AllocationExtra<Tag> for Stacks {
    #[inline(always)]
    fn memory_read<'tcx>(
        alloc: &Allocation<Tag, Stacks>,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> EvalResult<'tcx> {
        alloc.extra.access(ptr, size, AccessKind::Read)
    }

    #[inline(always)]
    fn memory_written<'tcx>(
        alloc: &mut Allocation<Tag, Stacks>,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> EvalResult<'tcx> {
        alloc.extra.access(ptr, size, AccessKind::write())
    }

    #[inline(always)]
    fn memory_deallocated<'tcx>(
        alloc: &mut Allocation<Tag, Stacks>,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> EvalResult<'tcx> {
        alloc.extra.access(ptr, size, AccessKind::dealloc())
    }
}
impl<'a, 'mir, 'tcx> EvalContextPrivExt<'a, 'mir, 'tcx> for crate::MiriEvalContext<'a, 'mir, 'tcx> {}
trait EvalContextPrivExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a, 'mir, 'tcx> {
    fn reborrow(
        &mut self,
        place: MPlaceTy<'tcx, Tag>,
        size: Size,
        mutbl: Option<Mutability>,
        new_tag: Tag,
        fn_barrier: bool,
    ) -> EvalResult<'tcx> {
        let this = self.eval_context_mut();
        let barrier = if fn_barrier { Some(this.frame().extra) } else { None };
        let ptr = place.ptr.to_ptr()?;
        trace!("reborrow: creating new reference for {:?} (pointee {}): {:?}",
            ptr, place.layout.ty, new_tag);

        // Get the allocation. It might not be mutable, so we cannot use `get_mut`.
        let alloc = this.memory().get(ptr.alloc_id)?;
        alloc.check_bounds(this, ptr, size)?;
        // Update the stacks.
        if mutbl == Some(MutImmutable) {
            // Reference that cares about freezing. We need a frozen-sensitive reborrow.
            this.visit_freeze_sensitive(place, size, |cur_ptr, size, frozen| {
                let new_kind = RefKind::Shared { frozen };
                alloc.extra.reborrow(cur_ptr, size, barrier, new_kind, new_tag)
            })?;
        } else {
            // Just treat this as one big chunk.
            let new_kind = if mutbl == Some(MutMutable) { RefKind::Mutable } else { RefKind::Raw };
            alloc.extra.reborrow(ptr, size, barrier, new_kind, new_tag)?;
        }
        Ok(())
    }
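    // For illustration: for a `&(i32, Cell<i32>)`, the frozen-sensitive visit gives
    // the `i32` part `RefKind::Shared { frozen: true }` (a `SharedReadOnly` item)
    // and the `Cell<i32>` part `RefKind::Shared { frozen: false }` (a
    // `SharedReadWrite` item), so only the part outside the `UnsafeCell` is
    // protected against writes.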
    /// Retags an individual pointer, returning the retagged version.
    /// `mutbl` can be `None` to make this a raw pointer.
    fn retag_reference(
        &mut self,
        val: ImmTy<'tcx, Tag>,
        mutbl: Option<Mutability>,
        fn_barrier: bool,
        two_phase: bool,
    ) -> EvalResult<'tcx, Immediate<Tag>> {
        let this = self.eval_context_mut();
        // We want a place for where the ptr *points to*, so we get one.
        let place = this.ref_to_mplace(val)?;
        let size = this.size_and_align_of_mplace(place)?
            .map(|(size, _)| size)
            .unwrap_or_else(|| place.layout.size);
        if size == Size::ZERO {
            // Nothing to do for ZSTs.
            return Ok(*val);
        }

        // Compute the new borrow.
        let new_tag = match mutbl {
            Some(_) => Tag::Tagged(this.memory().extra.borrow_mut().new_ptr()),
            None => Tag::Untagged,
        };

        // Reborrow.
        this.reborrow(place, size, mutbl, new_tag, fn_barrier)?;
        let new_place = place.replace_tag(new_tag);
        // Handle two-phase borrows.
        if two_phase {
            assert!(mutbl == Some(MutMutable), "two-phase shared borrows make no sense");
            // Grant read access *to the parent pointer* with the old tag. This means the same pointer
            // has multiple items in the stack now!
            // FIXME: Think about this some more, in particular about the interaction with cast-to-raw.
            // Maybe find a better way to express 2-phase, now that we have a "more expressive language"
            // in the stack.
            let old_tag = place.ptr.to_ptr().unwrap().tag;
            this.reborrow(new_place, size, Some(MutImmutable), old_tag, /* fn_barrier: */ false)?;
        }

        // Return the new pointer.
        Ok(new_place.to_ref())
    }
}
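// For illustration: two-phase borrows arise from nested method calls like
//
//     let mut v = vec![0];
//     v.push(v.len()); // the `&mut v` for `push` is created before `v.len()` runs
//
// Re-granting read access to the parent's old tag is what keeps the `v.len()`
// read legal while the two-phase `&mut` already exists.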
impl<'a, 'mir, 'tcx> EvalContextExt<'a, 'mir, 'tcx> for crate::MiriEvalContext<'a, 'mir, 'tcx> {}
pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a, 'mir, 'tcx> {
    fn retag(
        &mut self,
        kind: RetagKind,
        place: PlaceTy<'tcx, Tag>
    ) -> EvalResult<'tcx> {
        let this = self.eval_context_mut();
        // Determine mutability and whether to add a barrier.
        // Cannot use `builtin_deref` because that reports *immutable* for `Box`,
        // making it useless.
        fn qualify(ty: ty::Ty<'_>, kind: RetagKind) -> Option<(Option<Mutability>, bool)> {
            match ty.sty {
                // References are simple.
                ty::Ref(_, _, mutbl) => Some((Some(mutbl), kind == RetagKind::FnEntry)),
                // Raw pointers need to be enabled.
                ty::RawPtr(..) if kind == RetagKind::Raw => Some((None, false)),
                // Boxes do not get a barrier: barriers reflect that references outlive the call
                // they were passed in to; that's just not the case for boxes.
                ty::Adt(..) if ty.is_box() => Some((Some(MutMutable), false)),
                _ => None,
            }
        }

        // We need a visitor to visit all references. However, that requires
        // a `MemPlace`, so we have a fast path for reference types that
        // avoids allocating.
        if let Some((mutbl, barrier)) = qualify(place.layout.ty, kind) {
            // Fast path.
            let val = this.read_immediate(this.place_to_op(place)?)?;
            let val = this.retag_reference(val, mutbl, barrier, kind == RetagKind::TwoPhase)?;
            this.write_immediate(val, place)?;
            return Ok(());
        }
        let place = this.force_allocation(place)?;

        let mut visitor = RetagVisitor { ecx: this, kind };
        visitor.visit_value(place)?;
        // The actual visitor.
        struct RetagVisitor<'ecx, 'a, 'mir, 'tcx> {
            ecx: &'ecx mut MiriEvalContext<'a, 'mir, 'tcx>,
            kind: RetagKind,
        }
        impl<'ecx, 'a, 'mir, 'tcx>
            MutValueVisitor<'a, 'mir, 'tcx, Evaluator<'tcx>>
        for
            RetagVisitor<'ecx, 'a, 'mir, 'tcx>
        {
            type V = MPlaceTy<'tcx, Tag>;

            #[inline(always)]
            fn ecx(&mut self) -> &mut MiriEvalContext<'a, 'mir, 'tcx> {
                self.ecx
            }

            // Primitives of reference type, that is the one thing we are interested in.
            fn visit_primitive(&mut self, place: MPlaceTy<'tcx, Tag>) -> EvalResult<'tcx>
            {
                // Cannot use `builtin_deref` because that reports *immutable* for `Box`,
                // making it useless.
                if let Some((mutbl, barrier)) = qualify(place.layout.ty, self.kind) {
                    let val = self.ecx.read_immediate(place.into())?;
                    let val = self.ecx.retag_reference(
                        val,
                        mutbl,
                        barrier,
                        self.kind == RetagKind::TwoPhase
                    )?;
                    self.ecx.write_immediate(val, place.into())?;
                }
                Ok(())
            }
        }

        Ok(())
    }
}