1 //! Implements "Stacked Borrows". See <https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md>
2 //! for further information.
5 use std::cell::RefCell;
7 use std::num::NonZeroU64;
9 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
10 use rustc_hir::Mutability;
11 use rustc_middle::mir::RetagKind;
12 use rustc_middle::ty::{
14 layout::{HasParamEnv, LayoutOf},
16 use rustc_span::DUMMY_SP;
17 use rustc_target::abi::Size;
18 use std::collections::HashSet;
23 use diagnostics::{AllocHistory, TagHistory};
25 pub type PtrId = NonZeroU64;
26 pub type CallId = NonZeroU64;
27 pub type AllocExtra = Stacks;
29 /// Tracking pointer provenance
30 #[derive(Copy, Clone, Hash, PartialEq, Eq)]
36 impl fmt::Debug for SbTag {
37 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
39 SbTag::Tagged(id) => write!(f, "<{}>", id),
40 SbTag::Untagged => write!(f, "<untagged>"),
45 /// Indicates which permission is granted (by this item to some pointers)
46 #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
48 /// Grants unique mutable access.
50 /// Grants shared mutable access.
52 /// Grants shared read-only access.
54 /// Grants no access, but separates two groups of SharedReadWrite so they are not
55 /// all considered mutually compatible.
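/// For example (an illustrative sketch; the tags are made up): in a stack like
///
///     [ Unique<x>, SharedReadWrite<p>, Disabled, SharedReadWrite<q> ]
///
/// the `Disabled` entry (a former `Unique` that was invalidated by a read) keeps `<p>` and
/// `<q>` from being treated as one block of mutually compatible `SharedReadWrite` items.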
59 /// An item in the per-location borrow stack.
60 #[derive(Copy, Clone, Hash, PartialEq, Eq)]
62 /// The permission this item grants.
64 /// The pointers the permission is granted to.
66 /// An optional protector, ensuring the item cannot get popped until the call identified by `CallId` has ended.
67 protector: Option<CallId>,
70 impl fmt::Debug for Item {
71 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
72 write!(f, "[{:?} for {:?}", self.perm, self.tag)?;
73 if let Some(call) = self.protector {
74 write!(f, " (call {})", call)?;
81 /// Extra per-location state.
82 #[derive(Clone, Debug, PartialEq, Eq)]
84 /// Used *mostly* as a stack; never empty.
86 /// * Above a `SharedReadOnly` there can only be more `SharedReadOnly`.
87 /// * Except for `Untagged`, no tag occurs in the stack more than once.
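/// An example of a well-formed stack (an illustrative sketch; the tags are made up):
///
///     [ Unique<1>, SharedReadWrite<untagged>, Unique<3>, SharedReadOnly<4>, SharedReadOnly<5> ]
///
/// The bottom item typically carries the allocation's base tag, and per the first invariant
/// above, the `SharedReadOnly` items can only sit at the top.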
91 /// Extra per-allocation state.
92 #[derive(Clone, Debug)]
94 // Even reading memory can have effects on the stack, so we need a `RefCell` here.
95 stacks: RefCell<RangeMap<Stack>>,
96 /// Stores past operations on this allocation
97 history: RefCell<AllocHistory>,
100 /// Extra global state, available to the memory access hooks.
102 pub struct GlobalStateInner {
103 /// Next unused pointer ID (tag).
105 /// Table storing the "base" tag for each allocation.
106 /// The base tag is the one used for the initial pointer.
107 /// We need this in a separate table to handle cyclic statics.
108 base_ptr_ids: FxHashMap<AllocId, SbTag>,
109 /// Next unused call ID (for protectors).
110 next_call_id: CallId,
111 /// Those call IDs corresponding to functions that are still running.
112 active_calls: FxHashSet<CallId>,
113 /// The pointer IDs to trace
114 tracked_pointer_tags: HashSet<PtrId>,
115 /// The call IDs to trace
116 tracked_call_ids: HashSet<CallId>,
117 /// Whether to track raw pointers.
121 /// We need interior mutable access to the global state.
122 pub type GlobalState = RefCell<GlobalStateInner>;
124 /// Indicates which kind of access is being performed.
125 #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
126 pub enum AccessKind {
131 impl fmt::Display for AccessKind {
132 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
134 AccessKind::Read => write!(f, "read access"),
135 AccessKind::Write => write!(f, "write access"),
140 /// Indicates which kind of reference is being created.
141 /// Used by high-level `reborrow` to compute which permissions to grant to the new pointer.
143 #[derive(Copy, Clone, Hash, PartialEq, Eq)]
145 /// `&mut` and `Box`.
146 Unique { two_phase: bool },
147 /// `&` with or without interior mutability.
149 /// `*mut`/`*const` (raw pointers).
150 Raw { mutable: bool },
153 impl fmt::Display for RefKind {
154 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
156 RefKind::Unique { two_phase: false } => write!(f, "unique"),
157 RefKind::Unique { two_phase: true } => write!(f, "unique (two-phase)"),
158 RefKind::Shared => write!(f, "shared"),
159 RefKind::Raw { mutable: true } => write!(f, "raw (mutable)"),
160 RefKind::Raw { mutable: false } => write!(f, "raw (constant)"),
165 /// Utilities for initialization and ID generation
166 impl GlobalStateInner {
168 tracked_pointer_tags: HashSet<PtrId>,
169 tracked_call_ids: HashSet<CallId>,
173 next_ptr_id: NonZeroU64::new(1).unwrap(),
174 base_ptr_ids: FxHashMap::default(),
175 next_call_id: NonZeroU64::new(1).unwrap(),
176 active_calls: FxHashSet::default(),
177 tracked_pointer_tags,
183 fn new_ptr(&mut self) -> PtrId {
184 let id = self.next_ptr_id;
185 if self.tracked_pointer_tags.contains(&id) {
186 register_diagnostic(NonHaltingDiagnostic::CreatedPointerTag(id));
188 self.next_ptr_id = NonZeroU64::new(id.get() + 1).unwrap();
192 pub fn new_call(&mut self) -> CallId {
193 let id = self.next_call_id;
194 trace!("new_call: Assigning ID {}", id);
195 if self.tracked_call_ids.contains(&id) {
196 register_diagnostic(NonHaltingDiagnostic::CreatedCallId(id));
198 assert!(self.active_calls.insert(id));
199 self.next_call_id = NonZeroU64::new(id.get() + 1).unwrap();
203 pub fn end_call(&mut self, id: CallId) {
204 assert!(self.active_calls.remove(&id));
207 fn is_active(&self, id: CallId) -> bool {
208 self.active_calls.contains(&id)
211 pub fn base_tag(&mut self, id: AllocId) -> SbTag {
212 self.base_ptr_ids.get(&id).copied().unwrap_or_else(|| {
213 let tag = SbTag::Tagged(self.new_ptr());
214 trace!("New allocation {:?} has base tag {:?}", id, tag);
215 self.base_ptr_ids.try_insert(id, tag).unwrap();
220 pub fn base_tag_untagged(&mut self, id: AllocId) -> SbTag {
221 trace!("New allocation {:?} has no base tag (untagged)", id);
222 let tag = SbTag::Untagged;
223 // This must only be done on new allocations.
224 self.base_ptr_ids.try_insert(id, tag).unwrap();
232 help: Option<String>,
233 history: Option<TagHistory>,
234 ) -> InterpError<'static> {
235 err_machine_stop!(TerminationInfo::ExperimentalUb {
239 "https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md"
245 // # Stacked Borrows Core Begin
247 /// We need to make at least the following things true:
249 /// U1: After creating a `Uniq`, it is at the top.
250 /// U2: If the top is `Uniq`, accesses must be through that `Uniq` or remove it.
251 /// U3: If an access happens with a `Uniq`, it requires the `Uniq` to be in the stack.
253 /// F1: After creating a `&`, the parts outside `UnsafeCell` have our `SharedReadOnly` on top.
254 /// F2: If a write access happens, it pops the `SharedReadOnly`. This has three pieces:
255 /// F2a: If a write happens granted by an item below our `SharedReadOnly`, the `SharedReadOnly` gets popped.
257 /// F2b: No `SharedReadWrite` or `Unique` will ever be added on top of our `SharedReadOnly`.
258 /// F3: If an access happens with an `&` outside `UnsafeCell`,
259 /// it requires the `SharedReadOnly` to still be in the stack.
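///
/// An illustrative sketch of these rules on a small program (hypothetical, not from a test):
///
///     let mut local = 0u8;
///     let x = &mut local;              // U1: x's Unique item is pushed on top.
///     let raw = x as *mut u8;          // raw's SharedReadWrite sits above x's item.
///     *x = 1;                          // U2: this write pops raw's item.
///     // unsafe { *raw = 2 };          // Would be UB: raw's item is gone (cf. U3).
///
///     let p = &mut local as *mut u8;
///     let shr = unsafe { &*p };        // F1: shr's SharedReadOnly is now on top.
///     unsafe { *p = 1 };               // F2a: the write through p pops shr's item.
///     // let _val = *shr;              // Would be UB: shr's item is gone (F3).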
261 /// Core relation on `Permission` to define which accesses are allowed
263 /// This defines, for a given permission, whether it permits the given kind of access.
264 fn grants(self, access: AccessKind) -> bool {
265 // Disabled grants nothing. Otherwise, all items grant read access, and except for SharedReadOnly they grant write access.
266 self != Permission::Disabled
267 && (access == AccessKind::Read || self != Permission::SharedReadOnly)
271 /// Core per-location operations: access, dealloc, reborrow.
273 /// Find the item granting the given kind of access to the given tag, and return where
274 /// it is on the stack.
275 fn find_granting(&self, access: AccessKind, tag: SbTag) -> Option<usize> {
278 .enumerate() // we also need to know *where* in the stack
279 .rev() // search top-to-bottom
280 // Return permission of first item that grants access.
281 // We require a permission with the right tag, ensuring U3 and F3.
284 if tag == item.tag && item.perm.grants(access) { Some(idx) } else { None }
289 /// Find the first write-incompatible item above the given one --
290 /// i.e., find the height to which the stack will be truncated when writing to `granting`.
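/// For example (an illustrative sketch; tags and indices are made up): in a stack
///
///     [ Unique<1>, SharedReadWrite<2>, SharedReadWrite<3>, SharedReadOnly<4> ]
///
/// calling this with `granting = 1` returns 3 (the adjacent `SharedReadWrite` above index 1
/// is still write-compatible, the `SharedReadOnly` is not), while `granting = 0` returns 1
/// (everything above a `Unique` is write-incompatible).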
291 fn find_first_write_incompatible(&self, granting: usize) -> usize {
292 let perm = self.borrows[granting].perm;
294 Permission::SharedReadOnly => bug!("Cannot use SharedReadOnly for writing"),
295 Permission::Disabled => bug!("Cannot use Disabled for anything"),
296 // On a write, everything above us is incompatible.
297 Permission::Unique => granting + 1,
298 Permission::SharedReadWrite => {
299 // The SharedReadWrite *just* above us are compatible, so skip those.
300 let mut idx = granting + 1;
301 while let Some(item) = self.borrows.get(idx) {
302 if item.perm == Permission::SharedReadWrite {
306 // Found first incompatible!
315 /// Check if the given item is protected.
317 /// The `provoking_access` argument is only used to produce diagnostics.
318 /// It is `Some` when we are granting the contained access for said tag, and it is
319 /// `None` during a deallocation.
320 /// Within `provoking_access`, the `AllocRange` refers to the entire operation, and
321 /// the `Size` refers to the specific location in the `AllocRange` that we are
322 /// currently checking.
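///
/// For example (an illustrative sketch, assuming `x` and `ptr` point to the same location):
///
///     fn callee(x: &mut i32, ptr: *mut i32) {
///         unsafe { *ptr = 1 }; // The write through ptr would pop x's protected item: error.
///     }
///
/// The function-entry retag of `x` adds a protector for the current call, so the write
/// through `ptr` is reported here instead of merely invalidating `x`.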
325 provoking_access: Option<(SbTag, AllocRange, Size, AccessKind)>, // just for debug printing and error messages
326 global: &GlobalStateInner,
327 alloc_history: &mut AllocHistory,
328 ) -> InterpResult<'tcx> {
329 if let SbTag::Tagged(id) = item.tag {
330 if global.tracked_pointer_tags.contains(&id) {
331 register_diagnostic(NonHaltingDiagnostic::PoppedPointerTag(
333 provoking_access.map(|(tag, _alloc_range, _size, access)| (tag, access)),
337 if let Some(call) = item.protector {
338 if global.is_active(call) {
339 if let Some((tag, alloc_range, offset, _access)) = provoking_access {
342 "not granting access to tag {:?} because incompatible item is protected: {:?}",
346 alloc_history.get_logs_relevant_to(
355 format!("deallocating while item is protected: {:?}", item),
365 /// Test if a memory `access` using a pointer tagged `tag` is granted.
366 /// If yes, return the index of the item that granted it.
367 /// `range` refers to the entire operation, and `offset` refers to the specific offset into the
368 /// allocation that we are currently checking.
373 (alloc_id, alloc_range, offset): (AllocId, AllocRange, Size), // just for debug printing and error messages
374 global: &mut GlobalStateInner,
375 current_span: &mut CurrentSpan<'_, '_, 'tcx>,
376 alloc_history: &mut AllocHistory,
377 ) -> InterpResult<'tcx> {
378 // Two main steps: Find granting item, remove incompatible items above.
380 // Step 1: Find granting item.
381 let granting_idx = self.find_granting(access, tag).ok_or_else(|| {
382 alloc_history.access_error(access, tag, alloc_id, alloc_range, offset, self)
385 // Step 2: Remove incompatible items above them. Make sure we do not remove protected
386 // items. Behavior differs for reads and writes.
387 if access == AccessKind::Write {
388 // Remove everything above the write-compatible items, like a proper stack. This makes sure read-only and unique
389 // pointers become invalid on write accesses (ensures F2a, and ensures U2 for write accesses).
390 let first_incompatible_idx = self.find_first_write_incompatible(granting_idx);
391 for item in self.borrows.drain(first_incompatible_idx..).rev() {
392 trace!("access: popping item {:?}", item);
393 Stack::check_protector(
395 Some((tag, alloc_range, offset, access)),
399 alloc_history.log_invalidation(item.tag, alloc_range, current_span);
402 // On a read, *disable* all `Unique` above the granting item. This ensures U2 for read accesses.
403 // The reason this is not following the stack discipline (by removing the first Unique and
404 // everything on top of it) is that in `let raw = &mut *x as *mut _; let _val = *x;`, the second statement
405 // would pop the `Unique` from the reborrow of the first statement, and subsequently also pop the
406 // `SharedReadWrite` for `raw`.
407 // This pattern occurs a lot in the standard library: create a raw pointer, then also create a shared
408 // reference and use that.
409 // We *disable* instead of removing `Unique` to avoid "connecting" two neighbouring blocks of SRWs.
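// An illustrative sketch of that pattern (tags are made up):
//
//     let raw = &mut *x as *mut _;   // stack: [ ..., Unique<x>, Unique<tmp>, SharedReadWrite<raw> ]
//     let _val = *x;                 // read via <x>: Unique<tmp> is merely disabled...
//     unsafe { *raw = 1 };           // ...so SharedReadWrite<raw> is still in the stack and usable.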
410 for idx in ((granting_idx + 1)..self.borrows.len()).rev() {
411 let item = &mut self.borrows[idx];
412 if item.perm == Permission::Unique {
413 trace!("access: disabling item {:?}", item);
414 Stack::check_protector(
416 Some((tag, alloc_range, offset, access)),
420 item.perm = Permission::Disabled;
421 alloc_history.log_invalidation(item.tag, alloc_range, current_span);
430 /// Deallocate a location: like a write access, but additionally there must be no
431 /// active protectors at all, because we will remove all items.
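///
/// For example (an illustrative sketch, assuming `x` and `ptr` point into the same heap allocation):
///
///     fn callee(x: &mut i32, ptr: *mut i32) {
///         drop(unsafe { Box::from_raw(ptr) }); // Deallocating while x's item is protected: error.
///     }
///
/// Without the protector, the deallocation would merely have invalidated `x`.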
435 (alloc_id, alloc_range, offset): (AllocId, AllocRange, Size), // just for debug printing and error messages
436 global: &GlobalStateInner,
437 alloc_history: &mut AllocHistory,
438 ) -> InterpResult<'tcx> {
439 // Step 1: Find granting item.
440 self.find_granting(AccessKind::Write, tag).ok_or_else(|| {
442 "no item granting write access for deallocation to tag {:?} at {:?} found in borrow stack",
446 alloc_history.get_logs_relevant_to(tag, alloc_range, offset, None),
450 // Step 2: Remove all items. Also checks for protectors.
451 for item in self.borrows.drain(..).rev() {
452 Stack::check_protector(&item, None, global, alloc_history)?;
458 /// Derive a new pointer from one with the given tag.
459 /// `weak` controls whether this operation is weak or strong: weak granting does not act as
460 /// an access, and it adds the new item directly on top of the one it is derived
461 /// from, instead of all the way at the top of the stack.
462 /// `range` refers to the entire operation, and `offset` refers to the specific location in
463 /// `range` that we are currently checking.
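///
/// For example (an illustrative sketch): with `x: &mut i32`,
///
///     let r = &mut *x;        // Strong: granting Unique acts like a write access and pops
///                             // e.g. raw pointers previously derived from x.
///     let p = x as *mut i32;  // Weak: SharedReadWrite is inserted just above x's item (and
///                             // any SharedReadWrite already sitting there), popping nothing.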
468 (alloc_id, alloc_range, offset): (AllocId, AllocRange, Size), // just for debug printing and error messages
469 global: &mut GlobalStateInner,
470 current_span: &mut CurrentSpan<'_, '_, 'tcx>,
471 alloc_history: &mut AllocHistory,
472 ) -> InterpResult<'tcx> {
473 // Figure out which access `perm` corresponds to.
475 if new.perm.grants(AccessKind::Write) { AccessKind::Write } else { AccessKind::Read };
476 // Now we figure out which item grants our parent (`derived_from`) this kind of access.
477 // We use that to determine where to put the new item.
478 let granting_idx = self.find_granting(access, derived_from).ok_or_else(|| {
479 alloc_history.grant_error(derived_from, new, alloc_id, alloc_range, offset, self)
482 // Compute where to put the new item.
483 // Either way, we ensure that the new item is inserted such that between
484 // `derived_from` and the new one, there are only items *compatible with* `derived_from`.
485 let new_idx = if new.perm == Permission::SharedReadWrite {
487 access == AccessKind::Write,
488 "this case only makes sense for stack-like accesses"
490 // SharedReadWrite can coexist with "existing loans", meaning they don't act like a write
491 // access. Instead of popping the stack, we insert the item at the place the stack would
492 // be popped to (i.e., we insert it above all the write-compatible items).
493 // This ensures F2b by adding the new item below any potentially existing `SharedReadOnly`.
494 self.find_first_write_incompatible(granting_idx)
496 // A "safe" reborrow for a pointer that actually expects some aliasing guarantees.
497 // Here, creating a reference actually counts as an access.
498 // This ensures F2b for `Unique`, by removing offending `SharedReadOnly`.
502 (alloc_id, alloc_range, offset),
508 // We insert "as far up as possible": We know only compatible items are remaining
509 // on top of `derived_from`, and we want the new item at the top so that we
510 // get the strongest possible guarantees.
511 // This ensures U1 and F1.
515 // Put the new item there. As an optimization, deduplicate if it is equal to one of its new neighbors.
516 if self.borrows[new_idx - 1] == new || self.borrows.get(new_idx) == Some(&new) {
517 // Optimization applies, done.
518 trace!("reborrow: avoiding adding redundant item {:?}", new);
520 trace!("reborrow: adding item {:?}", new);
521 self.borrows.insert(new_idx, new);
527 // # Stacked Borrows Core End
529 /// Map per-stack operations to higher-level per-location-range operations.
531 /// Creates a new stack with the initial tag.
532 fn new(size: Size, perm: Permission, tag: SbTag) -> Self {
533 let item = Item { perm, tag, protector: None };
534 let stack = Stack { borrows: vec![item] };
537 stacks: RefCell::new(RangeMap::new(size, stack)),
538 history: RefCell::new(AllocHistory::new()),
542 /// Call `f` on every stack in the range.
546 mut f: impl FnMut(Size, &mut Stack, &mut AllocHistory) -> InterpResult<'tcx>,
547 ) -> InterpResult<'tcx> {
548 let mut stacks = self.stacks.borrow_mut();
549 let history = &mut *self.history.borrow_mut();
550 for (offset, stack) in stacks.iter_mut(range.start, range.size) {
551 f(offset, stack, history)?;
556 /// Call `f` on every stack in the range. This `&mut self` version avoids the `RefCell` borrow of the stacks.
560 mut f: impl FnMut(Size, &mut Stack, &mut AllocHistory) -> InterpResult<'tcx>,
561 ) -> InterpResult<'tcx> {
562 let stacks = self.stacks.get_mut();
563 let history = &mut *self.history.borrow_mut();
564 for (offset, stack) in stacks.iter_mut(range.start, range.size) {
565 f(offset, stack, history)?;
571 /// Glue code to connect with Miri Machine Hooks
573 pub fn new_allocation(
577 kind: MemoryKind<MiriMemoryKind>,
578 mut current_span: CurrentSpan<'_, '_, '_>,
580 let mut extra = state.borrow_mut();
581 let (base_tag, perm) = match kind {
582 // New unique borrow. This tag is not accessible by the program,
583 // so it will only ever be used when using the local directly (i.e.,
584 // not through a pointer). That is, whenever we directly write to a local, this will pop
585 // everything else off the stack, invalidating all previous pointers,
586 // and in particular, *all* raw pointers.
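// An illustrative sketch of that effect (hypothetical):
//
//     let mut local = 0u8;
//     let raw = &mut local as *mut u8;
//     local = 1;                  // Direct write via the base tag pops raw's item.
//     // unsafe { *raw = 2 };     // Would be UB now.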
587 MemoryKind::Stack => (extra.base_tag(id), Permission::Unique),
588 // `Global` memory can be referenced by global pointers from `tcx`.
589 // Thus we call `global_base_ptr` such that the global pointers get the same tag
590 // as what we use here.
591 // `ExternStatic` is used for extern statics, so the same reasoning applies.
592 // The others are various forms of machine-managed special global memory, and we can get
593 // away with precise tracking there.
594 // The base pointer is not unique, so the base permission is `SharedReadWrite`.
595 MemoryKind::CallerLocation
596 | MemoryKind::Machine(
597 MiriMemoryKind::Global
598 | MiriMemoryKind::ExternStatic
599 | MiriMemoryKind::Tls
600 | MiriMemoryKind::Runtime
601 | MiriMemoryKind::Machine,
602 ) => (extra.base_tag(id), Permission::SharedReadWrite),
603 // For now, heap allocations are only tracked precisely when raw pointers are tagged.
605 MiriMemoryKind::Rust | MiriMemoryKind::C | MiriMemoryKind::WinHeap,
608 if extra.tag_raw { extra.base_tag(id) } else { extra.base_tag_untagged(id) };
609 (tag, Permission::SharedReadWrite)
612 let stacks = Stacks::new(size, perm, base_tag);
613 stacks.history.borrow_mut().log_creation(
616 alloc_range(Size::ZERO, size),
623 pub fn memory_read<'tcx>(
629 mut current_span: CurrentSpan<'_, '_, 'tcx>,
630 ) -> InterpResult<'tcx> {
632 "read access with tag {:?}: {:?}, size {}",
634 Pointer::new(alloc_id, range.start),
637 let mut state = state.borrow_mut();
638 self.for_each(range, |offset, stack, history| {
642 (alloc_id, range, offset),
651 pub fn memory_written<'tcx>(
657 mut current_span: CurrentSpan<'_, '_, 'tcx>,
658 ) -> InterpResult<'tcx> {
660 "write access with tag {:?}: {:?}, size {}",
662 Pointer::new(alloc_id, range.start),
665 let mut state = state.borrow_mut();
666 self.for_each_mut(range, |offset, stack, history| {
670 (alloc_id, range, offset),
679 pub fn memory_deallocated<'tcx>(
685 ) -> InterpResult<'tcx> {
686 trace!("deallocation with tag {:?}: {:?}, size {}", tag, alloc_id, range.size.bytes());
687 let mut state = state.borrow_mut();
688 self.for_each_mut(range, |offset, stack, history| {
689 stack.dealloc(tag, (alloc_id, range, offset), &mut state, history)
695 /// Retagging/reborrowing. There is some policy in here, such as which permissions
696 /// to grant for which references, and when to add protectors.
697 impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
698 trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
701 place: &MPlaceTy<'tcx, Tag>,
706 ) -> InterpResult<'tcx> {
707 let this = self.eval_context_mut();
708 if size == Size::ZERO {
709 // Nothing to do for zero-sized accesses.
711 "reborrow of size 0: {} reference {:?} derived from {:?} (pointee {})",
719 let (alloc_id, base_offset, orig_tag) = this.ptr_get_alloc_id(place.ptr)?;
721 let mut current_span = this.machine.current_span();
723 let extra = this.get_alloc_extra(alloc_id)?;
724 let stacked_borrows =
725 extra.stacked_borrows.as_ref().expect("we should have Stacked Borrows data");
726 let mut alloc_history = stacked_borrows.history.borrow_mut();
727 alloc_history.log_creation(
730 alloc_range(base_offset, size),
734 alloc_history.log_protector(orig_tag, new_tag, &mut current_span);
738 // Ensure we bail out if the pointer goes out-of-bounds (see miri#1050).
739 let (alloc_size, _) =
740 this.get_alloc_size_and_align(alloc_id, AllocCheck::Dereferenceable)?;
741 if base_offset + size > alloc_size {
742 throw_ub!(PointerOutOfBounds {
745 ptr_offset: this.machine_usize_to_isize(base_offset.bytes()),
747 msg: CheckInAllocMsg::InboundsTest
751 let protector = if protect { Some(this.frame().extra.call_id) } else { None };
753 "reborrow: {} reference {:?} derived from {:?} (pointee {}): {:?}, size {}",
758 Pointer::new(alloc_id, base_offset),
762 // Update the stacks.
763 // Make sure that raw pointers and mutable shared references are reborrowed "weak":
764 // There could be existing unique pointers reborrowed from them that should remain valid!
765 let perm = match kind {
766 RefKind::Unique { two_phase: false }
767 if place.layout.ty.is_unpin(this.tcx.at(DUMMY_SP), this.param_env()) =>
769 // Only if the type is `Unpin` do we actually enforce uniqueness
772 RefKind::Unique { .. } => {
773 // Two-phase references and !Unpin references are treated as SharedReadWrite
774 Permission::SharedReadWrite
776 RefKind::Raw { mutable: true } => Permission::SharedReadWrite,
777 RefKind::Shared | RefKind::Raw { mutable: false } => {
778 // Shared references and *const are a whole different kind of game: the
779 // permission is not uniform across the entire range!
780 // We need a frozen-sensitive reborrow.
781 // We have to use shared references to alloc/memory_extra here since
782 // `visit_freeze_sensitive` needs to access the global state.
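// An illustrative sketch: for `r: &(i32, Cell<i32>)`, the `i32` bytes are frozen and get
// `SharedReadOnly`, while the `Cell<i32>` bytes (inside `UnsafeCell`) get `SharedReadWrite`,
// which is why e.g. `r.1.set(1)` is allowed under Stacked Borrows.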
783 let extra = this.get_alloc_extra(alloc_id)?;
784 let stacked_borrows =
785 extra.stacked_borrows.as_ref().expect("we should have Stacked Borrows data");
786 this.visit_freeze_sensitive(place, size, |mut range, frozen| {
788 range.start += base_offset;
789 // We are only ever `SharedReadOnly` inside the frozen bits.
790 let perm = if frozen {
791 Permission::SharedReadOnly
793 Permission::SharedReadWrite
795 let item = Item { perm, tag: new_tag, protector };
796 let mut global = this.machine.stacked_borrows.as_ref().unwrap().borrow_mut();
797 stacked_borrows.for_each(range, |offset, stack, history| {
801 (alloc_id, range, offset),
811 // Here we can avoid `borrow()` calls because we have mutable references.
812 // Note that this asserts that the allocation is mutable -- but since we are creating a
813 // mutable pointer, that seems reasonable.
814 let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc_id)?;
815 let stacked_borrows =
816 alloc_extra.stacked_borrows.as_mut().expect("we should have Stacked Borrows data");
817 let item = Item { perm, tag: new_tag, protector };
818 let range = alloc_range(base_offset, size);
819 let mut global = machine.stacked_borrows.as_ref().unwrap().borrow_mut();
820 let mut current_span = machine.current_span();
821 stacked_borrows.for_each_mut(range, |offset, stack, history| {
825 (alloc_id, range, offset),
835 /// Retags an individual pointer, returning the retagged version.
836 /// `mutbl` can be `None` to make this a raw pointer.
839 val: &ImmTy<'tcx, Tag>,
842 ) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
843 let this = self.eval_context_mut();
844 // We want a place for where the ptr *points to*, so we get one.
845 let place = this.ref_to_mplace(val)?;
846 let size = this.size_and_align_of_mplace(&place)?.map(|(size, _)| size);
847 // FIXME: If we cannot determine the size (because the unsized tail is an `extern type`),
848 // bail out -- we cannot reasonably figure out which memory range to reborrow.
849 // See https://github.com/rust-lang/unsafe-code-guidelines/issues/276.
850 let size = match size {
852 None => return Ok(*val),
855 // Compute new borrow.
857 let mem_extra = this.machine.stacked_borrows.as_mut().unwrap().get_mut();
859 // Give up tracking for raw pointers.
860 RefKind::Raw { .. } if !mem_extra.tag_raw => SbTag::Untagged,
861 // All other pointers are properly tracked.
862 _ => SbTag::Tagged(mem_extra.new_ptr()),
867 this.reborrow(&place, size, kind, new_tag, protect)?;
870 let new_place = place.map_provenance(|p| {
872 // TODO: Fix this eventually
873 if let Tag::Concrete(t) = t {
874 Tag::Concrete(ConcreteTag { sb: new_tag, ..t })
881 // Return new pointer.
882 Ok(ImmTy::from_immediate(new_place.to_ref(this), val.layout))
886 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
887 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
888 fn retag(&mut self, kind: RetagKind, place: &PlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
889 let this = self.eval_context_mut();
890 // Determine mutability and whether to add a protector.
891 // Cannot use `builtin_deref` because that reports *immutable* for `Box`,
892 // making it useless.
893 fn qualify(ty: ty::Ty<'_>, kind: RetagKind) -> Option<(RefKind, bool)> {
895 // References are simple.
896 ty::Ref(_, _, Mutability::Mut) =>
898 RefKind::Unique { two_phase: kind == RetagKind::TwoPhase },
899 kind == RetagKind::FnEntry,
901 ty::Ref(_, _, Mutability::Not) =>
902 Some((RefKind::Shared, kind == RetagKind::FnEntry)),
903 // Raw pointers are only retagged when raw pointer retagging is enabled (`RetagKind::Raw`).
904 ty::RawPtr(tym) if kind == RetagKind::Raw =>
905 Some((RefKind::Raw { mutable: tym.mutbl == Mutability::Mut }, false)),
906 // Boxes do not get a protector: protectors reflect that references outlive the call
907 // they were passed in to; that's just not the case for boxes.
908 ty::Adt(..) if ty.is_box() => Some((RefKind::Unique { two_phase: false }, false)),
913 // We only reborrow "bare" references/boxes.
914 // Not traversing into fields helps with <https://github.com/rust-lang/unsafe-code-guidelines/issues/125>,
915 // but might also cost us optimizations and analyses. We will have to experiment more with this.
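// An illustrative sketch of that policy (hypothetical signature):
//
//     fn foo(x: &mut i32, pair: (&mut i32, i32)) { /* ... */ }
//
// At function entry, only `x` is retagged here; the `&mut i32` stored inside `pair` is a
// field and is left alone (no fresh tag, no protector).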
916 if let Some((mutbl, protector)) = qualify(place.layout.ty, kind) {
918 let val = this.read_immediate(&this.place_to_op(place)?)?;
919 let val = this.retag_reference(&val, mutbl, protector)?;
920 this.write_immediate(*val, place)?;
926 /// After a stack frame got pushed, retag the return place so that we are sure
927 /// it does not alias with anything.
929 /// This is a HACK because there is nothing in MIR that would make the retag
930 // explicit. Also see <https://github.com/rust-lang/rust/issues/71117>.
931 fn retag_return_place(&mut self) -> InterpResult<'tcx> {
932 let this = self.eval_context_mut();
933 let return_place = if let Some(return_place) = this.frame_mut().return_place {
936 // No return place, nothing to do.
939 if return_place.layout.is_zst() {
940 // There may not be any memory here, nothing to do.
943 // We need this to be in-memory to use tagged pointers.
944 let return_place = this.force_allocation(&return_place)?;
946 // We have to turn the place into a pointer to use the existing code.
947 // (The pointer type does not matter, so we use a raw pointer.)
948 let ptr_layout = this.layout_of(this.tcx.mk_mut_ptr(return_place.layout.ty))?;
949 let val = ImmTy::from_immediate(return_place.to_ref(this), ptr_layout);
951 let val = this.retag_reference(
953 RefKind::Unique { two_phase: false },
956 // And use reborrowed pointer for return place.
957 let return_place = this.ref_to_mplace(&val)?;
958 this.frame_mut().return_place = Some(return_place.into());