//! Implements "Stacked Borrows". See <https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md>
//! for further information.
mod diagnostics;
mod item;
mod stack;

use std::cmp;
use std::fmt::Write;

use rustc_data_structures::fx::FxHashSet;
use rustc_middle::mir::{Mutability, RetagKind};
use rustc_middle::ty::{
    self,
    layout::{HasParamEnv, LayoutOf},
    Ty,
};
use rustc_target::abi::{Abi, Size};

use crate::borrow_tracker::{
    stacked_borrows::diagnostics::{AllocHistory, DiagnosticCx, DiagnosticCxBuilder, TagHistory},
    AccessKind, GlobalStateInner, ProtectorKind, RetagFields,
};
use crate::*;

use diagnostics::RetagCause;
pub use item::{Item, Permission};
pub use stack::Stack;

pub type AllocState = Stacks;

/// Extra per-allocation state.
#[derive(Clone, Debug)]
pub struct Stacks {
    // Even reading memory can have effects on the stack, so we need a `RefCell` here.
    stacks: RangeMap<Stack>,
    /// Stores past operations on this allocation
    history: AllocHistory,
    /// The set of tags that have been exposed inside this allocation.
    exposed_tags: FxHashSet<BorTag>,
    /// Whether this memory has been modified since the last time the tag GC ran
    modified_since_last_gc: bool,
}

/// Indicates which permissions to grant to the retagged pointer.
#[derive(Clone, Debug)]
enum NewPermission {
    Uniform {
        perm: Permission,
        access: Option<AccessKind>,
        protector: Option<ProtectorKind>,
    },
    FreezeSensitive {
        freeze_perm: Permission,
        freeze_access: Option<AccessKind>,
        freeze_protector: Option<ProtectorKind>,
        nonfreeze_perm: Permission,
        nonfreeze_access: Option<AccessKind>,
        // nonfreeze_protector must always be None
    },
}

impl NewPermission {
    /// A key function: determine the permissions to grant at a retag for the given kind of
    /// reference/pointer.
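    ///
    /// As a rough summary of the cases handled below (an editorial sketch, not normative):
    /// - `&mut T` where `T: Unpin`: `Unique` with a write access, protected on `FnEntry`.
    /// - `&mut T` where `T: !Unpin`, and two-phase borrows: `SharedReadWrite`, treated much
    ///   like raw pointers.
    /// - `*mut T`: `SharedReadWrite`, no access, never protected.
    /// - `&T`: `SharedReadOnly` with a read access in the frozen parts; `SharedReadWrite`
    ///   without an access inside `UnsafeCell`.
    /// - `*const T`: like `&T`, but never protected.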
    fn from_ref_ty<'tcx>(
        ty: Ty<'tcx>,
        kind: RetagKind,
        cx: &crate::MiriInterpCx<'_, 'tcx>,
    ) -> Self {
        let protector = (kind == RetagKind::FnEntry).then_some(ProtectorKind::StrongProtector);
        match ty.kind() {
            ty::Ref(_, pointee, Mutability::Mut) => {
                if kind == RetagKind::TwoPhase {
                    // We mostly just give up on two-phase borrows, and treat these exactly like raw pointers.
                    assert!(protector.is_none()); // RetagKind can't be both FnEntry and TwoPhase.
                    NewPermission::Uniform {
                        perm: Permission::SharedReadWrite,
                } else if pointee.is_unpin(*cx.tcx, cx.param_env()) {
                    // A regular full mutable reference. On `FnEntry` this is `noalias` and `dereferenceable`.
                    NewPermission::Uniform {
                        perm: Permission::Unique,
                        access: Some(AccessKind::Write),
                    // `!Unpin` dereferences do not get `noalias` nor `dereferenceable`.
                    NewPermission::Uniform {
                        perm: Permission::SharedReadWrite,
            ty::RawPtr(ty::TypeAndMut { mutbl: Mutability::Mut, .. }) => {
                assert!(protector.is_none()); // RetagKind can't be both FnEntry and Raw.
                // Mutable raw pointer. No access, not protected.
                NewPermission::Uniform {
                    perm: Permission::SharedReadWrite,
            ty::Ref(_, _pointee, Mutability::Not) => {
                // Shared references. If frozen, these get `noalias` and `dereferenceable`; otherwise neither.
                NewPermission::FreezeSensitive {
                    freeze_perm: Permission::SharedReadOnly,
                    freeze_access: Some(AccessKind::Read),
                    freeze_protector: protector,
                    nonfreeze_perm: Permission::SharedReadWrite,
                    // Inside UnsafeCell, this does *not* count as an access, as there
                    // might actually be mutable references further up the stack that
                    // we have to keep alive.
                    nonfreeze_access: None,
                    // We do not protect inside UnsafeCell.
                    // This fixes https://github.com/rust-lang/rust/issues/55005.
            ty::RawPtr(ty::TypeAndMut { mutbl: Mutability::Not, .. }) => {
                assert!(protector.is_none()); // RetagKind can't be both FnEntry and Raw.
                // `*const T`, when freshly created, are read-only in the frozen part.
                NewPermission::FreezeSensitive {
                    freeze_perm: Permission::SharedReadOnly,
                    freeze_access: Some(AccessKind::Read),
                    freeze_protector: None,
                    nonfreeze_perm: Permission::SharedReadWrite,
                    nonfreeze_access: None,
    fn from_box_ty<'tcx>(
        ty: Ty<'tcx>,
        kind: RetagKind,
        cx: &crate::MiriInterpCx<'_, 'tcx>,
    ) -> Self {
        // `ty` is not the `Box` but the field of the Box with this pointer (due to allocator handling).
        let pointee = ty.builtin_deref(true).unwrap().ty;
        if pointee.is_unpin(*cx.tcx, cx.param_env()) {
            // A regular box. On `FnEntry` this is `noalias`, but not `dereferenceable` (hence only
            // a weak protector).
            NewPermission::Uniform {
                perm: Permission::Unique,
                access: Some(AccessKind::Write),
                protector: (kind == RetagKind::FnEntry)
                    .then_some(ProtectorKind::WeakProtector),
            }
        } else {
            // `!Unpin` boxes do not get `noalias` nor `dereferenceable`.
            NewPermission::Uniform {
                perm: Permission::SharedReadWrite,
    fn protector(&self) -> Option<ProtectorKind> {
        match self {
            NewPermission::Uniform { protector, .. } => *protector,
            NewPermission::FreezeSensitive { freeze_protector, .. } => *freeze_protector,
        }
    }
}

pub fn err_sb_ub<'tcx>(
    msg: String,
    help: Option<String>,
    history: Option<TagHistory>,
) -> InterpError<'tcx> {
    err_machine_stop!(TerminationInfo::StackedBorrowsUb { msg, help, history })
}

// # Stacked Borrows Core Begin

/// We need to make at least the following things true:
///
/// U1: After creating a `Uniq`, it is at the top.
/// U2: If the top is `Uniq`, accesses must be through that `Uniq` or remove it.
/// U3: If an access happens with a `Uniq`, it requires the `Uniq` to be in the stack.
///
/// F1: After creating a `&`, the parts outside `UnsafeCell` have our `SharedReadOnly` on top.
/// F2: If a write access happens, it pops the `SharedReadOnly`. This has two pieces:
/// F2a: If a write happens granted by an item below our `SharedReadOnly`, the `SharedReadOnly`
///      gets popped.
/// F2b: No `SharedReadWrite` or `Unique` will ever be added on top of our `SharedReadOnly`.
/// F3: If an access happens with an `&` outside `UnsafeCell`,
///     it requires the `SharedReadOnly` to still be in the stack.
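///
/// For example, a minimal user-code sketch of F1/F2a/F3 (hypothetical program, for
/// illustration only):
///
/// ```text
/// let mut local = 5;
/// let raw = &mut local as *mut i32; // leaves a `SharedReadWrite` for `raw` on the stack
/// let shared = &local;              // pushes our `SharedReadOnly` on top (F1)
/// let _val = *shared;               // ok: the `SharedReadOnly` is still in the stack (F3)
/// unsafe { *raw = 6 };              // a write granted by an item below pops the `SharedReadOnly` (F2a)
/// // Any further use of `shared` would now violate F3 and is reported as UB.
/// ```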
/// Core relation on `Permission` to define which accesses are allowed
impl Permission {
    /// This defines for a given permission, whether it permits the given kind of access.
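    ///
    /// Concretely (restating the expression below as a table):
    ///
    /// | Permission        | Read | Write |
    /// |-------------------|------|-------|
    /// | `Unique`          | yes  | yes   |
    /// | `SharedReadWrite` | yes  | yes   |
    /// | `SharedReadOnly`  | yes  | no    |
    /// | `Disabled`        | no   | no    |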
    fn grants(self, access: AccessKind) -> bool {
        // Disabled grants nothing. Otherwise, all items grant read access, and except for SharedReadOnly they grant write access.
        self != Permission::Disabled
            && (access == AccessKind::Read || self != Permission::SharedReadOnly)
    }
}

/// Determines whether an item was invalidated by a conflicting access, or by deallocation.
#[derive(Copy, Clone, Debug)]
enum ItemInvalidationCause {
    Conflict,
    Dealloc,
}

/// Core per-location operations: access, dealloc, reborrow.
impl<'tcx> Stack {
    /// Find the first write-incompatible item above the given one --
    /// i.e., find the height to which the stack will be truncated when writing to `granting`.
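    ///
    /// For example (an editorial sketch): given the stack
    /// `[Unique(0), SharedReadWrite(1), SharedReadWrite(2), SharedReadOnly(3)]` (bottom to top),
    /// a write granted at index 1 skips the adjacent `SharedReadWrite` at index 2 and returns 3,
    /// while a write granted at index 0 returns 1, since everything above a `Unique` is
    /// write-incompatible.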
    fn find_first_write_incompatible(&self, granting: usize) -> usize {
        let perm = self.get(granting).unwrap().perm();
        match perm {
            Permission::SharedReadOnly => bug!("Cannot use SharedReadOnly for writing"),
            Permission::Disabled => bug!("Cannot use Disabled for anything"),
            Permission::Unique => {
                // On a write, everything above us is incompatible.
                granting + 1
            }
            Permission::SharedReadWrite => {
                // The SharedReadWrite *just* above us are compatible, to skip those.
                let mut idx = granting + 1;
                while let Some(item) = self.get(idx) {
                    if item.perm() == Permission::SharedReadWrite {
                        idx += 1;
                    } else {
                        // Found first incompatible!
                        break;
                    }
                }
                idx
            }
        }
    }
    /// The given item was invalidated -- check its protectors for whether that will cause UB.
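    ///
    /// For example (hypothetical user code, for illustration only):
    ///
    /// ```text
    /// fn f(x: &mut i32, raw: *mut i32) {
    ///     // At `FnEntry`, `x` was retagged with a protected `Unique` on top of the stack.
    ///     // This write is granted by the `SharedReadWrite` of `raw`, which sits *below*
    ///     // that protected item, so popping the protected item triggers UB here:
    ///     unsafe { *raw = 1 };
    /// }
    /// let mut v = 0;
    /// let raw = &mut v as *mut i32;
    /// f(unsafe { &mut *raw }, raw);
    /// ```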
    fn item_invalidated(
        item: &Item,
        global: &GlobalStateInner,
        dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
        cause: ItemInvalidationCause,
    ) -> InterpResult<'tcx> {
        if !global.tracked_pointer_tags.is_empty() {
            dcx.check_tracked_tag_popped(item, global);
        }

        if !item.protected() {
            return Ok(());
        }
        // We store tags twice, once in global.protected_tags and once in each call frame.
        // We do this because consulting a single global set in this function is faster
        // than attempting to search all call frames in the program for the `FrameExtra`
        // (if any) which is protecting the popped tag.
        //
        // This duplication trades off making `end_call` slower to make this function faster. This
        // trade-off is profitable in practice for a combination of two reasons.
        // 1. A single protected tag can (and does in some programs) protect thousands of `Item`s.
        //    Therefore, adding overhead in function call/return is profitable even if it only
        //    saves a little work in this function.
        // 2. Most frames protect only one or two tags. So this duplicative global turns a search
        //    which ends up about linear in the number of protected tags in the program into a
        //    constant time check (and a slow linear scan in `end_call`, because the tags in the
        //    frames aren't contiguous).
        if let Some(&protector_kind) = global.protected_tags.get(&item.tag()) {
            // The only way this is okay is if the protector is weak and we are deallocating with
            // the right pointer.
            let allowed = matches!(cause, ItemInvalidationCause::Dealloc)
                && matches!(protector_kind, ProtectorKind::WeakProtector);
            if !allowed {
                return Err(dcx.protector_error(item, protector_kind).into());
            }
        }
        Ok(())
    }
    /// Test if a memory `access` using pointer tagged `tag` is granted.
    /// If yes, return the index of the item that granted it.
    /// `range` refers to the entire operation, and `offset` refers to the specific offset into the
    /// allocation that we are currently checking.
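    ///
    /// For example (an editorial sketch): with the stack `[Unique(a), Unique(b), SharedReadWrite(c)]`
    /// (bottom to top), a write with tag `a` pops `b` and `c`, while a read with tag `a` only
    /// *disables* `b` and leaves the `SharedReadWrite` `c` in place.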
    fn access(
        &mut self,
        access: AccessKind,
        tag: ProvenanceExtra,
        global: &GlobalStateInner,
        dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
        exposed_tags: &FxHashSet<BorTag>,
    ) -> InterpResult<'tcx> {
        // Two main steps: Find granting item, remove incompatible items above.

        // Step 1: Find granting item.
        let granting_idx =
            self.find_granting(access, tag, exposed_tags).map_err(|()| dcx.access_error(self))?;
        // Step 2: Remove incompatible items above them. Make sure we do not remove protected
        // items. Behavior differs for reads and writes.
        // In case of wildcards/unknown matches, we remove everything that is *definitely* gone.
        if access == AccessKind::Write {
            // Remove everything above the write-compatible items, like a proper stack. This makes sure read-only and unique
            // pointers become invalid on write accesses (ensures F2a, and ensures U2 for write accesses).
            let first_incompatible_idx = if let Some(granting_idx) = granting_idx {
                // The granting_idx *might* be approximate, but any lower idx would remove more
                // things. Even if this is a Unique and the lower idx is an SRW (which removes
                // less), there is an SRW group boundary here so strictly more would get removed.
                self.find_first_write_incompatible(granting_idx)
            } else {
                // We are writing to something in the unknown part.
                // There is a SRW group boundary between the unknown and the known, so everything is incompatible.
                0
            };
            self.pop_items_after(first_incompatible_idx, |item| {
                Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
                dcx.log_invalidation(item.tag());
                Ok(())
            })?;
        } else {
            // On a read, *disable* all `Unique` above the granting item. This ensures U2 for read accesses.
            // The reason this is not following the stack discipline (by removing the first Unique and
            // everything on top of it) is that in `let raw = &mut *x as *mut _; let _val = *x;`, the second statement
            // would pop the `Unique` from the reborrow of the first statement, and subsequently also pop the
            // `SharedReadWrite` for `raw`.
            // This pattern occurs a lot in the standard library: create a raw pointer, then also create a shared
            // reference and use that.
            // We *disable* instead of removing `Unique` to avoid "connecting" two neighbouring blocks of SRWs.
            let first_incompatible_idx = if let Some(granting_idx) = granting_idx {
                // The granting_idx *might* be approximate, but any lower idx would disable more things.
                granting_idx + 1
            } else {
                // We are reading from something in the unknown part. That means *all* `Unique` we know about are dead now.
                0
            };
            self.disable_uniques_starting_at(first_incompatible_idx, |item| {
                Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
                dcx.log_invalidation(item.tag());
                Ok(())
            })?;
        }
        // If this was an approximate action, we now collapse everything into an unknown.
        if granting_idx.is_none() || matches!(tag, ProvenanceExtra::Wildcard) {
            // Compute the upper bound of the items that remain.
            // (This is why we did all the work above: to reduce the items we have to consider here.)
            let mut max = BorTag::one();
            for i in 0..self.len() {
                let item = self.get(i).unwrap();
                // Skip disabled items, they cannot be matched anyway.
                if !matches!(item.perm(), Permission::Disabled) {
                    // We are looking for a strict upper bound, so add 1 to this tag.
                    max = cmp::max(item.tag().succ().unwrap(), max);
                }
            }
            if let Some(unk) = self.unknown_bottom() {
                max = cmp::max(unk, max);
            }
            // Use `max` as new strict upper bound for everything.
            trace!(
                "access: forgetting stack to upper bound {max} due to wildcard or unknown access",
            );
            self.set_unknown_bottom(max);
        }

        Ok(())
    }
    /// Deallocate a location: Like a write access, but also there must be no
    /// active protectors at all because we will remove all items.
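    ///
    /// For example (hypothetical user code, for illustration only):
    ///
    /// ```text
    /// fn f(x: &mut i32) {
    ///     let raw = x as *mut i32;
    ///     // `x` is strongly protected for the duration of `f`, so deallocating
    ///     // the memory -- even through an otherwise valid pointer -- is UB:
    ///     drop(unsafe { Box::from_raw(raw) });
    /// }
    /// f(Box::leak(Box::new(0)));
    /// ```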
    fn dealloc(
        &mut self,
        tag: ProvenanceExtra,
        global: &GlobalStateInner,
        dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
        exposed_tags: &FxHashSet<BorTag>,
    ) -> InterpResult<'tcx> {
        // Step 1: Make a write access.
        // As part of this we do regular protector checking, i.e. even weakly protected items cause UB when popped.
        self.access(AccessKind::Write, tag, global, dcx, exposed_tags)?;

        // Step 2: Pretend we remove the remaining items, checking if any are strongly protected.
        for idx in (0..self.len()).rev() {
            let item = self.get(idx).unwrap();
            Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Dealloc)?;
        }

        Ok(())
    }
    /// Derive a new pointer from one with the given tag.
    ///
    /// `access` indicates which kind of memory access this retag itself should correspond to.
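    ///
    /// For example (an editorial sketch): granting `SharedReadWrite` without an access into
    /// the stack `[SharedReadWrite(p), SharedReadOnly(q)]` inserts the new item between `p`
    /// and `q`, right above the write-compatible items, instead of popping `q` (ensuring F2b).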
    fn grant(
        &mut self,
        derived_from: ProvenanceExtra,
        new: Item,
        access: Option<AccessKind>,
        global: &GlobalStateInner,
        dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
        exposed_tags: &FxHashSet<BorTag>,
    ) -> InterpResult<'tcx> {
        dcx.start_grant(new.perm());

        // Compute where to put the new item.
        // Either way, we ensure that we insert the new item in a way such that between
        // `derived_from` and the new one, there are only items *compatible with* `derived_from`.
        let new_idx = if let Some(access) = access {
            // Simple case: We are just a regular memory access, and then push our thing on top,
            // like a regular stack.
            // This ensures F2b for `Unique`, by removing offending `SharedReadOnly`.
            self.access(access, derived_from, global, dcx, exposed_tags)?;

            // We insert "as far up as possible": We know only compatible items are remaining
            // on top of `derived_from`, and we want the new item at the top so that we
            // get the strongest possible guarantees.
            // This ensures U1 and F1.
            self.len()
        } else {
            // The tricky case: creating a new SRW permission without actually being an access.
            assert!(new.perm() == Permission::SharedReadWrite);
            // First we figure out which item grants our parent (`derived_from`) this kind of access.
            // We use that to determine where to put the new item.
            let granting_idx = self
                .find_granting(AccessKind::Write, derived_from, exposed_tags)
                .map_err(|()| dcx.grant_error(self))?;

            let (Some(granting_idx), ProvenanceExtra::Concrete(_)) = (granting_idx, derived_from) else {
                // The parent is a wildcard pointer or matched the unknown bottom.
                // This is approximate. Nobody knows what happened, so forget everything.
                // The new thing is SRW anyway, so we cannot push it "on top of the unknown part"
                // (for all we know, it might join an SRW group inside the unknown).
                trace!("reborrow: forgetting stack entirely due to SharedReadWrite reborrow from wildcard or unknown");
                self.set_unknown_bottom(global.next_ptr_tag);
                return Ok(());
            };
            // SharedReadWrite can coexist with "existing loans", meaning they don't act like a write
            // access. Instead of popping the stack, we insert the item at the place the stack would
            // be popped to (i.e., we insert it above all the write-compatible items).
            // This ensures F2b by adding the new item below any potentially existing `SharedReadOnly`.
            self.find_first_write_incompatible(granting_idx)
        };

        // Put the new item there.
        trace!("reborrow: adding item {:?}", new);
        self.insert(new_idx, new);

        Ok(())
    }
}

// # Stacked Borrows Core End

/// Integration with the BorTag garbage collector
impl Stacks {
    pub fn remove_unreachable_tags(&mut self, live_tags: &FxHashSet<BorTag>) {
        if self.modified_since_last_gc {
            for stack in self.stacks.iter_mut_all() {
                if stack.len() > 64 {
                    stack.retain(live_tags);
                }
            }
            self.modified_since_last_gc = false;
        }
    }
}

impl VisitTags for Stacks {
    fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
        for tag in self.exposed_tags.iter().copied() {
            visit(tag);
        }
    }
}

/// Map per-stack operations to higher-level per-location-range operations.
impl<'tcx> Stacks {
    /// Creates a new stack with an initial tag. For diagnostic purposes, we also need to know
    /// the [`AllocId`] of the allocation this is associated with.
    fn new(
        size: Size,
        perm: Permission,
        tag: BorTag,
        id: AllocId,
        machine: &MiriMachine<'_, '_>,
    ) -> Self {
        let item = Item::new(tag, perm, false);
        let stack = Stack::new(item);

        Stacks {
            stacks: RangeMap::new(size, stack),
            history: AllocHistory::new(id, item, machine),
            exposed_tags: FxHashSet::default(),
            modified_since_last_gc: false,
        }
    }
    /// Call `f` on every stack in the range.
    fn for_each(
        &mut self,
        range: AllocRange,
        mut dcx_builder: DiagnosticCxBuilder<'_, '_, 'tcx>,
        mut f: impl FnMut(
            &mut Stack,
            &mut DiagnosticCx<'_, '_, '_, 'tcx>,
            &mut FxHashSet<BorTag>,
        ) -> InterpResult<'tcx>,
    ) -> InterpResult<'tcx> {
        self.modified_since_last_gc = true;
        for (offset, stack) in self.stacks.iter_mut(range.start, range.size) {
            let mut dcx = dcx_builder.build(&mut self.history, offset);
            f(stack, &mut dcx, &mut self.exposed_tags)?;
            dcx_builder = dcx.unbuild();
        }
        Ok(())
    }
}

/// Glue code to connect with Miri Machine Hooks
impl Stacks {
    pub fn new_allocation(
        id: AllocId,
        size: Size,
        state: &mut GlobalStateInner,
        kind: MemoryKind<MiriMemoryKind>,
        machine: &MiriMachine<'_, '_>,
    ) -> Self {
        let (base_tag, perm) = match kind {
            // New unique borrow. This tag is not accessible by the program,
            // so it will only ever be used when using the local directly (i.e.,
            // not through a pointer). That is, whenever we directly write to a local, this will pop
            // everything else off the stack, invalidating all previous pointers,
            // and in particular, *all* raw pointers.
            MemoryKind::Stack => (state.base_ptr_tag(id, machine), Permission::Unique),
            // Everything else is shared by default.
            _ => (state.base_ptr_tag(id, machine), Permission::SharedReadWrite),
        };
        Stacks::new(size, perm, base_tag, id, machine)
    }
    pub fn before_memory_read<'tcx, 'mir, 'ecx>(
        &mut self,
        alloc_id: AllocId,
        tag: ProvenanceExtra,
        range: AllocRange,
        machine: &'ecx MiriMachine<'mir, 'tcx>,
    ) -> InterpResult<'tcx> {
        trace!(
            "read access with tag {:?}: {:?}, size {}",
            tag,
            Pointer::new(alloc_id, range.start),
            range.size.bytes()
        );
        let dcx = DiagnosticCxBuilder::read(machine, tag, range);
        let state = machine.borrow_tracker.as_ref().unwrap().borrow();
        self.for_each(range, dcx, |stack, dcx, exposed_tags| {
            stack.access(AccessKind::Read, tag, &state, dcx, exposed_tags)
        })
    }
    pub fn before_memory_write<'tcx>(
        &mut self,
        alloc_id: AllocId,
        tag: ProvenanceExtra,
        range: AllocRange,
        machine: &mut MiriMachine<'_, 'tcx>,
    ) -> InterpResult<'tcx> {
        trace!(
            "write access with tag {:?}: {:?}, size {}",
            tag,
            Pointer::new(alloc_id, range.start),
            range.size.bytes()
        );
        let dcx = DiagnosticCxBuilder::write(machine, tag, range);
        let state = machine.borrow_tracker.as_ref().unwrap().borrow();
        self.for_each(range, dcx, |stack, dcx, exposed_tags| {
            stack.access(AccessKind::Write, tag, &state, dcx, exposed_tags)
        })
    }
    pub fn before_memory_deallocation<'tcx>(
        &mut self,
        alloc_id: AllocId,
        tag: ProvenanceExtra,
        range: AllocRange,
        machine: &mut MiriMachine<'_, 'tcx>,
    ) -> InterpResult<'tcx> {
        trace!("deallocation with tag {:?}: {:?}, size {}", tag, alloc_id, range.size.bytes());
        let dcx = DiagnosticCxBuilder::dealloc(machine, tag);
        let state = machine.borrow_tracker.as_ref().unwrap().borrow();
        self.for_each(range, dcx, |stack, dcx, exposed_tags| {
            stack.dealloc(tag, &state, dcx, exposed_tags)
        })?;
        Ok(())
    }
}

/// Retagging/reborrowing. There is some policy in here, such as which permissions
/// to grant for which references, and when to add protectors.
impl<'mir: 'ecx, 'tcx: 'mir, 'ecx> EvalContextPrivExt<'mir, 'tcx, 'ecx>
    for crate::MiriInterpCx<'mir, 'tcx>
{
}

trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'mir, 'tcx> {
    /// Returns the `AllocId` the reborrow was done in, if some actual borrow stack manipulation
    /// happened.
    fn sb_reborrow(
        &mut self,
        place: &MPlaceTy<'tcx, Provenance>,
        size: Size,
        new_perm: NewPermission,
        new_tag: BorTag,
        retag_cause: RetagCause, // What caused this retag, for diagnostics only
    ) -> InterpResult<'tcx, Option<AllocId>> {
        let this = self.eval_context_mut();

        // It is crucial that this gets called on all code paths, to ensure we track tag creation.
        let log_creation = |this: &MiriInterpCx<'mir, 'tcx>,
                            loc: Option<(AllocId, Size, ProvenanceExtra)>| // alloc_id, base_offset, orig_tag
         -> InterpResult<'tcx> {
            let global = this.machine.borrow_tracker.as_ref().unwrap().borrow();
            let ty = place.layout.ty;
            if global.tracked_pointer_tags.contains(&new_tag) {
                let mut kind_str = String::new();
                match new_perm {
                    NewPermission::Uniform { perm, .. } =>
                        write!(kind_str, "{perm:?} permission").unwrap(),
                    NewPermission::FreezeSensitive { freeze_perm, .. } if ty.is_freeze(*this.tcx, this.param_env()) =>
                        write!(kind_str, "{freeze_perm:?} permission").unwrap(),
                    NewPermission::FreezeSensitive { freeze_perm, nonfreeze_perm, .. } =>
                        write!(kind_str, "{freeze_perm:?}/{nonfreeze_perm:?} permission for frozen/non-frozen parts").unwrap(),
                }
                write!(kind_str, " (pointee type {ty})").unwrap();
                this.emit_diagnostic(NonHaltingDiagnostic::CreatedPointerTag(
                    new_tag.inner(),
                    Some(kind_str),
                    loc.map(|(alloc_id, base_offset, orig_tag)| (alloc_id, alloc_range(base_offset, size), orig_tag)),
                ));
            }
            drop(global); // don't hold that reference any longer than we have to
            let Some((alloc_id, base_offset, orig_tag)) = loc else {
                return Ok(());
            };

            let (_size, _align, alloc_kind) = this.get_alloc_info(alloc_id);
            match alloc_kind {
                AllocKind::LiveData => {
                    // This should have alloc_extra data, but `get_alloc_extra` can still fail
                    // if converting this alloc_id from a global to a local one
                    // uncovers a non-supported `extern static`.
                    let extra = this.get_alloc_extra(alloc_id)?;
                    let mut stacked_borrows = extra
                        .borrow_tracker_sb()
                        .borrow_mut();
                    // Note that we create a *second* `DiagnosticCxBuilder` below for the actual retag.
                    // FIXME: can this be done cleaner?
                    let dcx = DiagnosticCxBuilder::retag(
                        &this.machine,
                        retag_cause,
                        new_tag,
                        orig_tag,
                        alloc_range(base_offset, size),
                    );
                    let mut dcx = dcx.build(&mut stacked_borrows.history, base_offset);
                    dcx.log_creation();
                    if new_perm.protector().is_some() {
                        dcx.log_protector();
                    }
                }
                AllocKind::Function | AllocKind::VTable | AllocKind::Dead => {
                    // No stacked borrows on these allocations.
                }
            }
            Ok(())
        };
        if size == Size::ZERO {
            trace!(
                "reborrow of size 0: reference {:?} derived from {:?} (pointee {})",
                new_tag,
                place.ptr,
                place.layout.ty,
            );
            // Don't update any stacks for a zero-sized access; borrow stacks are per-byte and this
            // touches no bytes so there is no stack to put this tag in.
            // However, if the pointer for this operation points at a real allocation we still
            // record where it was created so that we can issue a helpful diagnostic if there is an
            // attempt to use it for a non-zero-sized access.
            // Dangling slices are a common case here; it's valid to get their length, but with raw
            // pointer tagging, for example, all calls to `get_unchecked` on them are invalid.
            if let Ok((alloc_id, base_offset, orig_tag)) = this.ptr_try_get_alloc_id(place.ptr) {
                log_creation(this, Some((alloc_id, base_offset, orig_tag)))?;
                return Ok(Some(alloc_id));
            }
            // This pointer doesn't come with an AllocId. :shrug:
            log_creation(this, None)?;
            return Ok(None);
        }
        let (alloc_id, base_offset, orig_tag) = this.ptr_get_alloc_id(place.ptr)?;
        log_creation(this, Some((alloc_id, base_offset, orig_tag)))?;

        // Ensure we bail out if the pointer goes out-of-bounds (see miri#1050).
        let (alloc_size, _) = this.get_live_alloc_size_and_align(alloc_id)?;
        if base_offset + size > alloc_size {
            throw_ub!(PointerOutOfBounds {
                alloc_id,
                alloc_size,
                ptr_offset: this.machine_usize_to_isize(base_offset.bytes()),
                ptr_size: size,
                msg: CheckInAllocMsg::InboundsTest
            });
        }

        trace!(
            "reborrow: reference {:?} derived from {:?} (pointee {}): {:?}, size {}",
            new_tag,
            orig_tag,
            place.layout.ty,
            Pointer::new(alloc_id, base_offset),
            size.bytes()
        );

        if let Some(protect) = new_perm.protector() {
            // See comment in `Stack::item_invalidated` for why we store the tag twice.
            this.frame_mut().extra.borrow_tracker.as_mut().unwrap().protected_tags.push(new_tag);
            this.machine
                .borrow_tracker
                .as_mut()
                .unwrap()
                .get_mut()
                .protected_tags
                .insert(new_tag, protect);
        }
        // Update the stacks, according to the new permission information we are given.
        match new_perm {
            NewPermission::Uniform { perm, access, protector } => {
                assert!(perm != Permission::SharedReadOnly);
                // Here we can avoid `borrow()` calls because we have mutable references.
                // Note that this asserts that the allocation is mutable -- but since we are creating a
                // mutable pointer, that seems reasonable.
                let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc_id)?;
                let stacked_borrows = alloc_extra.borrow_tracker_sb_mut().get_mut();
                let item = Item::new(new_tag, perm, protector.is_some());
                let range = alloc_range(base_offset, size);
                let global = machine.borrow_tracker.as_ref().unwrap().borrow();
                let dcx = DiagnosticCxBuilder::retag(
                    machine,
                    retag_cause,
                    new_tag,
                    orig_tag,
                    alloc_range(base_offset, size),
                );
                stacked_borrows.for_each(range, dcx, |stack, dcx, exposed_tags| {
                    stack.grant(orig_tag, item, access, &global, dcx, exposed_tags)
                })?;
                drop(global);
                if let Some(access) = access {
                    assert_eq!(access, AccessKind::Write);
                    // Make sure the data race model also knows about this.
                    if let Some(data_race) = alloc_extra.data_race.as_mut() {
                        data_race.write(alloc_id, range, machine)?;
                    }
                }
            }
            NewPermission::FreezeSensitive {
                freeze_perm,
                freeze_access,
                freeze_protector,
                nonfreeze_perm,
                nonfreeze_access,
            } => {
                // The permission is not uniform across the entire range!
                // We need a frozen-sensitive reborrow.
                // We have to use shared references to alloc/memory_extra here since
                // `visit_freeze_sensitive` needs to access the global state.
                let alloc_extra = this.get_alloc_extra(alloc_id)?;
                let mut stacked_borrows = alloc_extra.borrow_tracker_sb().borrow_mut();
                this.visit_freeze_sensitive(place, size, |mut range, frozen| {
                    range.start += base_offset;
                    // We are only ever `SharedReadOnly` inside the frozen bits.
                    let (perm, access, protector) = if frozen {
                        (freeze_perm, freeze_access, freeze_protector)
                    } else {
                        (nonfreeze_perm, nonfreeze_access, None)
                    };
                    let item = Item::new(new_tag, perm, protector.is_some());
                    let global = this.machine.borrow_tracker.as_ref().unwrap().borrow();
                    let dcx = DiagnosticCxBuilder::retag(
                        &this.machine,
                        retag_cause,
                        new_tag,
                        orig_tag,
                        alloc_range(base_offset, size),
                    );
                    stacked_borrows.for_each(range, dcx, |stack, dcx, exposed_tags| {
                        stack.grant(orig_tag, item, access, &global, dcx, exposed_tags)
                    })?;
                    drop(global);
                    if let Some(access) = access {
                        assert_eq!(access, AccessKind::Read);
                        // Make sure the data race model also knows about this.
                        if let Some(data_race) = alloc_extra.data_race.as_ref() {
                            data_race.read(alloc_id, range, &this.machine)?;
                        }
                    }
                    Ok(())
                })?;
            }
        }

        Ok(Some(alloc_id))
    }
    /// Retags an individual pointer, returning the retagged version.
    /// `kind` indicates what kind of reference is being created.
    fn sb_retag_reference(
        &mut self,
        val: &ImmTy<'tcx, Provenance>,
        new_perm: NewPermission,
        cause: RetagCause, // What caused this retag, for diagnostics only
    ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> {
        let this = self.eval_context_mut();
        // We want a place for where the ptr *points to*, so we get one.
        let place = this.ref_to_mplace(val)?;
        let size = this.size_and_align_of_mplace(&place)?.map(|(size, _)| size);
        // FIXME: If we cannot determine the size (because the unsized tail is an `extern type`),
        // bail out -- we cannot reasonably figure out which memory range to reborrow.
        // See https://github.com/rust-lang/unsafe-code-guidelines/issues/276.
        let size = match size {
            Some(size) => size,
            None => return Ok(val.clone()),
        };

        // Compute new borrow.
        let new_tag = this.machine.borrow_tracker.as_mut().unwrap().get_mut().new_ptr();

        // Reborrow.
        let alloc_id = this.sb_reborrow(&place, size, new_perm, new_tag, cause)?;

        // Adjust pointer.
        let new_place = place.map_provenance(|p| {
            p.map(|prov| {
                match alloc_id {
                    Some(alloc_id) => {
                        // If `reborrow` could figure out the AllocId of this ptr, hard-code it into the new one.
                        // Even if we started out with a wildcard, this newly retagged pointer is tied to that allocation.
                        Provenance::Concrete { alloc_id, tag: new_tag }
                    }
                    None => {
                        // Looks like this has to stay a wildcard pointer.
                        assert!(matches!(prov, Provenance::Wildcard));
                        Provenance::Wildcard
                    }
                }
            })
        });

        // Return new pointer.
        Ok(ImmTy::from_immediate(new_place.to_ref(this), val.layout))
    }
}

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
    fn sb_retag_ptr_value(
        &mut self,
        kind: RetagKind,
        val: &ImmTy<'tcx, Provenance>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> {
        let this = self.eval_context_mut();
        let new_perm = NewPermission::from_ref_ty(val.layout.ty, kind, this);
        let retag_cause = match kind {
            RetagKind::TwoPhase { .. } => RetagCause::TwoPhase,
            RetagKind::FnEntry => unreachable!(),
            RetagKind::Raw | RetagKind::Default => RetagCause::Normal,
        };
        this.sb_retag_reference(val, new_perm, retag_cause)
    }
    fn sb_retag_place_contents(
        &mut self,
        kind: RetagKind,
        place: &PlaceTy<'tcx, Provenance>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let retag_fields = this.machine.borrow_tracker.as_mut().unwrap().get_mut().retag_fields;
        let retag_cause = match kind {
            RetagKind::Raw | RetagKind::TwoPhase { .. } => unreachable!(), // these can only happen in `retag_ptr_value`
            RetagKind::FnEntry => RetagCause::FnEntry,
            RetagKind::Default => RetagCause::Normal,
        };
        let mut visitor = RetagVisitor { ecx: this, kind, retag_cause, retag_fields };
        return visitor.visit_value(place);
        // The actual visitor.
        struct RetagVisitor<'ecx, 'mir, 'tcx> {
            ecx: &'ecx mut MiriInterpCx<'mir, 'tcx>,
            kind: RetagKind,
            retag_cause: RetagCause,
            retag_fields: RetagFields,
        }
        impl<'ecx, 'mir, 'tcx> RetagVisitor<'ecx, 'mir, 'tcx> {
            #[inline(always)] // yes this helps in our benchmarks
            fn retag_ptr_inplace(
                &mut self,
                place: &PlaceTy<'tcx, Provenance>,
                new_perm: NewPermission,
                retag_cause: RetagCause,
            ) -> InterpResult<'tcx> {
                let val = self.ecx.read_immediate(&self.ecx.place_to_op(place)?)?;
                let val = self.ecx.sb_retag_reference(&val, new_perm, retag_cause)?;
                self.ecx.write_immediate(*val, place)?;
                Ok(())
            }
        }
        impl<'ecx, 'mir, 'tcx> MutValueVisitor<'mir, 'tcx, MiriMachine<'mir, 'tcx>>
            for RetagVisitor<'ecx, 'mir, 'tcx>
        {
            type V = PlaceTy<'tcx, Provenance>;

            fn ecx(&mut self) -> &mut MiriInterpCx<'mir, 'tcx> {
                self.ecx
            }

            fn visit_box(&mut self, place: &PlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
                // Boxes get a weak protector, since they may be deallocated.
                let new_perm = NewPermission::from_box_ty(place.layout.ty, self.kind, self.ecx);
                self.retag_ptr_inplace(place, new_perm, self.retag_cause)
            }
            fn visit_value(&mut self, place: &PlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
                // If this place is smaller than a pointer, we know that it can't contain any
                // pointers we need to retag, so we can stop recursion early.
                // This optimization is crucial for ZSTs, because they can contain way more fields
                // than we can ever visit.
                if place.layout.is_sized() && place.layout.size < self.ecx.pointer_size() {
                    return Ok(());
                }

                // Check the type of this value to see what to do with it (retag, or recurse).
                match place.layout.ty.kind() {
                    ty::Ref(..) => {
                        let new_perm =
                            NewPermission::from_ref_ty(place.layout.ty, self.kind, self.ecx);
                        self.retag_ptr_inplace(place, new_perm, self.retag_cause)?;
                    }
                    ty::RawPtr(..) => {
                        // We do *not* want to recurse into raw pointers -- wide raw pointers have
                        // fields, and for dyn Trait pointees those can have reference type!
                    }
                    ty::Adt(adt, _) if adt.is_box() => {
                        // Recurse for boxes, they require some tricky handling and will end up in `visit_box` above.
                        // (Yes this means we technically also recursively retag the allocator itself
                        // even if field retagging is not enabled. *shrug*)
                        self.walk_value(place)?;
                    }
                    _ => {
                        // Not a reference/pointer/box. Only recurse if configured appropriately.
                        let recurse = match self.retag_fields {
                            RetagFields::No => false,
                            RetagFields::Yes => true,
                            RetagFields::OnlyScalar => {
                                // Matching `ArgAbi::new` at the time of writing, only fields of
                                // `Scalar` and `ScalarPair` ABI are considered.
                                matches!(place.layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..))
                            }
                        };
                        if recurse {
                            self.walk_value(place)?;
                        }
                    }
                }

                Ok(())
            }
        }
    }
    /// After a stack frame got pushed, retag the return place so that we are sure
    /// it does not alias with anything.
    ///
    /// This is a HACK because there is nothing in MIR that would make the retag
    /// explicit. Also see <https://github.com/rust-lang/rust/issues/71117>.
    fn sb_retag_return_place(&mut self) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let return_place = &this.frame().return_place;
        if return_place.layout.is_zst() {
            // There may not be any memory here, nothing to do.
            return Ok(());
        }
        // We need this to be in-memory to use tagged pointers.
        let return_place = this.force_allocation(&return_place.clone())?;

        // We have to turn the place into a pointer to use the existing code.
        // (The pointer type does not matter, so we use a raw pointer.)
        let ptr_layout = this.layout_of(this.tcx.mk_mut_ptr(return_place.layout.ty))?;
        let val = ImmTy::from_immediate(return_place.to_ref(this), ptr_layout);
        // Reborrow it. With protection! That is part of the point.
        let new_perm = NewPermission::Uniform {
            perm: Permission::Unique,
            access: Some(AccessKind::Write),
            protector: Some(ProtectorKind::StrongProtector),
        };
        let val = this.sb_retag_reference(&val, new_perm, RetagCause::FnReturnPlace)?;
        // And use reborrowed pointer for return place.
        let return_place = this.ref_to_mplace(&val)?;
        this.frame_mut().return_place = return_place.into();

        Ok(())
    }
    /// Mark the given tag as exposed. It was found on a pointer with the given AllocId.
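    ///
    /// Exposure typically results from a pointer-to-integer cast in user code, e.g. (an
    /// editorial sketch): `let addr = &x as *const u8 as usize;` exposes the tag of the
    /// reference, and a later `addr as *const u8` yields a wildcard pointer that may act
    /// through any exposed tag.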
    fn sb_expose_tag(&mut self, alloc_id: AllocId, tag: BorTag) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        // Function pointers and dead objects don't have an alloc_extra so we ignore them.
        // This is okay because accessing them is UB anyway, no need for any Stacked Borrows checks.
        // NOT using `get_alloc_extra_mut` since this might be a read-only allocation!
        let (_size, _align, kind) = this.get_alloc_info(alloc_id);
        match kind {
            AllocKind::LiveData => {
                // This should have alloc_extra data, but `get_alloc_extra` can still fail
                // if converting this alloc_id from a global to a local one
                // uncovers a non-supported `extern static`.
                let alloc_extra = this.get_alloc_extra(alloc_id)?;
                trace!("Stacked Borrows tag {tag:?} exposed in {alloc_id:?}");
                alloc_extra.borrow_tracker_sb().borrow_mut().exposed_tags.insert(tag);
            }
            AllocKind::Function | AllocKind::VTable | AllocKind::Dead => {
                // No stacked borrows on these allocations.
            }
        }
        Ok(())
    }
    fn print_stacks(&mut self, alloc_id: AllocId) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let alloc_extra = this.get_alloc_extra(alloc_id)?;
        let stacks = alloc_extra.borrow_tracker_sb().borrow();
        for (range, stack) in stacks.stacks.iter_all() {
            print!("{range:?}: [");
            if let Some(bottom) = stack.unknown_bottom() {
                print!(" unknown-bottom(..{bottom:?})");
            }
            for i in 0..stack.len() {
                let item = stack.get(i).unwrap();
                print!(" {:?}{:?}", item.perm(), item.tag());
            }
            println!(" ]");
        }
        Ok(())
    }
}