1 //! Implements "Stacked Borrows". See <https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md>
2 //! for further information.
5 use std::cell::RefCell;
8 use std::num::NonZeroU64;
10 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
11 use rustc_hir::Mutability;
12 use rustc_middle::mir::RetagKind;
13 use rustc_middle::ty::{
15 layout::{HasParamEnv, LayoutOf},
17 use rustc_span::DUMMY_SP;
18 use rustc_target::abi::Size;
19 use std::collections::HashSet;
24 use diagnostics::{AllocHistory, TagHistory};
29 pub type CallId = NonZeroU64;
31 // Even reading memory can have effects on the stack, so we need a `RefCell` here.
32 pub type AllocExtra = RefCell<Stacks>;
34 /// Tracking pointer provenance
35 #[derive(Copy, Clone, Hash, PartialEq, Eq)]
36 pub struct SbTag(NonZeroU64);
39 pub fn new(i: u64) -> Option<Self> {
40 NonZeroU64::new(i).map(SbTag)
43 // The default to be used when SB is disabled
44 pub fn default() -> Self {
49 impl fmt::Debug for SbTag {
50 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
51 write!(f, "<{}>", self.0)
55 /// The "extra" information an SB pointer has over a regular AllocId.
56 /// Newtype for `Option<SbTag>`.
57 #[derive(Copy, Clone)]
63 impl fmt::Debug for SbTagExtra {
64 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
66 SbTagExtra::Concrete(pid) => write!(f, "{pid:?}"),
67 SbTagExtra::Wildcard => write!(f, "<wildcard>"),
73 fn and_then<T>(self, f: impl FnOnce(SbTag) -> Option<T>) -> Option<T> {
75 SbTagExtra::Concrete(pid) => f(pid),
76 SbTagExtra::Wildcard => None,
81 /// Indicates which permission is granted (by this item to some pointers)
82 #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
84 /// Grants unique mutable access.
86 /// Grants shared mutable access.
88 /// Grants shared read-only access.
90 /// Grants no access, but separates two groups of SharedReadWrite so they are not
91 /// all considered mutually compatible.
95 /// An item in the per-location borrow stack.
96 #[derive(Copy, Clone, Hash, PartialEq, Eq)]
98 /// The permission this item grants.
100 /// The pointers the permission is granted to.
102 /// An optional protector, ensuring the item cannot get popped until `CallId` is over.
103 protector: Option<CallId>,
106 impl fmt::Debug for Item {
107 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
108 write!(f, "[{:?} for {:?}", self.perm, self.tag)?;
109 if let Some(call) = self.protector {
110 write!(f, " (call {})", call)?;
117 /// Extra per-allocation state.
118 #[derive(Clone, Debug)]
120 // Even reading memory can have effects on the stack, so we need a `RefCell` here.
121 stacks: RangeMap<Stack>,
122 /// Stores past operations on this allocation
123 history: AllocHistory,
124 /// The set of tags that have been exposed inside this allocation.
125 exposed_tags: FxHashSet<SbTag>,
128 /// Extra global state, available to the memory access hooks.
130 pub struct GlobalStateInner {
131 /// Next unused pointer ID (tag).
133 /// Table storing the "base" tag for each allocation.
134 /// The base tag is the one used for the initial pointer.
135 /// We need this in a separate table to handle cyclic statics.
136 base_ptr_tags: FxHashMap<AllocId, SbTag>,
137 /// Next unused call ID (for protectors).
138 next_call_id: CallId,
139 /// Those call IDs corresponding to functions that are still running.
140 active_calls: FxHashSet<CallId>,
141 /// The pointer IDs to trace.
142 tracked_pointer_tags: HashSet<SbTag>,
143 /// The call IDs to trace.
144 tracked_call_ids: HashSet<CallId>,
145 /// Whether to recurse into datatypes when searching for pointers to retag.
149 /// We need interior mutable access to the global state.
150 pub type GlobalState = RefCell<GlobalStateInner>;
152 /// Indicates which kind of access is being performed.
153 #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
154 pub enum AccessKind {
159 impl fmt::Display for AccessKind {
160 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
162 AccessKind::Read => write!(f, "read access"),
163 AccessKind::Write => write!(f, "write access"),
168 /// Indicates which kind of reference is being created.
169 /// Used by high-level `reborrow` to compute which permissions to grant to the new pointer.
171 #[derive(Copy, Clone, Hash, PartialEq, Eq)]
173 /// `&mut` and `Box`.
174 Unique { two_phase: bool },
175 /// `&` with or without interior mutability.
177 /// `*mut`/`*const` (raw pointers).
178 Raw { mutable: bool },
181 impl fmt::Display for RefKind {
182 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
184 RefKind::Unique { two_phase: false } => write!(f, "unique"),
185 RefKind::Unique { two_phase: true } => write!(f, "unique (two-phase)"),
186 RefKind::Shared => write!(f, "shared"),
187 RefKind::Raw { mutable: true } => write!(f, "raw (mutable)"),
188 RefKind::Raw { mutable: false } => write!(f, "raw (constant)"),
193 /// Utilities for initialization and ID generation
194 impl GlobalStateInner {
196 tracked_pointer_tags: HashSet<SbTag>,
197 tracked_call_ids: HashSet<CallId>,
201 next_ptr_tag: SbTag(NonZeroU64::new(1).unwrap()),
202 base_ptr_tags: FxHashMap::default(),
203 next_call_id: NonZeroU64::new(1).unwrap(),
204 active_calls: FxHashSet::default(),
205 tracked_pointer_tags,
211 /// Generates a new pointer tag. Remember to also check `tracked_pointer_tags` and log its creation!
212 fn new_ptr(&mut self) -> SbTag {
213 let id = self.next_ptr_tag;
214 self.next_ptr_tag = SbTag(NonZeroU64::new(id.0.get() + 1).unwrap());
218 pub fn new_call(&mut self) -> CallId {
219 let id = self.next_call_id;
220 trace!("new_call: Assigning ID {}", id);
221 if self.tracked_call_ids.contains(&id) {
222 register_diagnostic(NonHaltingDiagnostic::CreatedCallId(id));
224 assert!(self.active_calls.insert(id));
225 self.next_call_id = NonZeroU64::new(id.get() + 1).unwrap();
229 pub fn end_call(&mut self, id: CallId) {
230 assert!(self.active_calls.remove(&id));
233 fn is_active(&self, id: CallId) -> bool {
234 self.active_calls.contains(&id)
237 pub fn base_ptr_tag(&mut self, id: AllocId) -> SbTag {
238 self.base_ptr_tags.get(&id).copied().unwrap_or_else(|| {
239 let tag = self.new_ptr();
240 if self.tracked_pointer_tags.contains(&tag) {
241 register_diagnostic(NonHaltingDiagnostic::CreatedPointerTag(tag.0, None));
243 trace!("New allocation {:?} has base tag {:?}", id, tag);
244 self.base_ptr_tags.try_insert(id, tag).unwrap();
251 pub fn err_sb_ub<'tcx>(
253 help: Option<String>,
254 history: Option<TagHistory>,
255 ) -> InterpError<'tcx> {
256 err_machine_stop!(TerminationInfo::StackedBorrowsUb { msg, help, history })
259 // # Stacked Borrows Core Begin
261 /// We need to make at least the following things true:
263 /// U1: After creating a `Uniq`, it is at the top.
264 /// U2: If the top is `Uniq`, accesses must be through that `Uniq` or remove it.
265 /// U3: If an access happens with a `Uniq`, it requires the `Uniq` to be in the stack.
267 /// F1: After creating a `&`, the parts outside `UnsafeCell` have our `SharedReadOnly` on top.
268 /// F2: If a write access happens, it pops the `SharedReadOnly`. This has two pieces:
269 /// F2a: If a write happens granted by an item below our `SharedReadOnly`, the `SharedReadOnly` gets popped.
271 /// F2b: No `SharedReadWrite` or `Unique` will ever be added on top of our `SharedReadOnly`.
272 /// F3: If an access happens with an `&` outside `UnsafeCell`,
273 /// it requires the `SharedReadOnly` to still be in the stack.
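/// For intuition, a minimal sketch (hypothetical variables, informal tag names like `<x>`) of
/// how the borrow stack for `x`'s location evolves under these rules:
///
/// ```rust
/// let mut x = 0u8;  // stack: [Unique(<x>)]
/// let r = &mut x;   // U1: Unique(<r>) is pushed on top
/// let s = &*r;      // F1: SharedReadOnly(<s>) is pushed on top
/// let _v = *s;      // F3: the read via <s> finds its SharedReadOnly still in the stack
/// *r = 5;           // F2a: the write via <r> pops SharedReadOnly(<s>)
/// // a read through `s` here would no longer find its item and would be rejected
/// ```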
275 /// Core relation on `Permission` to define which accesses are allowed
277 /// This defines, for a given permission, whether it permits the given kind of access.
278 fn grants(self, access: AccessKind) -> bool {
279 // Disabled grants nothing. Otherwise, all items grant read access, and except for SharedReadOnly they grant write access.
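// In table form (directly implied by the check below):
//   Permission        read  write
//   Unique            yes   yes
//   SharedReadWrite   yes   yes
//   SharedReadOnly    yes   no
//   Disabled          no    no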
280 self != Permission::Disabled
281 && (access == AccessKind::Read || self != Permission::SharedReadOnly)
285 /// Core per-location operations: access, dealloc, reborrow.
287 /// Find the first write-incompatible item above the given one --
288 /// i.e., find the height to which the stack will be truncated when writing to `granting`.
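/// For example (informal): with a stack `[SharedReadWrite, SharedReadWrite, SharedReadOnly]`
/// (bottom to top) and `granting = 0`, index 1 belongs to the same `SharedReadWrite` block and
/// survives, so the first write-incompatible item is at index 2, where a write would truncate.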
289 fn find_first_write_incompatible(&self, granting: usize) -> usize {
290 let perm = self.get(granting).unwrap().perm;
292 Permission::SharedReadOnly => bug!("Cannot use SharedReadOnly for writing"),
293 Permission::Disabled => bug!("Cannot use Disabled for anything"),
294 Permission::Unique => {
295 // On a write, everything above us is incompatible.
298 Permission::SharedReadWrite => {
299 // The SharedReadWrite *just* above us are compatible, so we skip those.
300 let mut idx = granting + 1;
301 while let Some(item) = self.get(idx) {
302 if item.perm == Permission::SharedReadWrite {
306 // Found first incompatible!
315 /// Check if the given item is protected.
317 /// The `provoking_access` argument is only used to produce diagnostics.
318 /// It is `Some` when we are granting the contained access for said tag, and it is
319 /// `None` during a deallocation.
320 /// Within `provoking_access`, the `AllocRange` refers to the entire operation, and
321 /// the `Size` refers to the specific location in the `AllocRange` that we are
322 /// currently checking.
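/// For example (informal): when a function takes `x: &mut i32`, the retag on function entry
/// attaches a protector to `x`'s new item; popping that item (e.g., via a write through an
/// aliasing pointer) while the call is still active is reported as undefined behaviour.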
325 provoking_access: Option<(SbTagExtra, AllocRange, Size, AccessKind)>, // just for debug printing and error messages
326 global: &GlobalStateInner,
327 alloc_history: &mut AllocHistory,
328 ) -> InterpResult<'tcx> {
329 if global.tracked_pointer_tags.contains(&item.tag) {
330 register_diagnostic(NonHaltingDiagnostic::PoppedPointerTag(
332 provoking_access.map(|(tag, _alloc_range, _size, access)| (tag, access)),
336 if let Some(call) = item.protector {
337 if global.is_active(call) {
338 if let Some((tag, _alloc_range, _offset, _access)) = provoking_access {
341 "not granting access to tag {:?} because incompatible item is protected: {:?}",
345 tag.and_then(|tag| alloc_history.get_logs_relevant_to(tag, Some(item.tag))),
349 format!("deallocating while item is protected: {:?}", item),
359 /// Test if a memory `access` using pointer tagged `tag` is granted.
360 /// If yes, return the index of the item that granted it.
361 /// `range` refers to the entire operation, and `offset` refers to the specific offset into the
362 /// allocation that we are currently checking.
367 (alloc_id, alloc_range, offset): (AllocId, AllocRange, Size), // just for debug printing and error messages
368 global: &mut GlobalStateInner,
369 current_span: &mut CurrentSpan<'_, '_, 'tcx>,
370 alloc_history: &mut AllocHistory,
371 exposed_tags: &FxHashSet<SbTag>,
372 ) -> InterpResult<'tcx> {
373 // Two main steps: Find granting item, remove incompatible items above.
375 // Step 1: Find granting item.
376 let granting_idx = self.find_granting(access, tag, exposed_tags).map_err(|_| {
377 alloc_history.access_error(access, tag, alloc_id, alloc_range, offset, self)
380 // Step 2: Remove incompatible items above them. Make sure we do not remove protected
381 // items. Behavior differs for reads and writes.
382 // In case of wildcards/unknown matches, we remove everything that is *definitely* gone.
383 if access == AccessKind::Write {
384 // Remove everything above the write-compatible items, like a proper stack. This makes sure read-only and unique
385 // pointers become invalid on write accesses (ensures F2a, and ensures U2 for write accesses).
386 let first_incompatible_idx = if let Some(granting_idx) = granting_idx {
387 // The granting_idx *might* be approximate, but any lower idx would remove more
388 // things. Even if this is a Unique and the lower idx is an SRW (which removes
389 // less), there is an SRW group boundary here so strictly more would get removed.
390 self.find_first_write_incompatible(granting_idx)
392 // We are writing to something in the unknown part.
393 // There is a SRW group boundary between the unknown and the known, so everything is incompatible.
396 self.pop_items_after(first_incompatible_idx, |item| {
399 Some((tag, alloc_range, offset, access)),
403 alloc_history.log_invalidation(item.tag, alloc_range, current_span);
407 // On a read, *disable* all `Unique` above the granting item. This ensures U2 for read accesses.
408 // The reason this is not following the stack discipline (by removing the first Unique and
409 // everything on top of it) is that in `let raw = &mut *x as *mut _; let _val = *x;`, the second statement
410 // would pop the `Unique` from the reborrow of the first statement, and subsequently also pop the
411 // `SharedReadWrite` for `raw`.
412 // This pattern occurs a lot in the standard library: create a raw pointer, then also create a shared
413 // reference and use that.
414 // We *disable* instead of removing `Unique` to avoid "connecting" two neighbouring blocks of SRWs.
415 let first_incompatible_idx = if let Some(granting_idx) = granting_idx {
416 // The granting_idx *might* be approximate, but any lower idx would disable more things.
419 // We are reading from something in the unknown part. That means *all* `Unique` we know about are dead now.
422 self.disable_uniques_starting_at(first_incompatible_idx, |item| {
425 Some((tag, alloc_range, offset, access)),
429 alloc_history.log_invalidation(item.tag, alloc_range, current_span);
434 // If this was an approximate action, we now collapse everything into an unknown.
435 if granting_idx.is_none() || matches!(tag, SbTagExtra::Wildcard) {
436 // Compute the upper bound of the items that remain.
437 // (This is why we did all the work above: to reduce the items we have to consider here.)
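// For example (informal): if the remaining non-disabled items carry tags <5> and <8> and the
// current unknown bottom is <3>, the computed bound is <9>, i.e. any tag below 9 may still
// match the unknown part of the stack.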
438 let mut max = NonZeroU64::new(1).unwrap();
439 for i in 0..self.len() {
440 let item = self.get(i).unwrap();
441 // Skip disabled items, they cannot be matched anyway.
442 if !matches!(item.perm, Permission::Disabled) {
443 // We are looking for a strict upper bound, so add 1 to this tag.
444 max = cmp::max(item.tag.0.checked_add(1).unwrap(), max);
447 if let Some(unk) = self.unknown_bottom() {
448 max = cmp::max(unk.0, max);
450 // Use `max` as new strict upper bound for everything.
452 "access: forgetting stack to upper bound {max} due to wildcard or unknown access"
454 self.set_unknown_bottom(SbTag(max));
461 /// Deallocate a location: Like a write access, but also there must be no
462 /// active protectors at all because we will remove all items.
466 (alloc_id, _alloc_range, _offset): (AllocId, AllocRange, Size), // just for debug printing and error messages
467 global: &GlobalStateInner,
468 alloc_history: &mut AllocHistory,
469 exposed_tags: &FxHashSet<SbTag>,
470 ) -> InterpResult<'tcx> {
471 // Step 1: Make sure there is a granting item.
472 self.find_granting(AccessKind::Write, tag, exposed_tags).map_err(|_| {
474 "no item granting write access for deallocation to tag {:?} at {:?} found in borrow stack",
478 tag.and_then(|tag| alloc_history.get_logs_relevant_to(tag, None)),
482 // Step 2: Consider all items removed. This checks for protectors.
483 for idx in (0..self.len()).rev() {
484 let item = self.get(idx).unwrap();
485 Stack::item_popped(&item, None, global, alloc_history)?;
490 /// Derive a new pointer from one with the given tag.
491 /// `weak` controls whether this operation is weak or strong: weak granting does not act as
492 /// an access, and it adds the new item directly on top of the one it is derived
493 /// from instead of all the way at the top of the stack.
494 /// `range` refers to the entire operation, and `offset` refers to the specific location in
495 /// `range` that we are currently checking.
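/// For example (informal): in `let r = &mut x; let p = r as *mut u8;`, the raw reborrow for `p`
/// is weak: a `SharedReadWrite` item is inserted just above `r`'s `Unique` without performing
/// an access, so creating `p` does not by itself invalidate any existing pointers.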
498 derived_from: SbTagExtra,
500 (alloc_id, alloc_range, offset): (AllocId, AllocRange, Size), // just for debug printing and error messages
501 global: &mut GlobalStateInner,
502 current_span: &mut CurrentSpan<'_, '_, 'tcx>,
503 alloc_history: &mut AllocHistory,
504 exposed_tags: &FxHashSet<SbTag>,
505 ) -> InterpResult<'tcx> {
506 // Figure out which access `perm` corresponds to.
508 if new.perm.grants(AccessKind::Write) { AccessKind::Write } else { AccessKind::Read };
510 // Now we figure out which item grants our parent (`derived_from`) this kind of access.
511 // We use that to determine where to put the new item.
513 self.find_granting(access, derived_from, exposed_tags).map_err(|_| {
514 alloc_history.grant_error(derived_from, new, alloc_id, alloc_range, offset, self)
517 // Compute where to put the new item.
518 // Either way, we ensure that we insert the new item in a way such that between
519 // `derived_from` and the new one, there are only items *compatible with* `derived_from`.
520 let new_idx = if new.perm == Permission::SharedReadWrite {
522 access == AccessKind::Write,
523 "this case only makes sense for stack-like accesses"
526 let (Some(granting_idx), SbTagExtra::Concrete(_)) = (granting_idx, derived_from) else {
527 // The parent is a wildcard pointer or matched the unknown bottom.
528 // This is approximate. Nobody knows what happened, so forget everything.
529 // The new thing is SRW anyway, so we cannot push it "on top of the unknown part"
530 // (for all we know, it might join an SRW group inside the unknown).
531 trace!("reborrow: forgetting stack entirely due to SharedReadWrite reborrow from wildcard or unknown");
532 self.set_unknown_bottom(global.next_ptr_tag);
536 // SharedReadWrite can coexist with "existing loans", meaning they don't act like a write
537 // access. Instead of popping the stack, we insert the item at the place the stack would
538 // be popped to (i.e., we insert it above all the write-compatible items).
539 // This ensures F2b by adding the new item below any potentially existing `SharedReadOnly`.
540 self.find_first_write_incompatible(granting_idx)
542 // A "safe" reborrow for a pointer that actually expects some aliasing guarantees.
543 // Here, creating a reference actually counts as an access.
544 // This ensures F2b for `Unique`, by removing offending `SharedReadOnly`.
548 (alloc_id, alloc_range, offset),
555 // We insert "as far up as possible": We know only compatible items are remaining
556 // on top of `derived_from`, and we want the new item at the top so that we
557 // get the strongest possible guarantees.
558 // This ensures U1 and F1.
562 // Put the new item there. As an optimization, deduplicate if it is equal to one of its new neighbors.
563 // `new_idx` might be 0 if we just cleared the entire stack.
564 if self.get(new_idx) == Some(new) || (new_idx > 0 && self.get(new_idx - 1).unwrap() == new)
566 // Optimization applies, done.
567 trace!("reborrow: avoiding adding redundant item {:?}", new);
569 trace!("reborrow: adding item {:?}", new);
570 self.insert(new_idx, new);
575 // # Stacked Borrows Core End
577 /// Map per-stack operations to higher-level per-location-range operations.
579 /// Creates new stack with initial tag.
580 fn new(size: Size, perm: Permission, tag: SbTag) -> Self {
581 let item = Item { perm, tag, protector: None };
582 let stack = Stack::new(item);
585 stacks: RangeMap::new(size, stack),
586 history: AllocHistory::new(),
587 exposed_tags: FxHashSet::default(),
591 /// Call `f` on every stack in the range.
599 &mut FxHashSet<SbTag>,
600 ) -> InterpResult<'tcx>,
601 ) -> InterpResult<'tcx> {
602 for (offset, stack) in self.stacks.iter_mut(range.start, range.size) {
603 f(offset, stack, &mut self.history, &mut self.exposed_tags)?;
609 /// Glue code to connect with Miri Machine Hooks
611 pub fn new_allocation(
615 kind: MemoryKind<MiriMemoryKind>,
616 mut current_span: CurrentSpan<'_, '_, '_>,
618 let mut extra = state.borrow_mut();
619 let (base_tag, perm) = match kind {
620 // New unique borrow. This tag is not accessible by the program,
621 // so it will only ever be used when using the local directly (i.e.,
622 // not through a pointer). That is, whenever we directly write to a local, this will pop
623 // everything else off the stack, invalidating all previous pointers,
624 // and in particular, *all* raw pointers.
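// For example (informal): in `let mut x = 0i32; let p = &mut x as *mut i32; x = 1;`, the
// direct assignment `x = 1` is a write with this base tag, so it pops the items derived for
// `p`, and any later use of `p` is an error under Stacked Borrows.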
625 MemoryKind::Stack => (extra.base_ptr_tag(id), Permission::Unique),
626 // Everything else is shared by default.
627 _ => (extra.base_ptr_tag(id), Permission::SharedReadWrite),
629 let mut stacks = Stacks::new(size, perm, base_tag);
630 stacks.history.log_creation(
633 alloc_range(Size::ZERO, size),
640 pub fn memory_read<'tcx>(
646 mut current_span: CurrentSpan<'_, '_, 'tcx>,
647 ) -> InterpResult<'tcx> {
649 "read access with tag {:?}: {:?}, size {}",
651 Pointer::new(alloc_id, range.start),
654 let mut state = state.borrow_mut();
655 self.for_each(range, |offset, stack, history, exposed_tags| {
659 (alloc_id, range, offset),
669 pub fn memory_written<'tcx>(
675 mut current_span: CurrentSpan<'_, '_, 'tcx>,
676 ) -> InterpResult<'tcx> {
678 "write access with tag {:?}: {:?}, size {}",
680 Pointer::new(alloc_id, range.start),
683 let mut state = state.borrow_mut();
684 self.for_each(range, |offset, stack, history, exposed_tags| {
688 (alloc_id, range, offset),
698 pub fn memory_deallocated<'tcx>(
704 ) -> InterpResult<'tcx> {
705 trace!("deallocation with tag {:?}: {:?}, size {}", tag, alloc_id, range.size.bytes());
706 let state = state.borrow();
707 self.for_each(range, |offset, stack, history, exposed_tags| {
708 stack.dealloc(tag, (alloc_id, range, offset), &state, history, exposed_tags)
714 /// Retagging/reborrowing. There is some policy in here, such as which permissions
715 /// to grant for which references, and when to add protectors.
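/// Roughly (the precise rules live in `reborrow` and `qualify` below):
/// - `&mut T` with `T: Unpin` gets `Unique`; two-phase borrows and `!Unpin` references get
///   `SharedReadWrite`.
/// - `&T` and `*const T` get `SharedReadOnly` outside `UnsafeCell` and `SharedReadWrite` inside.
/// - `*mut T` gets `SharedReadWrite`.
/// - Protectors are only added for `FnEntry` retags of references, and not inside `UnsafeCell`
///   (and never for `Box` or raw pointers).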
716 impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
717 trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
718 /// Returns the `AllocId` the reborrow was done in, if some actual borrow stack manipulation happened.
722 place: &MPlaceTy<'tcx, Tag>,
727 ) -> InterpResult<'tcx, Option<AllocId>> {
728 let this = self.eval_context_mut();
729 let current_span = &mut this.machine.current_span();
731 // It is crucial that this gets called on all code paths, to ensure we track tag creation.
732 let log_creation = |this: &MiriEvalContext<'mir, 'tcx>,
733 current_span: &mut CurrentSpan<'_, 'mir, 'tcx>,
734 loc: Option<(AllocId, Size, SbTagExtra)>| // alloc_id, base_offset, orig_tag
735 -> InterpResult<'tcx> {
736 let global = this.machine.stacked_borrows.as_ref().unwrap().borrow();
737 if global.tracked_pointer_tags.contains(&new_tag) {
738 register_diagnostic(NonHaltingDiagnostic::CreatedPointerTag(
740 loc.map(|(alloc_id, base_offset, _)| (alloc_id, alloc_range(base_offset, size))),
743 drop(global); // don't hold that reference any longer than we have to
745 let Some((alloc_id, base_offset, orig_tag)) = loc else {
749 // The SB history tracking needs a parent tag, so skip if we come from a wildcard.
750 let SbTagExtra::Concrete(orig_tag) = orig_tag else {
751 // FIXME: should we log this?
755 let extra = this.get_alloc_extra(alloc_id)?;
756 let mut stacked_borrows = extra
759 .expect("we should have Stacked Borrows data")
761 stacked_borrows.history.log_creation(
764 alloc_range(base_offset, size),
768 stacked_borrows.history.log_protector(orig_tag, new_tag, current_span);
773 if size == Size::ZERO {
775 "reborrow of size 0: {} reference {:?} derived from {:?} (pointee {})",
781 // Don't update any stacks for a zero-sized access; borrow stacks are per-byte and this
782 // touches no bytes so there is no stack to put this tag in.
783 // However, if the pointer for this operation points at a real allocation we still
784 // record where it was created so that we can issue a helpful diagnostic if there is an
785 // attempt to use it for a non-zero-sized access.
786 // Dangling slices are a common case here; it's valid to get their length, but with raw
787 // pointer tagging, for example, all calls to `get_unchecked` on them are invalid.
788 if let Ok((alloc_id, base_offset, orig_tag)) = this.ptr_try_get_alloc_id(place.ptr) {
789 log_creation(this, current_span, Some((alloc_id, base_offset, orig_tag)))?;
790 return Ok(Some(alloc_id));
792 // This pointer doesn't come with an AllocId. :shrug:
793 log_creation(this, current_span, None)?;
796 let (alloc_id, base_offset, orig_tag) = this.ptr_get_alloc_id(place.ptr)?;
797 log_creation(this, current_span, Some((alloc_id, base_offset, orig_tag)))?;
799 // Ensure we bail out if the pointer goes out-of-bounds (see miri#1050).
800 let (alloc_size, _) = this.get_live_alloc_size_and_align(alloc_id)?;
801 if base_offset + size > alloc_size {
802 throw_ub!(PointerOutOfBounds {
805 ptr_offset: this.machine_usize_to_isize(base_offset.bytes()),
807 msg: CheckInAllocMsg::InboundsTest
811 let protector = if protect { Some(this.frame().extra.call_id) } else { None };
813 "reborrow: {} reference {:?} derived from {:?} (pointee {}): {:?}, size {}",
818 Pointer::new(alloc_id, base_offset),
822 // Update the stacks.
823 // Make sure that raw pointers and mutable shared references are reborrowed "weak":
824 // There could be existing unique pointers reborrowed from them that should remain valid!
825 let perm = match kind {
826 RefKind::Unique { two_phase: false }
827 if place.layout.ty.is_unpin(this.tcx.at(DUMMY_SP), this.param_env()) =>
829 // Only if the type is unpin do we actually enforce uniqueness
832 RefKind::Unique { .. } => {
833 // Two-phase references and !Unpin references are treated as SharedReadWrite
834 Permission::SharedReadWrite
836 RefKind::Raw { mutable: true } => Permission::SharedReadWrite,
837 RefKind::Shared | RefKind::Raw { mutable: false } => {
838 // Shared references and *const are a whole different kind of game, the
839 // permission is not uniform across the entire range!
840 // We need a frozen-sensitive reborrow.
841 // We have to use shared references to alloc/memory_extra here since
842 // `visit_freeze_sensitive` needs to access the global state.
843 let extra = this.get_alloc_extra(alloc_id)?;
844 let mut stacked_borrows = extra
847 .expect("we should have Stacked Borrows data")
849 this.visit_freeze_sensitive(place, size, |mut range, frozen| {
851 range.start += base_offset;
852 // We are only ever `SharedReadOnly` inside the frozen bits.
853 let perm = if frozen {
854 Permission::SharedReadOnly
856 Permission::SharedReadWrite
858 let protector = if frozen {
861 // We do not protect inside UnsafeCell.
862 // This fixes https://github.com/rust-lang/rust/issues/55005.
865 let item = Item { perm, tag: new_tag, protector };
866 let mut global = this.machine.stacked_borrows.as_ref().unwrap().borrow_mut();
867 stacked_borrows.for_each(range, |offset, stack, history, exposed_tags| {
871 (alloc_id, range, offset),
879 return Ok(Some(alloc_id));
882 // Here we can avoid `borrow()` calls because we have mutable references.
883 // Note that this asserts that the allocation is mutable -- but since we are creating a
884 // mutable pointer, that seems reasonable.
885 let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc_id)?;
886 let mut stacked_borrows = alloc_extra
889 .expect("we should have Stacked Borrows data")
891 let item = Item { perm, tag: new_tag, protector };
892 let range = alloc_range(base_offset, size);
893 let mut global = machine.stacked_borrows.as_ref().unwrap().borrow_mut();
894 let current_span = &mut machine.current_span(); // `get_alloc_extra_mut` invalidated our old `current_span`
895 stacked_borrows.for_each(range, |offset, stack, history, exposed_tags| {
899 (alloc_id, range, offset),
910 /// Retags an individual pointer, returning the retagged version.
911 /// `mutbl` can be `None` to make this a raw pointer.
914 val: &ImmTy<'tcx, Tag>,
917 ) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
918 let this = self.eval_context_mut();
919 // We want a place for where the ptr *points to*, so we get one.
920 let place = this.ref_to_mplace(val)?;
921 let size = this.size_and_align_of_mplace(&place)?.map(|(size, _)| size);
922 // FIXME: If we cannot determine the size (because the unsized tail is an `extern type`),
923 // bail out -- we cannot reasonably figure out which memory range to reborrow.
924 // See https://github.com/rust-lang/unsafe-code-guidelines/issues/276.
925 let size = match size {
927 None => return Ok(*val),
930 // Compute new borrow.
931 let new_tag = this.machine.stacked_borrows.as_mut().unwrap().get_mut().new_ptr();
934 let alloc_id = this.reborrow(&place, size, kind, new_tag, protect)?;
937 let new_place = place.map_provenance(|p| {
941 // If `reborrow` could figure out the AllocId of this ptr, hard-code it into the new one.
942 // Even if we started out with a wildcard, this newly retagged pointer is tied to that allocation.
943 Tag::Concrete { alloc_id, sb: new_tag }
946 // Looks like this has to stay a wildcard pointer.
947 assert!(matches!(prov, Tag::Wildcard));
954 // Return new pointer.
955 Ok(ImmTy::from_immediate(new_place.to_ref(this), val.layout))
959 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
960 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
961 fn retag(&mut self, kind: RetagKind, place: &PlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
962 let this = self.eval_context_mut();
963 // Determine mutability and whether to add a protector.
964 // Cannot use `builtin_deref` because that reports *immutable* for `Box`,
965 // making it useless.
966 fn qualify(ty: ty::Ty<'_>, kind: RetagKind) -> Option<(RefKind, bool)> {
968 // References are simple.
969 ty::Ref(_, _, Mutability::Mut) =>
971 RefKind::Unique { two_phase: kind == RetagKind::TwoPhase },
972 kind == RetagKind::FnEntry,
974 ty::Ref(_, _, Mutability::Not) =>
975 Some((RefKind::Shared, kind == RetagKind::FnEntry)),
976 // Raw pointers need to be enabled.
977 ty::RawPtr(tym) if kind == RetagKind::Raw =>
978 Some((RefKind::Raw { mutable: tym.mutbl == Mutability::Mut }, false)),
979 // Boxes are handled separately due to that allocator situation.
984 // We need a visitor to visit all references. However, that requires
985 // an `MPlaceTy` (or `OpTy`), so we have a fast path for reference types that
986 // avoids allocating.
988 if let Some((ref_kind, protector)) = qualify(place.layout.ty, kind) {
990 let val = this.read_immediate(&this.place_to_op(place)?)?;
991 let val = this.retag_reference(&val, ref_kind, protector)?;
992 this.write_immediate(*val, place)?;
996 // If we don't want to recurse, we are already done.
997 // EXCEPT if this is a `Box`, then we have to recurse because of allocators.
998 // (Yes this means we technically also recursively retag the allocator itself even if field
999 // retagging is not enabled. *shrug*)
1000 if !this.machine.stacked_borrows.as_mut().unwrap().get_mut().retag_fields
1001 && !place.layout.ty.ty_adt_def().is_some_and(|adt| adt.is_box())
1006 // Skip some types that have no further structure we might care about.
1008 place.layout.ty.kind(),
1019 // Now go visit this thing.
1020 let place = this.force_allocation(place)?;
1022 let mut visitor = RetagVisitor { ecx: this, kind };
1023 return visitor.visit_value(&place);
1025 // The actual visitor.
1026 struct RetagVisitor<'ecx, 'mir, 'tcx> {
1027 ecx: &'ecx mut MiriEvalContext<'mir, 'tcx>,
1030 impl<'ecx, 'mir, 'tcx> MutValueVisitor<'mir, 'tcx, Evaluator<'mir, 'tcx>>
1031 for RetagVisitor<'ecx, 'mir, 'tcx>
1033 type V = MPlaceTy<'tcx, Tag>;
1036 fn ecx(&mut self) -> &mut MiriEvalContext<'mir, 'tcx> {
1040 fn visit_box(&mut self, place: &MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
1041 // Boxes do not get a protector: protectors reflect that references outlive the call
1042 // they were passed in to; that's just not the case for boxes.
1043 let (ref_kind, protector) = (RefKind::Unique { two_phase: false }, false);
1045 let val = self.ecx.read_immediate(&place.into())?;
1046 let val = self.ecx.retag_reference(&val, ref_kind, protector)?;
1047 self.ecx.write_immediate(*val, &place.into())?;
1051 fn visit_value(&mut self, place: &MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
1052 if let Some((ref_kind, protector)) = qualify(place.layout.ty, self.kind) {
1053 let val = self.ecx.read_immediate(&place.into())?;
1054 let val = self.ecx.retag_reference(&val, ref_kind, protector)?;
1055 self.ecx.write_immediate(*val, &place.into())?;
1056 } else if matches!(place.layout.ty.kind(), ty::RawPtr(..)) {
1057 // Wide raw pointers *do* have fields and their types are strange.
1058 // vtables have a type like `&[*const (); 3]` or so!
1059 // Do *not* recurse into them.
1060 // (No need to worry about wide references or boxes, those always "qualify".)
1062 // Maybe we need to go deeper.
1063 self.walk_value(place)?;
1070 /// After a stack frame got pushed, retag the return place so that we are sure
1071 /// it does not alias with anything.
1073 /// This is a HACK because there is nothing in MIR that would make the retag
1074 /// explicit. Also see <https://github.com/rust-lang/rust/issues/71117>.
1075 fn retag_return_place(&mut self) -> InterpResult<'tcx> {
1076 let this = self.eval_context_mut();
1077 let return_place = this.frame_mut().return_place;
1078 if return_place.layout.is_zst() {
1079 // There may not be any memory here, nothing to do.
1082 // We need this to be in-memory to use tagged pointers.
1083 let return_place = this.force_allocation(&return_place)?;
1085 // We have to turn the place into a pointer to use the existing code.
1086 // (The pointer type does not matter, so we use a raw pointer.)
1087 let ptr_layout = this.layout_of(this.tcx.mk_mut_ptr(return_place.layout.ty))?;
1088 let val = ImmTy::from_immediate(return_place.to_ref(this), ptr_layout);
1090 let val = this.retag_reference(
1092 RefKind::Unique { two_phase: false },
1095 // And use reborrowed pointer for return place.
1096 let return_place = this.ref_to_mplace(&val)?;
1097 this.frame_mut().return_place = return_place.into();
1102 /// Mark the given tag as exposed. It was found on a pointer with the given AllocId.
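/// For example (informal): in `let x = 0u8; let addr = &x as *const u8 as usize;`, the
/// pointer-to-integer cast exposes the tag of `&x`; a later integer-to-pointer cast yields a
/// pointer with wildcard provenance, which may match any exposed tag of that allocation.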
1103 fn expose_tag(&mut self, alloc_id: AllocId, tag: SbTag) {
1104 let this = self.eval_context_mut();
1106 // Function pointers and dead objects don't have an alloc_extra so we ignore them.
1107 // This is okay because accessing them is UB anyway, no need for any Stacked Borrows checks.
1108 // NOT using `get_alloc_extra_mut` since this might be a read-only allocation!
1109 let (_size, _align, kind) = this.get_alloc_info(alloc_id);
1111 AllocKind::LiveData => {
1112 // This should have alloc_extra data.
1113 let alloc_extra = this.get_alloc_extra(alloc_id).unwrap();
1114 trace!("Stacked Borrows tag {tag:?} exposed in {alloc_id:?}");
1115 alloc_extra.stacked_borrows.as_ref().unwrap().borrow_mut().exposed_tags.insert(tag);
1117 AllocKind::Function | AllocKind::Dead => {
1118 // No stacked borrows on these allocations.