--- /dev/null
+use smallvec::SmallVec;
+use std::fmt;
+
+use rustc_middle::mir::interpret::{alloc_range, AllocId, AllocRange, InterpError};
+use rustc_span::{Span, SpanData};
+use rustc_target::abi::Size;
+
+use crate::borrow_tracker::{
+ stacked_borrows::{err_sb_ub, Permission},
+ AccessKind, GlobalStateInner, ProtectorKind,
+};
+use crate::*;
+
+#[derive(Clone, Debug)]
+pub struct AllocHistory {
+ id: AllocId,
+ base: (Item, Span),
+ creations: SmallVec<[Creation; 1]>,
+ invalidations: SmallVec<[Invalidation; 1]>,
+ protectors: SmallVec<[Protection; 1]>,
+}
+
+#[derive(Clone, Debug)]
+struct Creation {
+ retag: RetagOp,
+ span: Span,
+}
+
+impl Creation {
+ fn generate_diagnostic(&self) -> (String, SpanData) {
+ let tag = self.retag.new_tag;
+ if let Some(perm) = self.retag.permission {
+ (
+ format!(
+ "{tag:?} was created by a {:?} retag at offsets {:?}",
+ perm, self.retag.range,
+ ),
+ self.span.data(),
+ )
+ } else {
+ assert!(self.retag.range.size == Size::ZERO);
+ (
+ format!(
+ "{tag:?} would have been created here, but this is a zero-size retag ({:?}) so the tag in question does not exist anywhere",
+ self.retag.range,
+ ),
+ self.span.data(),
+ )
+ }
+ }
+}
+
+#[derive(Clone, Debug)]
+struct Invalidation {
+ tag: BorTag,
+ range: AllocRange,
+ span: Span,
+ cause: InvalidationCause,
+}
+
+#[derive(Clone, Debug)]
+enum InvalidationCause {
+ Access(AccessKind),
+ Retag(Permission, RetagCause),
+}
+
+impl Invalidation {
+ fn generate_diagnostic(&self) -> (String, SpanData) {
+ let message = if let InvalidationCause::Retag(_, RetagCause::FnEntry) = self.cause {
+ // For a FnEntry retag, our Span points at the caller.
+ // See `DiagnosticCx::log_invalidation`.
+ format!(
+ "{:?} was later invalidated at offsets {:?} by a {} inside this call",
+ self.tag, self.range, self.cause
+ )
+ } else {
+ format!(
+ "{:?} was later invalidated at offsets {:?} by a {}",
+ self.tag, self.range, self.cause
+ )
+ };
+ (message, self.span.data())
+ }
+}
+
+impl fmt::Display for InvalidationCause {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ InvalidationCause::Access(kind) => write!(f, "{kind}"),
+ InvalidationCause::Retag(perm, kind) =>
+ if *kind == RetagCause::FnEntry {
+ write!(f, "{perm:?} FnEntry retag")
+ } else {
+ write!(f, "{perm:?} retag")
+ },
+ }
+ }
+}
+
+#[derive(Clone, Debug)]
+struct Protection {
+ tag: BorTag,
+ span: Span,
+}
+
+#[derive(Clone)]
+pub struct TagHistory {
+ pub created: (String, SpanData),
+ pub invalidated: Option<(String, SpanData)>,
+ pub protected: Option<(String, SpanData)>,
+}
+
+pub struct DiagnosticCxBuilder<'ecx, 'mir, 'tcx> {
+ operation: Operation,
+ machine: &'ecx MiriMachine<'mir, 'tcx>,
+}
+
+pub struct DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
+ operation: Operation,
+ machine: &'ecx MiriMachine<'mir, 'tcx>,
+ history: &'history mut AllocHistory,
+ offset: Size,
+}
+
+impl<'ecx, 'mir, 'tcx> DiagnosticCxBuilder<'ecx, 'mir, 'tcx> {
+ pub fn build<'history>(
+ self,
+ history: &'history mut AllocHistory,
+ offset: Size,
+ ) -> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
+ DiagnosticCx { operation: self.operation, machine: self.machine, history, offset }
+ }
+
+ pub fn retag(
+ machine: &'ecx MiriMachine<'mir, 'tcx>,
+ cause: RetagCause,
+ new_tag: BorTag,
+ orig_tag: ProvenanceExtra,
+ range: AllocRange,
+ ) -> Self {
+ let operation =
+ Operation::Retag(RetagOp { cause, new_tag, orig_tag, range, permission: None });
+
+ DiagnosticCxBuilder { machine, operation }
+ }
+
+ pub fn read(
+ machine: &'ecx MiriMachine<'mir, 'tcx>,
+ tag: ProvenanceExtra,
+ range: AllocRange,
+ ) -> Self {
+ let operation = Operation::Access(AccessOp { kind: AccessKind::Read, tag, range });
+ DiagnosticCxBuilder { machine, operation }
+ }
+
+ pub fn write(
+ machine: &'ecx MiriMachine<'mir, 'tcx>,
+ tag: ProvenanceExtra,
+ range: AllocRange,
+ ) -> Self {
+ let operation = Operation::Access(AccessOp { kind: AccessKind::Write, tag, range });
+ DiagnosticCxBuilder { machine, operation }
+ }
+
+ pub fn dealloc(machine: &'ecx MiriMachine<'mir, 'tcx>, tag: ProvenanceExtra) -> Self {
+ let operation = Operation::Dealloc(DeallocOp { tag });
+ DiagnosticCxBuilder { machine, operation }
+ }
+}
+
+impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
+ pub fn unbuild(self) -> DiagnosticCxBuilder<'ecx, 'mir, 'tcx> {
+ DiagnosticCxBuilder { machine: self.machine, operation: self.operation }
+ }
+}
+
+#[derive(Debug, Clone)]
+enum Operation {
+ Retag(RetagOp),
+ Access(AccessOp),
+ Dealloc(DeallocOp),
+}
+
+#[derive(Debug, Clone)]
+struct RetagOp {
+ cause: RetagCause,
+ new_tag: BorTag,
+ orig_tag: ProvenanceExtra,
+ range: AllocRange,
+ permission: Option<Permission>,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum RetagCause {
+ Normal,
+ FnReturn,
+ FnEntry,
+ TwoPhase,
+}
+
+#[derive(Debug, Clone)]
+struct AccessOp {
+ kind: AccessKind,
+ tag: ProvenanceExtra,
+ range: AllocRange,
+}
+
+#[derive(Debug, Clone)]
+struct DeallocOp {
+ tag: ProvenanceExtra,
+}
+
+impl AllocHistory {
+ pub fn new(id: AllocId, item: Item, machine: &MiriMachine<'_, '_>) -> Self {
+ Self {
+ id,
+ base: (item, machine.current_span()),
+ creations: SmallVec::new(),
+ invalidations: SmallVec::new(),
+ protectors: SmallVec::new(),
+ }
+ }
+}
+
+impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
+ pub fn start_grant(&mut self, perm: Permission) {
+ let Operation::Retag(op) = &mut self.operation else {
+ unreachable!("start_grant must only be called during a retag, this is: {:?}", self.operation)
+ };
+ op.permission = Some(perm);
+
+ let last_creation = &mut self.history.creations.last_mut().unwrap();
+ match last_creation.retag.permission {
+ None => {
+ last_creation.retag.permission = Some(perm);
+ }
+ Some(previous) =>
+ if previous != perm {
+ // 'Split up' the creation event.
+ let previous_range = last_creation.retag.range;
+ last_creation.retag.range = alloc_range(previous_range.start, self.offset);
+ let mut new_event = last_creation.clone();
+ new_event.retag.range = alloc_range(self.offset, previous_range.end());
+ new_event.retag.permission = Some(perm);
+ self.history.creations.push(new_event);
+ },
+ }
+ }
+
+ pub fn log_creation(&mut self) {
+ let Operation::Retag(op) = &self.operation else {
+ unreachable!("log_creation must only be called during a retag")
+ };
+ self.history
+ .creations
+ .push(Creation { retag: op.clone(), span: self.machine.current_span() });
+ }
+
+ pub fn log_invalidation(&mut self, tag: BorTag) {
+ let mut span = self.machine.current_span();
+ let (range, cause) = match &self.operation {
+ Operation::Retag(RetagOp { cause, range, permission, .. }) => {
+ if *cause == RetagCause::FnEntry {
+ span = self.machine.caller_span();
+ }
+ (*range, InvalidationCause::Retag(permission.unwrap(), *cause))
+ }
+ Operation::Access(AccessOp { kind, range, .. }) =>
+ (*range, InvalidationCause::Access(*kind)),
+ Operation::Dealloc(_) => {
+ // This can be reached, but never be relevant later since the entire allocation is
+ // gone now.
+ return;
+ }
+ };
+ self.history.invalidations.push(Invalidation { tag, range, span, cause });
+ }
+
+ pub fn log_protector(&mut self) {
+ let Operation::Retag(op) = &self.operation else {
+ unreachable!("Protectors can only be created during a retag")
+ };
+ self.history
+ .protectors
+ .push(Protection { tag: op.new_tag, span: self.machine.current_span() });
+ }
+
+ pub fn get_logs_relevant_to(
+ &self,
+ tag: BorTag,
+ protector_tag: Option<BorTag>,
+ ) -> Option<TagHistory> {
+ let Some(created) = self.history
+ .creations
+ .iter()
+ .rev()
+ .find_map(|event| {
+ // First, look for a Creation event where the tag and the offset match. This
+ // ensures that we pick the right Creation event when a retag isn't uniform due to
+ // Freeze.
+ let range = event.retag.range;
+ if event.retag.new_tag == tag
+ && self.offset >= range.start
+ && self.offset < (range.start + range.size)
+ {
+ Some(event.generate_diagnostic())
+ } else {
+ None
+ }
+ })
+ .or_else(|| {
+ // If we didn't find anything with a matching offset, just return the event where
+ // the tag was created. This branch is hit when we use a tag at an offset that
+ // doesn't have the tag.
+ self.history.creations.iter().rev().find_map(|event| {
+ if event.retag.new_tag == tag {
+ Some(event.generate_diagnostic())
+ } else {
+ None
+ }
+ })
+ }).or_else(|| {
+ // If we didn't find a retag that created this tag, it might be the base tag of
+ // this allocation.
+ if self.history.base.0.tag() == tag {
+ Some((
+ format!("{tag:?} was created here, as the base tag for {:?}", self.history.id),
+ self.history.base.1.data()
+ ))
+ } else {
+ None
+ }
+ }) else {
+ // But if we don't have a creation event, this is related to a wildcard, and there
+ // is really nothing we can do to help.
+ return None;
+ };
+
+ let invalidated = self.history.invalidations.iter().rev().find_map(|event| {
+ if event.tag == tag { Some(event.generate_diagnostic()) } else { None }
+ });
+
+ let protected = protector_tag
+ .and_then(|protector| {
+ self.history.protectors.iter().find(|protection| protection.tag == protector)
+ })
+ .map(|protection| {
+ let protected_tag = protection.tag;
+ (format!("{protected_tag:?} is this argument"), protection.span.data())
+ });
+
+ Some(TagHistory { created, invalidated, protected })
+ }
+
+ /// Report a descriptive error when `new` could not be granted from `derived_from`.
+ #[inline(never)] // This is only called on fatal code paths
+ pub(super) fn grant_error(&self, stack: &Stack) -> InterpError<'tcx> {
+ let Operation::Retag(op) = &self.operation else {
+ unreachable!("grant_error should only be called during a retag")
+ };
+ let perm =
+ op.permission.expect("`start_grant` must be called before calling `grant_error`");
+ let action = format!(
+ "trying to retag from {:?} for {:?} permission at {:?}[{:#x}]",
+ op.orig_tag,
+ perm,
+ self.history.id,
+ self.offset.bytes(),
+ );
+ err_sb_ub(
+ format!("{action}{}", error_cause(stack, op.orig_tag)),
+ Some(operation_summary(&op.cause.summary(), self.history.id, op.range)),
+ op.orig_tag.and_then(|orig_tag| self.get_logs_relevant_to(orig_tag, None)),
+ )
+ }
+
+ /// Report a descriptive error when `access` is not permitted based on `tag`.
+ #[inline(never)] // This is only called on fatal code paths
+ pub(super) fn access_error(&self, stack: &Stack) -> InterpError<'tcx> {
+ // Deallocation and retagging also do an access as part of their operation, so handle that here, too.
+ let op = match &self.operation {
+ Operation::Access(op) => op,
+ Operation::Retag(_) => return self.grant_error(stack),
+ Operation::Dealloc(_) => return self.dealloc_error(stack),
+ };
+ let action = format!(
+ "attempting a {access} using {tag:?} at {alloc_id:?}[{offset:#x}]",
+ access = op.kind,
+ tag = op.tag,
+ alloc_id = self.history.id,
+ offset = self.offset.bytes(),
+ );
+ err_sb_ub(
+ format!("{action}{}", error_cause(stack, op.tag)),
+ Some(operation_summary("an access", self.history.id, op.range)),
+ op.tag.and_then(|tag| self.get_logs_relevant_to(tag, None)),
+ )
+ }
+
+ #[inline(never)] // This is only called on fatal code paths
+ pub(super) fn protector_error(&self, item: &Item, kind: ProtectorKind) -> InterpError<'tcx> {
+ let protected = match kind {
+ ProtectorKind::WeakProtector => "weakly protected",
+ ProtectorKind::StrongProtector => "strongly protected",
+ };
+ let call_id = self
+ .machine
+ .threads
+ .all_stacks()
+ .flatten()
+ .map(|frame| {
+ frame.extra.borrow_tracker.as_ref().expect("we should have borrow tracking data")
+ })
+ .find(|frame| frame.protected_tags.contains(&item.tag()))
+ .map(|frame| frame.call_id)
+ .unwrap(); // FIXME: Surely we should find something, but a panic seems wrong here?
+ match self.operation {
+ Operation::Dealloc(_) =>
+ err_sb_ub(
+ format!("deallocating while item {item:?} is {protected} by call {call_id:?}",),
+ None,
+ None,
+ ),
+ Operation::Retag(RetagOp { orig_tag: tag, .. })
+ | Operation::Access(AccessOp { tag, .. }) =>
+ err_sb_ub(
+ format!(
+ "not granting access to tag {tag:?} because that would remove {item:?} which is {protected} because it is an argument of call {call_id:?}",
+ ),
+ None,
+ tag.and_then(|tag| self.get_logs_relevant_to(tag, Some(item.tag()))),
+ ),
+ }
+ }
+
+ #[inline(never)] // This is only called on fatal code paths
+ pub fn dealloc_error(&self, stack: &Stack) -> InterpError<'tcx> {
+ let Operation::Dealloc(op) = &self.operation else {
+ unreachable!("dealloc_error should only be called during a deallocation")
+ };
+ err_sb_ub(
+ format!(
+ "attempting deallocation using {tag:?} at {alloc_id:?}{cause}",
+ tag = op.tag,
+ alloc_id = self.history.id,
+ cause = error_cause(stack, op.tag),
+ ),
+ None,
+ op.tag.and_then(|tag| self.get_logs_relevant_to(tag, None)),
+ )
+ }
+
+ #[inline(never)]
+ pub fn check_tracked_tag_popped(&self, item: &Item, global: &GlobalStateInner) {
+ if !global.tracked_pointer_tags.contains(&item.tag()) {
+ return;
+ }
+ let summary = match self.operation {
+ Operation::Dealloc(_) => None,
+ Operation::Access(AccessOp { kind, tag, .. }) => Some((tag, kind)),
+ Operation::Retag(RetagOp { orig_tag, permission, .. }) => {
+ let kind = match permission
+ .expect("start_grant should set the current permission before popping a tag")
+ {
+ Permission::SharedReadOnly => AccessKind::Read,
+ Permission::Unique => AccessKind::Write,
+ Permission::SharedReadWrite | Permission::Disabled => {
+ panic!("Only SharedReadOnly and Unique retags can pop tags");
+ }
+ };
+ Some((orig_tag, kind))
+ }
+ };
+ self.machine.emit_diagnostic(NonHaltingDiagnostic::PoppedPointerTag(*item, summary));
+ }
+}
+
+fn operation_summary(operation: &str, alloc_id: AllocId, alloc_range: AllocRange) -> String {
+ format!("this error occurs as part of {operation} at {alloc_id:?}{alloc_range:?}")
+}
+
+fn error_cause(stack: &Stack, prov_extra: ProvenanceExtra) -> &'static str {
+ if let ProvenanceExtra::Concrete(tag) = prov_extra {
+ if (0..stack.len())
+ .map(|i| stack.get(i).unwrap())
+ .any(|item| item.tag() == tag && item.perm() != Permission::Disabled)
+ {
+ ", but that tag only grants SharedReadOnly permission for this location"
+ } else {
+ ", but that tag does not exist in the borrow stack for this location"
+ }
+ } else {
+ ", but no exposed tags have suitable permission in the borrow stack for this location"
+ }
+}
+
+impl RetagCause {
+ fn summary(&self) -> String {
+ match self {
+ RetagCause::Normal => "retag",
+ RetagCause::FnEntry => "FnEntry retag",
+ RetagCause::FnReturn => "FnReturn retag",
+ RetagCause::TwoPhase => "two-phase retag",
+ }
+ .to_string()
+ }
+}
--- /dev/null
+use std::fmt;
+
+use crate::borrow_tracker::BorTag;
+
+/// An item in the per-location borrow stack.
+#[derive(Copy, Clone, Hash, PartialEq, Eq)]
+pub struct Item(u64);
+
+// An Item contains 3 bitfields:
+// * Bits 0-60 store a BorTag
+// * Bits 61-62 store a Permission
+// * Bit 63 stores a flag which indicates if we have a protector
+const TAG_MASK: u64 = u64::MAX >> 3;
+const PERM_MASK: u64 = 0x3 << 61;
+const PROTECTED_MASK: u64 = 0x1 << 63;
+
+const PERM_SHIFT: u64 = 61;
+const PROTECTED_SHIFT: u64 = 63;
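+
+// A minimal sketch of the packing, in terms of the constants above (the tag
+// value 3 is illustrative): `Item::new(BorTag::new(3).unwrap(), Permission::Unique, true)`
+// stores `3 | (Permission::UNIQUE << PERM_SHIFT) | (1 << PROTECTED_SHIFT)`,
+// i.e. bit 63 set for the protector, bits 61-62 zero for `Unique`, and bits
+// 0-60 holding the tag value 3.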
+
+impl Item {
+ pub fn new(tag: BorTag, perm: Permission, protected: bool) -> Self {
+ assert!(tag.get() <= TAG_MASK);
+ let packed_tag = tag.get();
+ let packed_perm = perm.to_bits() << PERM_SHIFT;
+ let packed_protected = u64::from(protected) << PROTECTED_SHIFT;
+
+ let new = Self(packed_tag | packed_perm | packed_protected);
+
+ debug_assert!(new.tag() == tag);
+ debug_assert!(new.perm() == perm);
+ debug_assert!(new.protected() == protected);
+
+ new
+ }
+
+ /// The tag of the pointer(s) this permission is granted to.
+ pub fn tag(self) -> BorTag {
+ BorTag::new(self.0 & TAG_MASK).unwrap()
+ }
+
+ /// The permission this item grants.
+ pub fn perm(self) -> Permission {
+ Permission::from_bits((self.0 & PERM_MASK) >> PERM_SHIFT)
+ }
+
+ /// Whether or not there is a protector for this tag
+ pub fn protected(self) -> bool {
+ self.0 & PROTECTED_MASK > 0
+ }
+
+ /// Set the Permission stored in this Item
+ pub fn set_permission(&mut self, perm: Permission) {
+ // Clear the currently set permission
+ self.0 &= !PERM_MASK;
+ // Write the new permission into the permission bits
+ self.0 |= perm.to_bits() << PERM_SHIFT;
+ }
+}
+
+impl fmt::Debug for Item {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "[{:?} for {:?}]", self.perm(), self.tag())
+ }
+}
+
+/// Indicates which permission is granted (by this item to some pointers)
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
+pub enum Permission {
+ /// Grants unique mutable access.
+ Unique,
+ /// Grants shared mutable access.
+ SharedReadWrite,
+ /// Grants shared read-only access.
+ SharedReadOnly,
+ /// Grants no access, but separates two groups of SharedReadWrite so they are not
+ /// all considered mutually compatible.
+ Disabled,
+}
+
+impl Permission {
+ const UNIQUE: u64 = 0;
+ const SHARED_READ_WRITE: u64 = 1;
+ const SHARED_READ_ONLY: u64 = 2;
+ const DISABLED: u64 = 3;
+
+ fn to_bits(self) -> u64 {
+ match self {
+ Permission::Unique => Self::UNIQUE,
+ Permission::SharedReadWrite => Self::SHARED_READ_WRITE,
+ Permission::SharedReadOnly => Self::SHARED_READ_ONLY,
+ Permission::Disabled => Self::DISABLED,
+ }
+ }
+
+ fn from_bits(perm: u64) -> Self {
+ match perm {
+ Self::UNIQUE => Permission::Unique,
+ Self::SHARED_READ_WRITE => Permission::SharedReadWrite,
+ Self::SHARED_READ_ONLY => Permission::SharedReadOnly,
+ Self::DISABLED => Permission::Disabled,
+ _ => unreachable!(),
+ }
+ }
+}
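+
+// A round-trip sketch (not a test in this file): `from_bits` inverts `to_bits`
+// for every variant, which is what lets `Item::perm` recover the permission
+// packed by `Item::new`:
+//
+//     for p in [Permission::Unique, Permission::SharedReadWrite,
+//               Permission::SharedReadOnly, Permission::Disabled] {
+//         assert_eq!(Permission::from_bits(p.to_bits()), p);
+//     }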
--- /dev/null
+//! Implements "Stacked Borrows". See <https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md>
+//! for further information.
+
+use log::trace;
+use std::cmp;
+use std::fmt::{self, Write};
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_middle::mir::{Mutability, RetagKind};
+use rustc_middle::ty::{
+ self,
+ layout::{HasParamEnv, LayoutOf},
+};
+use rustc_target::abi::{Abi, Size};
+
+use crate::borrow_tracker::{
+ stacked_borrows::diagnostics::{AllocHistory, DiagnosticCx, DiagnosticCxBuilder, TagHistory},
+ AccessKind, GlobalStateInner, ProtectorKind, RetagCause, RetagFields,
+};
+use crate::*;
+
+mod item;
+pub use item::{Item, Permission};
+mod stack;
+pub use stack::Stack;
+pub mod diagnostics;
+
+pub type AllocExtra = Stacks;
+
+/// Extra per-allocation state.
+#[derive(Clone, Debug)]
+pub struct Stacks {
+ // Even reading memory can have effects on the stack, so accesses to these stacks go
+ // through a `RefCell` one level up, in the allocation's borrow-tracker state.
+ stacks: RangeMap<Stack>,
+ /// Stores past operations on this allocation
+ history: AllocHistory,
+ /// The set of tags that have been exposed inside this allocation.
+ exposed_tags: FxHashSet<BorTag>,
+ /// Whether this memory has been modified since the last time the tag GC ran
+ modified_since_last_gc: bool,
+}
+
+/// Indicates which kind of reference is being created.
+/// Used by high-level `reborrow` to compute which permissions to grant to the
+/// new pointer.
+#[derive(Copy, Clone, Hash, PartialEq, Eq)]
+enum RefKind {
+ /// `&mut` and `Box`.
+ Unique { two_phase: bool },
+ /// `&` with or without interior mutability.
+ Shared,
+ /// `*mut`/`*const` (raw pointers).
+ Raw { mutable: bool },
+}
+
+impl fmt::Display for RefKind {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ RefKind::Unique { two_phase: false } => write!(f, "unique reference"),
+ RefKind::Unique { two_phase: true } => write!(f, "unique reference (two-phase)"),
+ RefKind::Shared => write!(f, "shared reference"),
+ RefKind::Raw { mutable: true } => write!(f, "raw (mutable) pointer"),
+ RefKind::Raw { mutable: false } => write!(f, "raw (constant) pointer"),
+ }
+ }
+}
+
+/// Error reporting
+pub fn err_sb_ub<'tcx>(
+ msg: String,
+ help: Option<String>,
+ history: Option<TagHistory>,
+) -> InterpError<'tcx> {
+ err_machine_stop!(TerminationInfo::StackedBorrowsUb { msg, help, history })
+}
+
+// # Stacked Borrows Core Begin
+
+/// We need to make at least the following things true:
+///
+/// U1: After creating a `Uniq`, it is at the top.
+/// U2: If the top is `Uniq`, accesses must be through that `Uniq` or remove it.
+/// U3: If an access happens with a `Uniq`, it requires the `Uniq` to be in the stack.
+///
+/// F1: After creating a `&`, the parts outside `UnsafeCell` have our `SharedReadOnly` on top.
+/// F2: If a write access happens, it pops the `SharedReadOnly`. This has two pieces:
+/// F2a: If a write happens granted by an item below our `SharedReadOnly`, the `SharedReadOnly`
+/// gets popped.
+/// F2b: No `SharedReadWrite` or `Unique` will ever be added on top of our `SharedReadOnly`.
+/// F3: If an access happens with an `&` outside `UnsafeCell`,
+/// it requires the `SharedReadOnly` to still be in the stack.
+
+/// Core relation on `Permission` to define which accesses are allowed
+impl Permission {
+ /// This defines for a given permission, whether it permits the given kind of access.
+ fn grants(self, access: AccessKind) -> bool {
+ // Disabled grants nothing. Otherwise, all items grant read access, and except for SharedReadOnly they grant write access.
+ self != Permission::Disabled
+ && (access == AccessKind::Read || self != Permission::SharedReadOnly)
+ }
+}
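+
+// Spelled out as a sketch, `grants` boils down to this table:
+//
+//     assert!(Permission::Unique.grants(AccessKind::Write));
+//     assert!(Permission::SharedReadWrite.grants(AccessKind::Write));
+//     assert!(Permission::SharedReadOnly.grants(AccessKind::Read));
+//     assert!(!Permission::SharedReadOnly.grants(AccessKind::Write));
+//     assert!(!Permission::Disabled.grants(AccessKind::Read));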
+
+/// Determines whether an item was invalidated by a conflicting access, or by deallocation.
+#[derive(Copy, Clone, Debug)]
+enum ItemInvalidationCause {
+ Conflict,
+ Dealloc,
+}
+
+/// Core per-location operations: access, dealloc, reborrow.
+impl<'tcx> Stack {
+ /// Find the first write-incompatible item above the given one --
+ /// i.e, find the height to which the stack will be truncated when writing to `granting`.
+ fn find_first_write_incompatible(&self, granting: usize) -> usize {
+ let perm = self.get(granting).unwrap().perm();
+ match perm {
+ Permission::SharedReadOnly => bug!("Cannot use SharedReadOnly for writing"),
+ Permission::Disabled => bug!("Cannot use Disabled for anything"),
+ Permission::Unique => {
+ // On a write, everything above us is incompatible.
+ granting + 1
+ }
+ Permission::SharedReadWrite => {
+ // The `SharedReadWrite` *just* above us are compatible, so skip those.
+ let mut idx = granting + 1;
+ while let Some(item) = self.get(idx) {
+ if item.perm() == Permission::SharedReadWrite {
+ // Go on.
+ idx += 1;
+ } else {
+ // Found first incompatible!
+ break;
+ }
+ }
+ idx
+ }
+ }
+ }
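+
+ // A worked sketch (permissions name stack positions, indices are illustrative):
+ // for a stack [Unique, SharedReadWrite, SharedReadWrite, SharedReadOnly],
+ // `find_first_write_incompatible(1)` skips the adjacent SharedReadWrite at
+ // index 2 and returns 3, while `find_first_write_incompatible(0)` (a Unique)
+ // returns 1: a write through the Unique truncates everything above it.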
+
+ /// The given item was invalidated -- check its protectors for whether that will cause UB.
+ fn item_invalidated(
+ item: &Item,
+ global: &GlobalStateInner,
+ dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
+ cause: ItemInvalidationCause,
+ ) -> InterpResult<'tcx> {
+ if !global.tracked_pointer_tags.is_empty() {
+ dcx.check_tracked_tag_popped(item, global);
+ }
+
+ if !item.protected() {
+ return Ok(());
+ }
+
+ // We store tags twice, once in global.protected_tags and once in each call frame.
+ // We do this because consulting a single global set in this function is faster
+ // than attempting to search all call frames in the program for the `FrameExtra`
+ // (if any) which is protecting the popped tag.
+ //
+ // This duplication trades off making `end_call` slower to make this function faster. This
+ // trade-off is profitable in practice for a combination of two reasons.
+ // 1. A single protected tag can (and does in some programs) protect thousands of `Item`s.
+ // Therefore, adding overhead in function call/return is profitable even if it only
+ // saves a little work in this function.
+ // 2. Most frames protect only one or two tags. So this duplicative global turns a search
+ // which ends up about linear in the number of protected tags in the program into a
+ // constant-time check (and that linear search would be slow, because the tags in the frames aren't contiguous).
+ if let Some(&protector_kind) = global.protected_tags.get(&item.tag()) {
+ // The only way this is okay is if the protector is weak and we are deallocating with
+ // the right pointer.
+ let allowed = matches!(cause, ItemInvalidationCause::Dealloc)
+ && matches!(protector_kind, ProtectorKind::WeakProtector);
+ if !allowed {
+ return Err(dcx.protector_error(item, protector_kind).into());
+ }
+ }
+ Ok(())
+ }
+
+ /// Test if a memory `access` using pointer tagged `tag` is granted.
+ /// If yes, return the index of the item that granted it.
+ /// `range` refers to the entire operation, and `offset` refers to the specific offset into the
+ /// allocation that we are currently checking.
+ fn access(
+ &mut self,
+ access: AccessKind,
+ tag: ProvenanceExtra,
+ global: &GlobalStateInner,
+ dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
+ exposed_tags: &FxHashSet<BorTag>,
+ ) -> InterpResult<'tcx> {
+ // Two main steps: Find granting item, remove incompatible items above.
+
+ // Step 1: Find granting item.
+ let granting_idx =
+ self.find_granting(access, tag, exposed_tags).map_err(|()| dcx.access_error(self))?;
+
+ // Step 2: Remove incompatible items above them. Make sure we do not remove protected
+ // items. Behavior differs for reads and writes.
+ // In case of wildcards/unknown matches, we remove everything that is *definitely* gone.
+ if access == AccessKind::Write {
+ // Remove everything above the write-compatible items, like a proper stack. This makes sure read-only and unique
+ // pointers become invalid on write accesses (ensures F2a, and ensures U2 for write accesses).
+ let first_incompatible_idx = if let Some(granting_idx) = granting_idx {
+ // The granting_idx *might* be approximate, but any lower idx would remove more
+ // things. Even if this is a Unique and the lower idx is an SRW (which removes
+ // less), there is an SRW group boundary here so strictly more would get removed.
+ self.find_first_write_incompatible(granting_idx)
+ } else {
+ // We are writing to something in the unknown part.
+ // There is a SRW group boundary between the unknown and the known, so everything is incompatible.
+ 0
+ };
+ self.pop_items_after(first_incompatible_idx, |item| {
+ Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
+ dcx.log_invalidation(item.tag());
+ Ok(())
+ })?;
+ } else {
+ // On a read, *disable* all `Unique` above the granting item. This ensures U2 for read accesses.
+ // The reason this is not following the stack discipline (by removing the first Unique and
+ // everything on top of it) is that in `let raw = &mut *x as *mut _; let _val = *x;`, the second statement
+ // would pop the `Unique` from the reborrow of the first statement, and subsequently also pop the
+ // `SharedReadWrite` for `raw`.
+ // This pattern occurs a lot in the standard library: create a raw pointer, then also create a shared
+ // reference and use that.
+ // We *disable* instead of removing `Unique` to avoid "connecting" two neighbouring blocks of SRWs.
+ let first_incompatible_idx = if let Some(granting_idx) = granting_idx {
+ // The granting_idx *might* be approximate, but any lower idx would disable more things.
+ granting_idx + 1
+ } else {
+ // We are reading from something in the unknown part. That means *all* `Unique` we know about are dead now.
+ 0
+ };
+ self.disable_uniques_starting_at(first_incompatible_idx, |item| {
+ Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
+ dcx.log_invalidation(item.tag());
+ Ok(())
+ })?;
+ }
+
+ // If this was an approximate action, we now collapse everything into an unknown.
+ if granting_idx.is_none() || matches!(tag, ProvenanceExtra::Wildcard) {
+ // Compute the upper bound of the items that remain.
+ // (This is why we did all the work above: to reduce the items we have to consider here.)
+ let mut max = BorTag::one();
+ for i in 0..self.len() {
+ let item = self.get(i).unwrap();
+ // Skip disabled items, they cannot be matched anyway.
+ if !matches!(item.perm(), Permission::Disabled) {
+ // We are looking for a strict upper bound, so add 1 to this tag.
+ max = cmp::max(item.tag().succ().unwrap(), max);
+ }
+ }
+ if let Some(unk) = self.unknown_bottom() {
+ max = cmp::max(unk, max);
+ }
+ // Use `max` as new strict upper bound for everything.
+ trace!(
+ "access: forgetting stack to upper bound {max} due to wildcard or unknown access",
+ max = max.get(),
+ );
+ self.set_unknown_bottom(max);
+ }
+
+ // Done.
+ Ok(())
+ }
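+
+ // The read/write asymmetry in a sketch (t1..t3 are illustrative tags): given
+ // a stack [Unique(t1), SharedReadWrite(t2), Unique(t3)], a write with t2 pops
+ // t3 (the first write-incompatible item above t2's SRW group), while a read
+ // with t2 merely *disables* t3, leaving the SRW group structure intact.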
+
+ /// Deallocate a location: Like a write access, but also there must be no
+ /// active protectors at all because we will remove all items.
+ fn dealloc(
+ &mut self,
+ tag: ProvenanceExtra,
+ global: &GlobalStateInner,
+ dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
+ exposed_tags: &FxHashSet<BorTag>,
+ ) -> InterpResult<'tcx> {
+ // Step 1: Make a write access.
+ // As part of this we do regular protector checking, i.e. even weakly protected items cause UB when popped.
+ self.access(AccessKind::Write, tag, global, dcx, exposed_tags)?;
+
+ // Step 2: Pretend we remove the remaining items, checking if any are strongly protected.
+ for idx in (0..self.len()).rev() {
+ let item = self.get(idx).unwrap();
+ Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Dealloc)?;
+ }
+
+ Ok(())
+ }
+
+ /// Derive a new pointer from one with the given tag.
+ ///
+ /// `access` indicates which kind of memory access this retag itself should correspond to.
+ fn grant(
+ &mut self,
+ derived_from: ProvenanceExtra,
+ new: Item,
+ access: Option<AccessKind>,
+ global: &GlobalStateInner,
+ dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
+ exposed_tags: &FxHashSet<BorTag>,
+ ) -> InterpResult<'tcx> {
+ dcx.start_grant(new.perm());
+
+ // Compute where to put the new item.
+ // Either way, we ensure that we insert the new item in a way such that between
+ // `derived_from` and the new one, there are only items *compatible with* `derived_from`.
+ let new_idx = if let Some(access) = access {
+ // Simple case: We are just a regular memory access, and then push our thing on top,
+ // like a regular stack.
+ // This ensures F2b for `Unique`, by removing offending `SharedReadOnly`.
+ self.access(access, derived_from, global, dcx, exposed_tags)?;
+
+ // We insert "as far up as possible": We know only compatible items are remaining
+ // on top of `derived_from`, and we want the new item at the top so that we
+ // get the strongest possible guarantees.
+ // This ensures U1 and F1.
+ self.len()
+ } else {
+ // The tricky case: creating a new SRW permission without actually being an access.
+ assert!(new.perm() == Permission::SharedReadWrite);
+
+ // First we figure out which item grants our parent (`derived_from`) this kind of access.
+ // We use that to determine where to put the new item.
+ let granting_idx = self
+ .find_granting(AccessKind::Write, derived_from, exposed_tags)
+ .map_err(|()| dcx.grant_error(self))?;
+
+ let (Some(granting_idx), ProvenanceExtra::Concrete(_)) = (granting_idx, derived_from) else {
+ // The parent is a wildcard pointer or matched the unknown bottom.
+ // This is approximate. Nobody knows what happened, so forget everything.
+ // The new thing is SRW anyway, so we cannot push it "on top of the unkown part"
+ // (for all we know, it might join an SRW group inside the unknown).
+ trace!("reborrow: forgetting stack entirely due to SharedReadWrite reborrow from wildcard or unknown");
+ self.set_unknown_bottom(global.next_ptr_tag);
+ return Ok(());
+ };
+
+ // SharedReadWrite can coexist with "existing loans", meaning they don't act like a write
+ // access. Instead of popping the stack, we insert the item at the place the stack would
+ // be popped to (i.e., we insert it above all the write-compatible items).
+ // This ensures F2b by adding the new item below any potentially existing `SharedReadOnly`.
+ self.find_first_write_incompatible(granting_idx)
+ };
+
+ // Put the new item there.
+ trace!("reborrow: adding item {:?}", new);
+ self.insert(new_idx, new);
+ Ok(())
+ }
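+
+ // A sketch of the non-access SRW case above (tags are illustrative): starting
+ // from [Unique(t1), SharedReadOnly(t2)], granting a SharedReadWrite derived
+ // from t1 with `access == None` inserts at `find_first_write_incompatible(0) == 1`,
+ // yielding [Unique(t1), SharedReadWrite(new), SharedReadOnly(t2)]: the
+ // existing read-only loan stays on top, as F2b requires.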
+}
+// # Stacked Borrows Core End
+
+/// Integration with the BorTag garbage collector
+impl Stacks {
+ pub fn remove_unreachable_tags(&mut self, live_tags: &FxHashSet<BorTag>) {
+ if self.modified_since_last_gc {
+ for stack in self.stacks.iter_mut_all() {
+ if stack.len() > 64 {
+ stack.retain(live_tags);
+ }
+ }
+ self.modified_since_last_gc = false;
+ }
+ }
+}
+
+impl VisitTags for Stacks {
+ fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
+ for tag in self.exposed_tags.iter().copied() {
+ visit(tag);
+ }
+ }
+}
+
+/// Map per-stack operations to higher-level per-location-range operations.
+impl<'tcx> Stacks {
+ /// Creates a new stack with an initial tag. For diagnostic purposes, we also need to know
+ /// the [`AllocId`] of the allocation this is associated with.
+ fn new(
+ size: Size,
+ perm: Permission,
+ tag: BorTag,
+ id: AllocId,
+ machine: &MiriMachine<'_, '_>,
+ ) -> Self {
+ let item = Item::new(tag, perm, false);
+ let stack = Stack::new(item);
+
+ Stacks {
+ stacks: RangeMap::new(size, stack),
+ history: AllocHistory::new(id, item, machine),
+ exposed_tags: FxHashSet::default(),
+ modified_since_last_gc: false,
+ }
+ }
+
+ /// Call `f` on every stack in the range.
+ fn for_each(
+ &mut self,
+ range: AllocRange,
+ mut dcx_builder: DiagnosticCxBuilder<'_, '_, 'tcx>,
+ mut f: impl FnMut(
+ &mut Stack,
+ &mut DiagnosticCx<'_, '_, '_, 'tcx>,
+ &mut FxHashSet<BorTag>,
+ ) -> InterpResult<'tcx>,
+ ) -> InterpResult<'tcx> {
+ self.modified_since_last_gc = true;
+ for (offset, stack) in self.stacks.iter_mut(range.start, range.size) {
+ let mut dcx = dcx_builder.build(&mut self.history, offset);
+ f(stack, &mut dcx, &mut self.exposed_tags)?;
+ dcx_builder = dcx.unbuild();
+ }
+ Ok(())
+ }
+}
+
+/// Glue code to connect with Miri Machine Hooks
+impl Stacks {
+ pub fn new_allocation(
+ id: AllocId,
+ size: Size,
+ state: &mut GlobalStateInner,
+ kind: MemoryKind<MiriMemoryKind>,
+ machine: &MiriMachine<'_, '_>,
+ ) -> Self {
+ let (base_tag, perm) = match kind {
+ // New unique borrow. This tag is not accessible by the program,
+ // so it will only ever be used when using the local directly (i.e.,
+ // not through a pointer). That is, whenever we directly write to a local, this will pop
+ // everything else off the stack, invalidating all previous pointers,
+ // and in particular, *all* raw pointers.
+ MemoryKind::Stack => (state.base_ptr_tag(id, machine), Permission::Unique),
+ // Everything else is shared by default.
+ _ => (state.base_ptr_tag(id, machine), Permission::SharedReadWrite),
+ };
+ Stacks::new(size, perm, base_tag, id, machine)
+ }
+
+ #[inline(always)]
+ pub fn before_memory_read<'tcx, 'mir, 'ecx>(
+ &mut self,
+ alloc_id: AllocId,
+ tag: ProvenanceExtra,
+ range: AllocRange,
+ machine: &'ecx MiriMachine<'mir, 'tcx>,
+ ) -> InterpResult<'tcx>
+ where
+ 'tcx: 'ecx,
+ {
+ trace!(
+ "read access with tag {:?}: {:?}, size {}",
+ tag,
+ Pointer::new(alloc_id, range.start),
+ range.size.bytes()
+ );
+ let dcx = DiagnosticCxBuilder::read(machine, tag, range);
+ let state = machine.borrow_tracker.as_ref().unwrap().borrow();
+ self.for_each(range, dcx, |stack, dcx, exposed_tags| {
+ stack.access(AccessKind::Read, tag, &state, dcx, exposed_tags)
+ })
+ }
+
+ #[inline(always)]
+ pub fn before_memory_write<'tcx>(
+ &mut self,
+ alloc_id: AllocId,
+ tag: ProvenanceExtra,
+ range: AllocRange,
+ machine: &mut MiriMachine<'_, 'tcx>,
+ ) -> InterpResult<'tcx> {
+ trace!(
+ "write access with tag {:?}: {:?}, size {}",
+ tag,
+ Pointer::new(alloc_id, range.start),
+ range.size.bytes()
+ );
+ let dcx = DiagnosticCxBuilder::write(machine, tag, range);
+ let state = machine.borrow_tracker.as_ref().unwrap().borrow();
+ self.for_each(range, dcx, |stack, dcx, exposed_tags| {
+ stack.access(AccessKind::Write, tag, &state, dcx, exposed_tags)
+ })
+ }
+
+ #[inline(always)]
+ pub fn before_memory_deallocation<'tcx>(
+ &mut self,
+ alloc_id: AllocId,
+ tag: ProvenanceExtra,
+ range: AllocRange,
+ machine: &mut MiriMachine<'_, 'tcx>,
+ ) -> InterpResult<'tcx> {
+ trace!("deallocation with tag {:?}: {:?}, size {}", tag, alloc_id, range.size.bytes());
+ let dcx = DiagnosticCxBuilder::dealloc(machine, tag);
+ let state = machine.borrow_tracker.as_ref().unwrap().borrow();
+ self.for_each(range, dcx, |stack, dcx, exposed_tags| {
+ stack.dealloc(tag, &state, dcx, exposed_tags)
+ })?;
+ Ok(())
+ }
+
+ fn expose_tag(&mut self, tag: BorTag) {
+ self.exposed_tags.insert(tag);
+ }
+}
+
+/// Retagging/reborrowing. There is some policy in here, such as which permissions
+/// to grant for which references, and when to add protectors.
+impl<'mir: 'ecx, 'tcx: 'mir, 'ecx> EvalContextPrivExt<'mir, 'tcx, 'ecx>
+ for crate::MiriInterpCx<'mir, 'tcx>
+{
+}
+trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'mir, 'tcx> {
+ /// Returns the `AllocId` the reborrow was done in, if some actual borrow stack manipulation
+ /// happened.
+ fn sb_reborrow(
+ &mut self,
+ place: &MPlaceTy<'tcx, Provenance>,
+ size: Size,
+ kind: RefKind,
+ retag_cause: RetagCause, // What caused this retag, for diagnostics only
+ new_tag: BorTag,
+ protect: Option<ProtectorKind>,
+ ) -> InterpResult<'tcx, Option<AllocId>> {
+ let this = self.eval_context_mut();
+
+ // It is crucial that this gets called on all code paths, to ensure we track tag creation.
+ let log_creation = |this: &MiriInterpCx<'mir, 'tcx>,
+ loc: Option<(AllocId, Size, ProvenanceExtra)>| // alloc_id, base_offset, orig_tag
+ -> InterpResult<'tcx> {
+ let global = this.machine.borrow_tracker.as_ref().unwrap().borrow();
+ let ty = place.layout.ty;
+ if global.tracked_pointer_tags.contains(&new_tag) {
+ let mut kind_str = format!("{kind}");
+ match kind {
+ RefKind::Unique { two_phase: false }
+ if !ty.is_unpin(*this.tcx, this.param_env()) =>
+ {
+ write!(kind_str, " (!Unpin pointee type {ty})").unwrap()
+ },
+ RefKind::Shared
+ if !ty.is_freeze(*this.tcx, this.param_env()) =>
+ {
+ write!(kind_str, " (!Freeze pointee type {ty})").unwrap()
+ },
+ _ => write!(kind_str, " (pointee type {ty})").unwrap(),
+ };
+ this.emit_diagnostic(NonHaltingDiagnostic::CreatedPointerTag(
+ new_tag.inner(),
+ Some(kind_str),
+ loc.map(|(alloc_id, base_offset, orig_tag)| (alloc_id, alloc_range(base_offset, size), orig_tag)),
+ ));
+ }
+ drop(global); // don't hold that reference any longer than we have to
+
+ let Some((alloc_id, base_offset, orig_tag)) = loc else {
+ return Ok(())
+ };
+
+ let (_size, _align, alloc_kind) = this.get_alloc_info(alloc_id);
+ match alloc_kind {
+ AllocKind::LiveData => {
+ // This should have alloc_extra data, but `get_alloc_extra` can still fail
+ // if converting this alloc_id from a global to a local one
+ // uncovers a non-supported `extern static`.
+ let extra = this.get_alloc_extra(alloc_id)?;
+ let mut stacked_borrows = extra
+ .borrow_tracker
+ .as_ref()
+ .expect("We should have borrow tracking data")
+ .assert_sb()
+ .borrow_mut();
+ // Note that we create a *second* `DiagnosticCxBuilder` below for the actual retag.
+ // FIXME: can this be done cleaner?
+ let dcx = DiagnosticCxBuilder::retag(
+ &this.machine,
+ retag_cause,
+ new_tag,
+ orig_tag,
+ alloc_range(base_offset, size),
+ );
+ let mut dcx = dcx.build(&mut stacked_borrows.history, base_offset);
+ dcx.log_creation();
+ if protect.is_some() {
+ dcx.log_protector();
+ }
+ },
+ AllocKind::Function | AllocKind::VTable | AllocKind::Dead => {
+ // No stacked borrows on these allocations.
+ }
+ }
+ Ok(())
+ };
+
+ if size == Size::ZERO {
+ trace!(
+ "reborrow of size 0: {} reference {:?} derived from {:?} (pointee {})",
+ kind,
+ new_tag,
+ place.ptr,
+ place.layout.ty,
+ );
+ // Don't update any stacks for a zero-sized access; borrow stacks are per-byte and this
+ // touches no bytes so there is no stack to put this tag in.
+ // However, if the pointer for this operation points at a real allocation we still
+ // record where it was created so that we can issue a helpful diagnostic if there is an
+ // attempt to use it for a non-zero-sized access.
+ // Dangling slices are a common case here; it's valid to get their length, but (with
+ // raw-pointer tagging, for example) all calls to `get_unchecked` on them are invalid.
+ if let Ok((alloc_id, base_offset, orig_tag)) = this.ptr_try_get_alloc_id(place.ptr) {
+ log_creation(this, Some((alloc_id, base_offset, orig_tag)))?;
+ return Ok(Some(alloc_id));
+ }
+ // This pointer doesn't come with an AllocId. :shrug:
+ log_creation(this, None)?;
+ return Ok(None);
+ }
+
+ let (alloc_id, base_offset, orig_tag) = this.ptr_get_alloc_id(place.ptr)?;
+ log_creation(this, Some((alloc_id, base_offset, orig_tag)))?;
+
+ // Ensure we bail out if the pointer goes out-of-bounds (see miri#1050).
+ let (alloc_size, _) = this.get_live_alloc_size_and_align(alloc_id)?;
+ if base_offset + size > alloc_size {
+ throw_ub!(PointerOutOfBounds {
+ alloc_id,
+ alloc_size,
+ ptr_offset: this.machine_usize_to_isize(base_offset.bytes()),
+ ptr_size: size,
+ msg: CheckInAllocMsg::InboundsTest
+ });
+ }
+
+ trace!(
+ "reborrow: {} reference {:?} derived from {:?} (pointee {}): {:?}, size {}",
+ kind,
+ new_tag,
+ orig_tag,
+ place.layout.ty,
+ Pointer::new(alloc_id, base_offset),
+ size.bytes()
+ );
+
+ if let Some(protect) = protect {
+ // See comment in `Stack::item_invalidated` for why we store the tag twice.
+ this.frame_mut().extra.borrow_tracker.as_mut().unwrap().protected_tags.push(new_tag);
+ this.machine
+ .borrow_tracker
+ .as_mut()
+ .unwrap()
+ .get_mut()
+ .protected_tags
+ .insert(new_tag, protect);
+ }
+
+ // Update the stacks.
+ // Make sure that raw pointers and mutable shared references are reborrowed "weak":
+ // There could be existing unique pointers reborrowed from them that should remain valid!
+ let (perm, access) = match kind {
+ RefKind::Unique { two_phase } => {
+ // Permission is Unique only if the type is `Unpin` and this is not two-phase
+ let perm = if !two_phase && place.layout.ty.is_unpin(*this.tcx, this.param_env()) {
+ Permission::Unique
+ } else {
+ Permission::SharedReadWrite
+ };
+ // We do an access for all full borrows, even if `!Unpin`.
+ let access = if !two_phase { Some(AccessKind::Write) } else { None };
+ (perm, access)
+ }
+ RefKind::Raw { mutable: true } => {
+ // Creating a raw ptr does not count as an access
+ (Permission::SharedReadWrite, None)
+ }
+ RefKind::Shared | RefKind::Raw { mutable: false } => {
+ // Shared references and *const are a whole different kind of game, the
+ // permission is not uniform across the entire range!
+ // We need a frozen-sensitive reborrow.
+ // We have to use shared references to alloc/memory_extra here since
+ // `visit_freeze_sensitive` needs to access the global state.
+ let alloc_extra = this.get_alloc_extra(alloc_id)?;
+ let mut stacked_borrows = alloc_extra
+ .borrow_tracker
+ .as_ref()
+ .expect("We should have borrow tracking data")
+ .assert_sb()
+ .borrow_mut();
+ this.visit_freeze_sensitive(place, size, |mut range, frozen| {
+ // Adjust range.
+ range.start += base_offset;
+ // We are only ever `SharedReadOnly` inside the frozen bits.
+ let (perm, access) = if frozen {
+ (Permission::SharedReadOnly, Some(AccessKind::Read))
+ } else {
+ // Inside UnsafeCell, this does *not* count as an access, as there
+ // might actually be mutable references further up the stack that
+ // we have to keep alive.
+ (Permission::SharedReadWrite, None)
+ };
+ let protected = if frozen {
+ protect.is_some()
+ } else {
+ // We do not protect inside UnsafeCell.
+ // This fixes https://github.com/rust-lang/rust/issues/55005.
+ false
+ };
+ let item = Item::new(new_tag, perm, protected);
+ let global = this.machine.borrow_tracker.as_ref().unwrap().borrow();
+ let dcx = DiagnosticCxBuilder::retag(
+ &this.machine,
+ retag_cause,
+ new_tag,
+ orig_tag,
+ alloc_range(base_offset, size),
+ );
+ stacked_borrows.for_each(range, dcx, |stack, dcx, exposed_tags| {
+ stack.grant(orig_tag, item, access, &global, dcx, exposed_tags)
+ })?;
+ drop(global);
+ if let Some(access) = access {
+ assert_eq!(access, AccessKind::Read);
+ // Make sure the data race model also knows about this.
+ if let Some(data_race) = alloc_extra.data_race.as_ref() {
+ data_race.read(alloc_id, range, &this.machine)?;
+ }
+ }
+ Ok(())
+ })?;
+ return Ok(Some(alloc_id));
+ }
+ };
+
+ // Here we can avoid `borrow()` calls because we have mutable references.
+ // Note that this asserts that the allocation is mutable -- but since we are creating a
+ // mutable pointer, that seems reasonable.
+ let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc_id)?;
+ let stacked_borrows = alloc_extra
+ .borrow_tracker
+ .as_mut()
+ .expect("We should have borrow tracking data")
+ .assert_sb_mut()
+ .get_mut();
+ let item = Item::new(new_tag, perm, protect.is_some());
+ let range = alloc_range(base_offset, size);
+ let global = machine.borrow_tracker.as_ref().unwrap().borrow();
+ let dcx = DiagnosticCxBuilder::retag(
+ machine,
+ retag_cause,
+ new_tag,
+ orig_tag,
+ alloc_range(base_offset, size),
+ );
+ stacked_borrows.for_each(range, dcx, |stack, dcx, exposed_tags| {
+ stack.grant(orig_tag, item, access, &global, dcx, exposed_tags)
+ })?;
+ drop(global);
+ if let Some(access) = access {
+ assert_eq!(access, AccessKind::Write);
+ // Make sure the data race model also knows about this.
+ if let Some(data_race) = alloc_extra.data_race.as_mut() {
+ data_race.write(alloc_id, range, machine)?;
+ }
+ }
+
+ Ok(Some(alloc_id))
+ }
+
+ /// Retags an individual pointer, returning the retagged version.
+ /// `kind` indicates what kind of reference is being created.
+ fn sb_retag_reference(
+ &mut self,
+ val: &ImmTy<'tcx, Provenance>,
+ kind: RefKind,
+ retag_cause: RetagCause, // What caused this retag, for diagnostics only
+ protect: Option<ProtectorKind>,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> {
+ let this = self.eval_context_mut();
+ // We want a place for where the ptr *points to*, so we get one.
+ let place = this.ref_to_mplace(val)?;
+ let size = this.size_and_align_of_mplace(&place)?.map(|(size, _)| size);
+ // FIXME: If we cannot determine the size (because the unsized tail is an `extern type`),
+ // bail out -- we cannot reasonably figure out which memory range to reborrow.
+ // See https://github.com/rust-lang/unsafe-code-guidelines/issues/276.
+ let size = match size {
+ Some(size) => size,
+ None => return Ok(val.clone()),
+ };
+
+ // Compute new borrow.
+ let new_tag = this.machine.borrow_tracker.as_mut().unwrap().get_mut().new_ptr();
+
+ // Reborrow.
+ let alloc_id = this.sb_reborrow(&place, size, kind, retag_cause, new_tag, protect)?;
+
+ // Adjust pointer.
+ let new_place = place.map_provenance(|p| {
+ p.map(|prov| {
+ match alloc_id {
+ Some(alloc_id) => {
+ // If `reborrow` could figure out the AllocId of this ptr, hard-code it into the new one.
+ // Even if we started out with a wildcard, this newly retagged pointer is tied to that allocation.
+ Provenance::Concrete { alloc_id, tag: new_tag }
+ }
+ None => {
+ // Looks like this has to stay a wildcard pointer.
+ assert!(matches!(prov, Provenance::Wildcard));
+ Provenance::Wildcard
+ }
+ }
+ })
+ });
+
+ // Return new pointer.
+ Ok(ImmTy::from_immediate(new_place.to_ref(this), val.layout))
+ }
+}
+
+impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
+pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
+ fn sb_retag(
+ &mut self,
+ kind: RetagKind,
+ place: &PlaceTy<'tcx, Provenance>,
+ ) -> InterpResult<'tcx> {
+ let this = self.eval_context_mut();
+ let retag_fields = this.machine.borrow_tracker.as_mut().unwrap().get_mut().retag_fields;
+ let retag_cause = match kind {
+ RetagKind::TwoPhase { .. } => RetagCause::TwoPhase,
+ RetagKind::FnEntry => RetagCause::FnEntry,
+ RetagKind::Raw | RetagKind::Default => RetagCause::Normal,
+ };
+ let mut visitor = RetagVisitor { ecx: this, kind, retag_cause, retag_fields };
+ return visitor.visit_value(place);
+
+ // The actual visitor.
+ struct RetagVisitor<'ecx, 'mir, 'tcx> {
+ ecx: &'ecx mut MiriInterpCx<'mir, 'tcx>,
+ kind: RetagKind,
+ retag_cause: RetagCause,
+ retag_fields: RetagFields,
+ }
+ impl<'ecx, 'mir, 'tcx> RetagVisitor<'ecx, 'mir, 'tcx> {
+ #[inline(always)] // yes this helps in our benchmarks
+ fn retag_place(
+ &mut self,
+ place: &PlaceTy<'tcx, Provenance>,
+ ref_kind: RefKind,
+ retag_cause: RetagCause,
+ protector: Option<ProtectorKind>,
+ ) -> InterpResult<'tcx> {
+ let val = self.ecx.read_immediate(&self.ecx.place_to_op(place)?)?;
+ let val = self.ecx.sb_retag_reference(&val, ref_kind, retag_cause, protector)?;
+ self.ecx.write_immediate(*val, place)?;
+ Ok(())
+ }
+ }
+ impl<'ecx, 'mir, 'tcx> MutValueVisitor<'mir, 'tcx, MiriMachine<'mir, 'tcx>>
+ for RetagVisitor<'ecx, 'mir, 'tcx>
+ {
+ type V = PlaceTy<'tcx, Provenance>;
+
+ #[inline(always)]
+ fn ecx(&mut self) -> &mut MiriInterpCx<'mir, 'tcx> {
+ self.ecx
+ }
+
+ fn visit_box(&mut self, place: &PlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
+ // Boxes get a weak protector, since they may be deallocated.
+ self.retag_place(
+ place,
+ RefKind::Unique { two_phase: false },
+ self.retag_cause,
+ /*protector*/
+ (self.kind == RetagKind::FnEntry).then_some(ProtectorKind::WeakProtector),
+ )
+ }
+
+ fn visit_value(&mut self, place: &PlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
+ // If this place is smaller than a pointer, we know that it can't contain any
+ // pointers we need to retag, so we can stop recursion early.
+ // This optimization is crucial for ZSTs, because they can contain way more fields
+ // than we can ever visit.
+ if place.layout.is_sized() && place.layout.size < self.ecx.pointer_size() {
+ return Ok(());
+ }
+
+ // Check the type of this value to see what to do with it (retag, or recurse).
+ match place.layout.ty.kind() {
+ ty::Ref(_, _, mutbl) => {
+ let ref_kind = match mutbl {
+ Mutability::Mut =>
+ RefKind::Unique { two_phase: self.kind == RetagKind::TwoPhase },
+ Mutability::Not => RefKind::Shared,
+ };
+ self.retag_place(
+ place,
+ ref_kind,
+ self.retag_cause,
+ /*protector*/
+ (self.kind == RetagKind::FnEntry)
+ .then_some(ProtectorKind::StrongProtector),
+ )?;
+ }
+ ty::RawPtr(tym) => {
+ // We definitely do *not* want to recurse into raw pointers -- wide raw
+ // pointers have fields, and for dyn Trait pointees those can have reference
+ // type!
+ if self.kind == RetagKind::Raw {
+ // Raw pointers need to be enabled.
+ self.retag_place(
+ place,
+ RefKind::Raw { mutable: tym.mutbl == Mutability::Mut },
+ self.retag_cause,
+ /*protector*/ None,
+ )?;
+ }
+ }
+ _ if place.layout.ty.ty_adt_def().is_some_and(|adt| adt.is_box()) => {
+ // Recurse for boxes, they require some tricky handling and will end up in `visit_box` above.
+ // (Yes this means we technically also recursively retag the allocator itself
+ // even if field retagging is not enabled. *shrug*)
+ self.walk_value(place)?;
+ }
+ _ => {
+ // Not a reference/pointer/box. Only recurse if configured appropriately.
+ let recurse = match self.retag_fields {
+ RetagFields::No => false,
+ RetagFields::Yes => true,
+ RetagFields::OnlyScalar => {
+ // Matching `ArgAbi::new` at the time of writing, only fields of
+ // `Scalar` and `ScalarPair` ABI are considered.
+ matches!(place.layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..))
+ }
+ };
+ if recurse {
+ self.walk_value(place)?;
+ }
+ }
+ }
+
+ Ok(())
+ }
+ }
+ }
+
+ /// After a stack frame got pushed, retag the return place so that we are sure
+ /// it does not alias with anything.
+ ///
+ /// This is a HACK because there is nothing in MIR that would make the retag
+ /// explicit. Also see <https://github.com/rust-lang/rust/issues/71117>.
+ fn sb_retag_return_place(&mut self) -> InterpResult<'tcx> {
+ let this = self.eval_context_mut();
+ let return_place = &this.frame().return_place;
+ if return_place.layout.is_zst() {
+ // There may not be any memory here, nothing to do.
+ return Ok(());
+ }
+ // We need this to be in-memory to use tagged pointers.
+ let return_place = this.force_allocation(&return_place.clone())?;
+
+ // We have to turn the place into a pointer to use the existing code.
+ // (The pointer type does not matter, so we use a raw pointer.)
+ let ptr_layout = this.layout_of(this.tcx.mk_mut_ptr(return_place.layout.ty))?;
+ let val = ImmTy::from_immediate(return_place.to_ref(this), ptr_layout);
+ // Reborrow it. With protection! That is part of the point.
+ let val = this.sb_retag_reference(
+ &val,
+ RefKind::Unique { two_phase: false },
+ RetagCause::FnReturn,
+ /*protector*/ Some(ProtectorKind::StrongProtector),
+ )?;
+ // And use reborrowed pointer for return place.
+ let return_place = this.ref_to_mplace(&val)?;
+ this.frame_mut().return_place = return_place.into();
+
+ Ok(())
+ }
+
+ /// Mark the given tag as exposed. It was found on a pointer with the given AllocId.
+ fn sb_expose_tag(&mut self, alloc_id: AllocId, tag: BorTag) -> InterpResult<'tcx> {
+ let this = self.eval_context_mut();
+
+ // Function pointers and dead objects don't have an alloc_extra so we ignore them.
+ // This is okay because accessing them is UB anyway, no need for any Stacked Borrows checks.
+ // NOT using `get_alloc_extra_mut` since this might be a read-only allocation!
+ let (_size, _align, kind) = this.get_alloc_info(alloc_id);
+ match kind {
+ AllocKind::LiveData => {
+ // This should have alloc_extra data, but `get_alloc_extra` can still fail
+ // if converting this alloc_id from a global to a local one
+ // uncovers a non-supported `extern static`.
+ let alloc_extra = this.get_alloc_extra(alloc_id)?;
+ trace!("Stacked Borrows tag {tag:?} exposed in {alloc_id:?}");
+ alloc_extra
+ .borrow_tracker
+ .as_ref()
+ .expect("We should have borrow tracking data")
+ .assert_sb()
+ .borrow_mut()
+ .expose_tag(tag);
+ }
+ AllocKind::Function | AllocKind::VTable | AllocKind::Dead => {
+ // No stacked borrows on these allocations.
+ }
+ }
+ Ok(())
+ }
+
+ fn print_stacks(&mut self, alloc_id: AllocId) -> InterpResult<'tcx> {
+ let this = self.eval_context_mut();
+ let alloc_extra = this.get_alloc_extra(alloc_id)?;
+ let stacks = alloc_extra
+ .borrow_tracker
+ .as_ref()
+ .expect("We should have borrow tracking data")
+ .assert_sb()
+ .borrow();
+ for (range, stack) in stacks.stacks.iter_all() {
+ print!("{range:?}: [");
+ if let Some(bottom) = stack.unknown_bottom() {
+ print!(" unknown-bottom(..{bottom:?})");
+ }
+ for i in 0..stack.len() {
+ let item = stack.get(i).unwrap();
+ print!(" {:?}{:?}", item.perm(), item.tag());
+ }
+ println!(" ]");
+ }
+ Ok(())
+ }
+}
--- /dev/null
+#[cfg(feature = "stack-cache")]
+use std::ops::Range;
+
+use rustc_data_structures::fx::FxHashSet;
+
+use crate::borrow_tracker::{
+ stacked_borrows::{Item, Permission},
+ AccessKind, BorTag,
+};
+use crate::ProvenanceExtra;
+
+/// Exactly what cache size we should use is a difficult tradeoff. There will always be some
+/// workload which has a `BorTag` working set which exceeds the size of the cache, and ends up
+/// falling back to linear searches of the borrow stack very often.
+/// The cost of making this value too large is that the loop in `Stack::insert` which ensures the
+/// entries in the cache stay correct after an insert becomes expensive.
+#[cfg(feature = "stack-cache")]
+const CACHE_LEN: usize = 32;
+
+/// Extra per-location state.
+#[derive(Clone, Debug)]
+pub struct Stack {
+ /// Used *mostly* as a stack; never empty.
+ /// Invariants:
+ /// * Above a `SharedReadOnly` there can only be more `SharedReadOnly`.
+ /// * No tag occurs in the stack more than once.
+ borrows: Vec<Item>,
+ /// If this is `Some(id)`, then the actual current stack is unknown. This can happen when
+ /// wildcard pointers are used to access this location. What we do know is that `borrows` are at
+ /// the top of the stack, and below it are arbitrarily many items whose `tag` is strictly less
+ /// than `id`.
+ /// When the bottom is unknown, `borrows` always has a `SharedReadOnly` or `Unique` at the bottom;
+ /// we never have the unknown-to-known boundary in an SRW group.
+ unknown_bottom: Option<BorTag>,
+
+ /// A small LRU cache of searches of the borrow stack.
+ #[cfg(feature = "stack-cache")]
+ cache: StackCache,
+ /// On a read, we need to disable all `Unique` above the granting item. We can avoid most of
+ /// this scan by keeping track of the region of the borrow stack that may contain `Unique`s.
+ #[cfg(feature = "stack-cache")]
+ unique_range: Range<usize>,
+}
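+
+// A sketch of an unknown-bottom stack (tag numbers are illustrative): after a
+// wildcard access collapsed the stack, it might look like
+// `borrows = [Unique(t10)]` with `unknown_bottom = Some(t8)`: any tag strictly
+// below t8 may or may not still exist below `borrows`, so searches for such
+// tags conservatively treat the unknown part as a possible match.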
+
+impl Stack {
+ pub fn retain(&mut self, tags: &FxHashSet<BorTag>) {
+ let mut first_removed = None;
+
+ // We never consider removing the bottom-most tag. For stacks without an unknown
+ // bottom this preserves the base tag.
+ // Note that the algorithm below is based on considering the tag at read_idx - 1,
+ // so precisely considering the tag at index 0 for removal when we have an unknown
+ // bottom would complicate the implementation. The simplification of not considering
+ // it does not have a significant impact on the degree to which the GC mitigates
+ // memory growth.
+ let mut read_idx = 1;
+ let mut write_idx = read_idx;
+ while read_idx < self.borrows.len() {
+ let left = self.borrows[read_idx - 1];
+ let this = self.borrows[read_idx];
+ let should_keep = match this.perm() {
+ // SharedReadWrite is the simplest case, if it's unreachable we can just remove it.
+ Permission::SharedReadWrite => tags.contains(&this.tag()),
+ // Only retain a Disabled tag if it is terminating a SharedReadWrite block.
+ Permission::Disabled => left.perm() == Permission::SharedReadWrite,
+ // Unique and SharedReadOnly can terminate a SharedReadWrite block, so only remove
+ // them if they are both unreachable and not directly after a SharedReadWrite.
+ Permission::Unique | Permission::SharedReadOnly =>
+ left.perm() == Permission::SharedReadWrite || tags.contains(&this.tag()),
+ };
+
+ if should_keep {
+ if read_idx != write_idx {
+ self.borrows[write_idx] = self.borrows[read_idx];
+ }
+ write_idx += 1;
+ } else if first_removed.is_none() {
+ first_removed = Some(read_idx);
+ }
+
+ read_idx += 1;
+ }
+ self.borrows.truncate(write_idx);
+
+ #[cfg(not(feature = "stack-cache"))]
+ drop(first_removed); // This is only needed for the stack-cache
+
+ #[cfg(feature = "stack-cache")]
+ if let Some(first_removed) = first_removed {
+ // Either end of unique_range may have shifted; all we really know is that we can't
+ // have introduced a new Unique.
+ if !self.unique_range.is_empty() {
+ self.unique_range = 0..self.len();
+ }
+
+ // Replace any Items which have been collected with the base item, a known-good value.
+ for i in 0..CACHE_LEN {
+ if self.cache.idx[i] >= first_removed {
+ self.cache.items[i] = self.borrows[0];
+ self.cache.idx[i] = 0;
+ }
+ }
+ }
+ }
+}
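+
+// A minimal, self-contained sketch of the two-cursor compaction used by `retain`
+// above, on plain integers (hypothetical data, not Miri types):
+//
+// let mut v = vec![1, 2, 3, 4, 5];
+// let keep = |x: i32| x % 2 == 1;
+// let (mut read, mut write) = (0, 0);
+// while read < v.len() {
+// if keep(v[read]) {
+// v[write] = v[read]; // keep this element, compacting leftwards
+// write += 1;
+// }
+// read += 1;
+// }
+// v.truncate(write);
+// assert_eq!(v, [1, 3, 5]);
+//
+// `Stack::retain` starts both cursors at 1 instead of 0 so the base item always
+// survives, and additionally consults the neighbour at `read_idx - 1` to keep
+// items that terminate a SharedReadWrite block.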
+
+/// A very small cache of searches of a borrow stack, mapping `Item`s to their position in said stack.
+///
+/// It may seem like maintaining this cache is a waste for small stacks, but
+/// (a) iterating over small fixed-size arrays is super fast, and (b) empirically this helps *a lot*,
+/// probably because runtime is dominated by large stacks.
+#[cfg(feature = "stack-cache")]
+#[derive(Clone, Debug)]
+struct StackCache {
+ items: [Item; CACHE_LEN], // Hot in find_granting
+ idx: [usize; CACHE_LEN], // Hot in grant
+}
+
+#[cfg(feature = "stack-cache")]
+impl StackCache {
+ /// When a tag is used, we call this function to add or refresh it in the cache.
+ ///
+ /// We use the position in the cache to represent how recently a tag was used; the first position
+ /// is the most recently used tag. So an add shifts every element towards the end, and inserts
+ /// the new element at the start. We lose the last element.
+ /// This strategy is effective at keeping the most-accessed items in the cache, but it costs a
+ /// linear shift across the entire cache when we add a new tag.
+ fn add(&mut self, idx: usize, item: Item) {
+ self.items.copy_within(0..CACHE_LEN - 1, 1);
+ self.items[0] = item;
+ self.idx.copy_within(0..CACHE_LEN - 1, 1);
+ self.idx[0] = idx;
+ }
+}
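+
+// A self-contained sketch of the shift performed by `StackCache::add` (hypothetical
+// values, not Miri types):
+//
+// let mut items = [10u64, 20, 30, 40];
+// items.copy_within(0..3, 1); // [10, 10, 20, 30]: everything moves one slot back
+// items[0] = 99; // [99, 10, 20, 30]: new entry in front, `40` is lost
+//
+// This is the classic array-backed LRU: lookups are a linear scan of a tiny array,
+// and an insert costs one memmove.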
+
+impl PartialEq for Stack {
+ fn eq(&self, other: &Self) -> bool {
+ // All the semantics of Stack are in self.borrows, everything else is caching
+ self.borrows == other.borrows
+ }
+}
+
+impl Eq for Stack {}
+
+impl<'tcx> Stack {
+ /// Panics if any of the caching mechanisms have broken:
+ /// - The `StackCache` indices don't refer to the parallel items,
+ /// - There are `Unique` items outside of `unique_range`.
+ #[cfg(all(feature = "stack-cache", debug_assertions))]
+ fn verify_cache_consistency(&self) {
+ // Only a full cache needs to be valid. Also see the comments in find_granting_cache
+ // and set_unknown_bottom.
+ if self.borrows.len() >= CACHE_LEN {
+ for (tag, stack_idx) in self.cache.items.iter().zip(self.cache.idx.iter()) {
+ assert_eq!(self.borrows[*stack_idx], *tag);
+ }
+ }
+
+ // Check that all Unique items fall within unique_range.
+ for (idx, item) in self.borrows.iter().enumerate() {
+ if item.perm() == Permission::Unique {
+ assert!(
+ self.unique_range.contains(&idx),
+ "{:?} {:?}",
+ self.unique_range,
+ self.borrows
+ );
+ }
+ }
+
+ // Check that the unique_range is a valid index into the borrow stack.
+ // This asserts that the unique_range's start <= end.
+ let _uniques = &self.borrows[self.unique_range.clone()];
+
+ // We cannot assert that the unique range is precise.
+ // Both ends may shift around when `Stack::retain` is called. Additionally,
+ // when we pop items within the unique range, setting the end of the range precisely
+ // requires doing a linear search of the borrow stack, which is exactly the kind of
+ // operation that all this caching exists to avoid.
+ }
+
+ /// Find the item granting the given kind of access to the given tag, and return where
+ /// it is on the stack. For wildcard tags, the given index is approximate, but if *no*
+ /// index is given it means the match was *not* in the known part of the stack.
+ /// `Ok(None)` indicates it matched the "unknown" part of the stack.
+ /// `Err` indicates it was not found.
+ pub(super) fn find_granting(
+ &mut self,
+ access: AccessKind,
+ tag: ProvenanceExtra,
+ exposed_tags: &FxHashSet<BorTag>,
+ ) -> Result<Option<usize>, ()> {
+ #[cfg(all(feature = "stack-cache", debug_assertions))]
+ self.verify_cache_consistency();
+
+ let ProvenanceExtra::Concrete(tag) = tag else {
+ // Handle the wildcard case.
+ // Go search the stack for an exposed tag.
+ if let Some(idx) =
+ self.borrows
+ .iter()
+ .enumerate() // we also need to know *where* in the stack
+ .rev() // search top-to-bottom
+ .find_map(|(idx, item)| {
+ // If the item fits and *might* be this wildcard, use it.
+ if item.perm().grants(access) && exposed_tags.contains(&item.tag()) {
+ Some(idx)
+ } else {
+ None
+ }
+ })
+ {
+ return Ok(Some(idx));
+ }
+ // If we couldn't find it in the stack, check the unknown bottom.
+ return if self.unknown_bottom.is_some() { Ok(None) } else { Err(()) };
+ };
+
+ if let Some(idx) = self.find_granting_tagged(access, tag) {
+ return Ok(Some(idx));
+ }
+
+ // Couldn't find it in the stack; but if there is an unknown bottom it might be there.
+ let found = self.unknown_bottom.is_some_and(|unknown_limit| {
+ tag < unknown_limit // unknown_limit is an upper bound for what can be in the unknown bottom.
+ });
+ if found { Ok(None) } else { Err(()) }
+ }
+
+ fn find_granting_tagged(&mut self, access: AccessKind, tag: BorTag) -> Option<usize> {
+ #[cfg(feature = "stack-cache")]
+ if let Some(idx) = self.find_granting_cache(access, tag) {
+ return Some(idx);
+ }
+
+ // If we didn't find the tag in the cache, fall back to a linear search of the
+ // whole stack, and add the tag to the cache.
+ for (stack_idx, item) in self.borrows.iter().enumerate().rev() {
+ if tag == item.tag() && item.perm().grants(access) {
+ #[cfg(feature = "stack-cache")]
+ self.cache.add(stack_idx, *item);
+ return Some(stack_idx);
+ }
+ }
+ None
+ }
+
+ #[cfg(feature = "stack-cache")]
+ fn find_granting_cache(&mut self, access: AccessKind, tag: BorTag) -> Option<usize> {
+ // This looks like a common-sense optimization; we're going to do a linear search of the
+ // cache or the borrow stack to scan the shorter of the two. But that saving is minuscule;
+ // what this check actually does is ensure we never read from an invalid cache.
+ // When a stack is created and when items are removed from the top of the borrow stack, we
+ // need some valid value to populate the cache. In both cases, we try to use the bottom
+ // item. But when the stack is cleared in `set_unknown_bottom` there is nothing we could
+ // place in the cache that is correct. But due to the way we populate the cache in
+ // `StackCache::add`, we know that when the borrow stack has grown larger than the cache,
+ // every slot in the cache is valid.
+ if self.borrows.len() <= CACHE_LEN {
+ return None;
+ }
+ // Search the cache for the tag we're looking up
+ let cache_idx = self.cache.items.iter().position(|t| t.tag() == tag)?;
+ let stack_idx = self.cache.idx[cache_idx];
+ // If we found the tag, look up its position in the stack to see if it grants
+ // the required permission
+ if self.cache.items[cache_idx].perm().grants(access) {
+ // If it does, and it's not already in the most-recently-used position, move it to
+ // the most-recently-used position. If the tag is in position 1 this is just a swap,
+ // which avoids duplicating the entry. Otherwise we re-insert it via `add`, which
+ // technically reduces the efficiency of the cache by duplicating elements, but
+ // current benchmarks do not seem to benefit from avoiding this duplication.
+ if cache_idx == 1 {
+ self.cache.items.swap(0, 1);
+ self.cache.idx.swap(0, 1);
+ } else if cache_idx > 1 {
+ self.cache.add(stack_idx, self.cache.items[cache_idx]);
+ }
+ Some(stack_idx)
+ } else {
+ // Tag is in the cache, but it doesn't grant the required permission
+ None
+ }
+ }
+
+ pub fn insert(&mut self, new_idx: usize, new: Item) {
+ self.borrows.insert(new_idx, new);
+
+ #[cfg(feature = "stack-cache")]
+ self.insert_cache(new_idx, new);
+ }
+
+ #[cfg(feature = "stack-cache")]
+ fn insert_cache(&mut self, new_idx: usize, new: Item) {
+ // Adjust the possibly-unique range if an insert occurs before or within it
+ if self.unique_range.start >= new_idx {
+ self.unique_range.start += 1;
+ }
+ if self.unique_range.end >= new_idx {
+ self.unique_range.end += 1;
+ }
+ if new.perm() == Permission::Unique {
+ // If this is the only Unique, set the range to contain just the new item.
+ if self.unique_range.is_empty() {
+ self.unique_range = new_idx..new_idx + 1;
+ } else {
+ // We already have other Unique items, expand the range to include the new item
+ self.unique_range.start = self.unique_range.start.min(new_idx);
+ self.unique_range.end = self.unique_range.end.max(new_idx + 1);
+ }
+ }
+
+ // The above insert changes the meaning of every index in the cache >= new_idx, so now
+ // we need to find every one of those indexes and increment it.
+ // But if the insert is at the end (equivalent to a push), we can skip this step because
+ // it didn't change the position of any other items.
+ if new_idx != self.borrows.len() - 1 {
+ for idx in &mut self.cache.idx {
+ if *idx >= new_idx {
+ *idx += 1;
+ }
+ }
+ }
+
+ // This primes the cache for the next access, which is almost always the just-added tag.
+ self.cache.add(new_idx, new);
+
+ #[cfg(debug_assertions)]
+ self.verify_cache_consistency();
+ }
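+
+ // Sketch of the index-shift rule above on plain data (hypothetical values):
+ // inserting at position 2 of a stack moves the items at positions 2 and above
+ // up by one, so any cached index >= 2 must be incremented as well:
+ //
+ // let mut idx = [0usize, 3, 2];
+ // for i in &mut idx {
+ // if *i >= 2 { *i += 1; }
+ // }
+ // assert_eq!(idx, [0, 4, 3]);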
+
+ /// Construct a new `Stack` using the passed `Item` as the base tag.
+ pub fn new(item: Item) -> Self {
+ Stack {
+ borrows: vec![item],
+ unknown_bottom: None,
+ #[cfg(feature = "stack-cache")]
+ cache: StackCache { idx: [0; CACHE_LEN], items: [item; CACHE_LEN] },
+ #[cfg(feature = "stack-cache")]
+ unique_range: if item.perm() == Permission::Unique { 0..1 } else { 0..0 },
+ }
+ }
+
+ pub fn get(&self, idx: usize) -> Option<Item> {
+ self.borrows.get(idx).cloned()
+ }
+
+ #[allow(clippy::len_without_is_empty)] // Stacks are never empty
+ pub fn len(&self) -> usize {
+ self.borrows.len()
+ }
+
+ pub fn unknown_bottom(&self) -> Option<BorTag> {
+ self.unknown_bottom
+ }
+
+ pub fn set_unknown_bottom(&mut self, tag: BorTag) {
+ // We clear the borrow stack but the lookup cache doesn't support clearing per se. Instead,
+ // there is a check explained in `find_granting_cache` which protects against accessing the
+ // cache when it has been cleared and not yet refilled.
+ self.borrows.clear();
+ self.unknown_bottom = Some(tag);
+ #[cfg(feature = "stack-cache")]
+ {
+ self.unique_range = 0..0;
+ }
+ }
+
+ /// Find all `Unique` elements in this borrow stack at index `disable_start` or above,
+ /// pass a copy of them to the `visitor`, then set their `Permission` to `Disabled`.
+ pub fn disable_uniques_starting_at(
+ &mut self,
+ disable_start: usize,
+ mut visitor: impl FnMut(Item) -> crate::InterpResult<'tcx>,
+ ) -> crate::InterpResult<'tcx> {
+ #[cfg(feature = "stack-cache")]
+ let unique_range = self.unique_range.clone();
+ #[cfg(not(feature = "stack-cache"))]
+ let unique_range = 0..self.len();
+
+ if disable_start <= unique_range.end {
+ let lower = unique_range.start.max(disable_start);
+ let upper = unique_range.end;
+ for item in &mut self.borrows[lower..upper] {
+ if item.perm() == Permission::Unique {
+ log::trace!("access: disabling item {:?}", item);
+ visitor(*item)?;
+ item.set_permission(Permission::Disabled);
+ // Also update all copies of this item in the cache.
+ #[cfg(feature = "stack-cache")]
+ for it in &mut self.cache.items {
+ if it.tag() == item.tag() {
+ it.set_permission(Permission::Disabled);
+ }
+ }
+ }
+ }
+ }
+
+ #[cfg(feature = "stack-cache")]
+ if disable_start <= self.unique_range.start {
+ // We disabled all Unique items
+ self.unique_range.start = 0;
+ self.unique_range.end = 0;
+ } else {
+ // Truncate the range to only include items up to the index that we started disabling
+ // at.
+ self.unique_range.end = self.unique_range.end.min(disable_start);
+ }
+
+ #[cfg(all(feature = "stack-cache", debug_assertions))]
+ self.verify_cache_consistency();
+
+ Ok(())
+ }
+
+ /// Pop all `Item`s with index `start` or higher off this `Stack`, passing each popped
+ /// `Item` to `visitor` (top of the stack first), and then repair the search cache.
+ pub fn pop_items_after<V: FnMut(Item) -> crate::InterpResult<'tcx>>(
+ &mut self,
+ start: usize,
+ mut visitor: V,
+ ) -> crate::InterpResult<'tcx> {
+ while self.borrows.len() > start {
+ let item = self.borrows.pop().unwrap();
+ visitor(item)?;
+ }
+
+ #[cfg(feature = "stack-cache")]
+ if !self.borrows.is_empty() {
+ // After we remove from the borrow stack, every aspect of our caching may be invalid, but it is
+ // also possible that the whole cache is still valid. So we call this method to repair what
+ // aspects of the cache are now invalid, instead of resetting the whole thing to a trivially
+ // valid default state.
+ let base_tag = self.borrows[0];
+ let mut removed = 0;
+ let mut cursor = 0;
+ // Remove invalid entries from the cache by rotating them to the end of the cache, then
+ // keep track of how many invalid elements there are and overwrite them with the base tag.
+ // The base tag here serves as a harmless default value.
+ for _ in 0..CACHE_LEN - 1 {
+ if self.cache.idx[cursor] >= start {
+ self.cache.idx[cursor..CACHE_LEN - removed].rotate_left(1);
+ self.cache.items[cursor..CACHE_LEN - removed].rotate_left(1);
+ removed += 1;
+ } else {
+ cursor += 1;
+ }
+ }
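+ // Worked example of the pass above (hypothetical state, CACHE_LEN = 4): with
+ // idx = [5, 1, 7, 2] and start = 4, the stale indices (5 and 7) are rotated
+ // behind the live ones, ending as [1, 2, 7, 5]. The slot at
+ // CACHE_LEN - removed - 1 was never re-examined after the final rotation, so
+ // the overwrite below conservatively starts there rather than at
+ // CACHE_LEN - removed.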
+ for i in CACHE_LEN - removed - 1..CACHE_LEN {
+ self.cache.idx[i] = 0;
+ self.cache.items[i] = base_tag;
+ }
+
+ if start <= self.unique_range.start {
+ // We removed all the Unique items
+ self.unique_range = 0..0;
+ } else {
+ // Ensure the range doesn't extend past the new top of the stack
+ self.unique_range.end = self.unique_range.end.min(start);
+ }
+ } else {
+ self.unique_range = 0..0;
+ }
+
+ #[cfg(all(feature = "stack-cache", debug_assertions))]
+ self.verify_cache_consistency();
+ Ok(())
+ }
+}
+++ /dev/null
-use smallvec::SmallVec;
-use std::fmt;
-
-use rustc_middle::mir::interpret::{alloc_range, AllocId, AllocRange};
-use rustc_span::{Span, SpanData};
-use rustc_target::abi::Size;
-
-use crate::stacked_borrows::{
- err_sb_ub, AccessKind, GlobalStateInner, Permission, ProtectorKind, Stack,
-};
-use crate::*;
-
-use rustc_middle::mir::interpret::InterpError;
-
-#[derive(Clone, Debug)]
-pub struct AllocHistory {
- id: AllocId,
- base: (Item, Span),
- creations: smallvec::SmallVec<[Creation; 1]>,
- invalidations: smallvec::SmallVec<[Invalidation; 1]>,
- protectors: smallvec::SmallVec<[Protection; 1]>,
-}
-
-#[derive(Clone, Debug)]
-struct Creation {
- retag: RetagOp,
- span: Span,
-}
-
-impl Creation {
- fn generate_diagnostic(&self) -> (String, SpanData) {
- let tag = self.retag.new_tag;
- if let Some(perm) = self.retag.permission {
- (
- format!(
- "{tag:?} was created by a {:?} retag at offsets {:?}",
- perm, self.retag.range,
- ),
- self.span.data(),
- )
- } else {
- assert!(self.retag.range.size == Size::ZERO);
- (
- format!(
- "{tag:?} would have been created here, but this is a zero-size retag ({:?}) so the tag in question does not exist anywhere",
- self.retag.range,
- ),
- self.span.data(),
- )
- }
- }
-}
-
-#[derive(Clone, Debug)]
-struct Invalidation {
- tag: SbTag,
- range: AllocRange,
- span: Span,
- cause: InvalidationCause,
-}
-
-#[derive(Clone, Debug)]
-enum InvalidationCause {
- Access(AccessKind),
- Retag(Permission, RetagCause),
-}
-
-impl Invalidation {
- fn generate_diagnostic(&self) -> (String, SpanData) {
- let message = if let InvalidationCause::Retag(_, RetagCause::FnEntry) = self.cause {
- // For a FnEntry retag, our Span points at the caller.
- // See `DiagnosticCx::log_invalidation`.
- format!(
- "{:?} was later invalidated at offsets {:?} by a {} inside this call",
- self.tag, self.range, self.cause
- )
- } else {
- format!(
- "{:?} was later invalidated at offsets {:?} by a {}",
- self.tag, self.range, self.cause
- )
- };
- (message, self.span.data())
- }
-}
-
-impl fmt::Display for InvalidationCause {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self {
- InvalidationCause::Access(kind) => write!(f, "{kind}"),
- InvalidationCause::Retag(perm, kind) =>
- if *kind == RetagCause::FnEntry {
- write!(f, "{perm:?} FnEntry retag")
- } else {
- write!(f, "{perm:?} retag")
- },
- }
- }
-}
-
-#[derive(Clone, Debug)]
-struct Protection {
- tag: SbTag,
- span: Span,
-}
-
-#[derive(Clone)]
-pub struct TagHistory {
- pub created: (String, SpanData),
- pub invalidated: Option<(String, SpanData)>,
- pub protected: Option<(String, SpanData)>,
-}
-
-pub struct DiagnosticCxBuilder<'ecx, 'mir, 'tcx> {
- operation: Operation,
- machine: &'ecx MiriMachine<'mir, 'tcx>,
-}
-
-pub struct DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
- operation: Operation,
- machine: &'ecx MiriMachine<'mir, 'tcx>,
- history: &'history mut AllocHistory,
- offset: Size,
-}
-
-impl<'ecx, 'mir, 'tcx> DiagnosticCxBuilder<'ecx, 'mir, 'tcx> {
- pub fn build<'history>(
- self,
- history: &'history mut AllocHistory,
- offset: Size,
- ) -> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
- DiagnosticCx { operation: self.operation, machine: self.machine, history, offset }
- }
-
- pub fn retag(
- machine: &'ecx MiriMachine<'mir, 'tcx>,
- cause: RetagCause,
- new_tag: SbTag,
- orig_tag: ProvenanceExtra,
- range: AllocRange,
- ) -> Self {
- let operation =
- Operation::Retag(RetagOp { cause, new_tag, orig_tag, range, permission: None });
-
- DiagnosticCxBuilder { machine, operation }
- }
-
- pub fn read(
- machine: &'ecx MiriMachine<'mir, 'tcx>,
- tag: ProvenanceExtra,
- range: AllocRange,
- ) -> Self {
- let operation = Operation::Access(AccessOp { kind: AccessKind::Read, tag, range });
- DiagnosticCxBuilder { machine, operation }
- }
-
- pub fn write(
- machine: &'ecx MiriMachine<'mir, 'tcx>,
- tag: ProvenanceExtra,
- range: AllocRange,
- ) -> Self {
- let operation = Operation::Access(AccessOp { kind: AccessKind::Write, tag, range });
- DiagnosticCxBuilder { machine, operation }
- }
-
- pub fn dealloc(machine: &'ecx MiriMachine<'mir, 'tcx>, tag: ProvenanceExtra) -> Self {
- let operation = Operation::Dealloc(DeallocOp { tag });
- DiagnosticCxBuilder { machine, operation }
- }
-}
-
-impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
- pub fn unbuild(self) -> DiagnosticCxBuilder<'ecx, 'mir, 'tcx> {
- DiagnosticCxBuilder { machine: self.machine, operation: self.operation }
- }
-}
-
-#[derive(Debug, Clone)]
-enum Operation {
- Retag(RetagOp),
- Access(AccessOp),
- Dealloc(DeallocOp),
-}
-
-#[derive(Debug, Clone)]
-struct RetagOp {
- cause: RetagCause,
- new_tag: SbTag,
- orig_tag: ProvenanceExtra,
- range: AllocRange,
- permission: Option<Permission>,
-}
-
-#[derive(Debug, Clone, Copy, PartialEq)]
-pub enum RetagCause {
- Normal,
- FnReturn,
- FnEntry,
- TwoPhase,
-}
-
-#[derive(Debug, Clone)]
-struct AccessOp {
- kind: AccessKind,
- tag: ProvenanceExtra,
- range: AllocRange,
-}
-
-#[derive(Debug, Clone)]
-struct DeallocOp {
- tag: ProvenanceExtra,
-}
-
-impl AllocHistory {
- pub fn new(id: AllocId, item: Item, machine: &MiriMachine<'_, '_>) -> Self {
- Self {
- id,
- base: (item, machine.current_span()),
- creations: SmallVec::new(),
- invalidations: SmallVec::new(),
- protectors: SmallVec::new(),
- }
- }
-}
-
-impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
- pub fn start_grant(&mut self, perm: Permission) {
- let Operation::Retag(op) = &mut self.operation else {
- unreachable!("start_grant must only be called during a retag, this is: {:?}", self.operation)
- };
- op.permission = Some(perm);
-
- let last_creation = &mut self.history.creations.last_mut().unwrap();
- match last_creation.retag.permission {
- None => {
- last_creation.retag.permission = Some(perm);
- }
- Some(previous) =>
- if previous != perm {
- // 'Split up' the creation event.
- let previous_range = last_creation.retag.range;
- // `alloc_range` takes a start and a *size*, so subtract the start of each sub-range.
- last_creation.retag.range = alloc_range(previous_range.start, self.offset - previous_range.start);
- let mut new_event = last_creation.clone();
- new_event.retag.range = alloc_range(self.offset, previous_range.end() - self.offset);
- new_event.retag.permission = Some(perm);
- self.history.creations.push(new_event);
- },
- }
- }
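-
- // Worked example of the split above (hypothetical offsets): a retag over 0..16 that
- // grants SharedReadOnly to the frozen prefix and SharedReadWrite from offset 8 onward
- // is recorded as two creation events, one covering 0..8 (SharedReadOnly) and one
- // covering 8..16 (SharedReadWrite), both reporting the same span.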
-
- pub fn log_creation(&mut self) {
- let Operation::Retag(op) = &self.operation else {
- unreachable!("log_creation must only be called during a retag")
- };
- self.history
- .creations
- .push(Creation { retag: op.clone(), span: self.machine.current_span() });
- }
-
- pub fn log_invalidation(&mut self, tag: SbTag) {
- let mut span = self.machine.current_span();
- let (range, cause) = match &self.operation {
- Operation::Retag(RetagOp { cause, range, permission, .. }) => {
- if *cause == RetagCause::FnEntry {
- span = self.machine.caller_span();
- }
- (*range, InvalidationCause::Retag(permission.unwrap(), *cause))
- }
- Operation::Access(AccessOp { kind, range, .. }) =>
- (*range, InvalidationCause::Access(*kind)),
- Operation::Dealloc(_) => {
- // This can be reached, but it will never be relevant later, since the entire
- // allocation is gone now.
- return;
- }
- };
- self.history.invalidations.push(Invalidation { tag, range, span, cause });
- }
-
- pub fn log_protector(&mut self) {
- let Operation::Retag(op) = &self.operation else {
- unreachable!("Protectors can only be created during a retag")
- };
- self.history
- .protectors
- .push(Protection { tag: op.new_tag, span: self.machine.current_span() });
- }
-
- pub fn get_logs_relevant_to(
- &self,
- tag: SbTag,
- protector_tag: Option<SbTag>,
- ) -> Option<TagHistory> {
- let Some(created) = self.history
- .creations
- .iter()
- .rev()
- .find_map(|event| {
- // First, look for a Creation event where the tag and the offset matches. This
- // ensures that we pick the right Creation event when a retag isn't uniform due to
- // Freeze.
- let range = event.retag.range;
- if event.retag.new_tag == tag
- && self.offset >= range.start
- && self.offset < (range.start + range.size)
- {
- Some(event.generate_diagnostic())
- } else {
- None
- }
- })
- .or_else(|| {
- // If we didn't find anything with a matching offset, just return the event where
- // the tag was created. This branch is hit when we use a tag at an offset whose
- // borrow stack does not contain that tag.
- self.history.creations.iter().rev().find_map(|event| {
- if event.retag.new_tag == tag {
- Some(event.generate_diagnostic())
- } else {
- None
- }
- })
- }).or_else(|| {
- // If we didn't find a retag that created this tag, it might be the base tag of
- // this allocation.
- if self.history.base.0.tag() == tag {
- Some((
- format!("{tag:?} was created here, as the base tag for {:?}", self.history.id),
- self.history.base.1.data()
- ))
- } else {
- None
- }
- }) else {
- // But if we don't have a creation event, this is related to a wildcard, and there
- // is really nothing we can do to help.
- return None;
- };
-
- let invalidated = self.history.invalidations.iter().rev().find_map(|event| {
- if event.tag == tag { Some(event.generate_diagnostic()) } else { None }
- });
-
- let protected = protector_tag
- .and_then(|protector| {
- self.history.protectors.iter().find(|protection| protection.tag == protector)
- })
- .map(|protection| {
- let protected_tag = protection.tag;
- (format!("{protected_tag:?} is this argument"), protection.span.data())
- });
-
- Some(TagHistory { created, invalidated, protected })
- }
-
- /// Report a descriptive error when `new` could not be granted from `derived_from`.
- #[inline(never)] // This is only called on fatal code paths
- pub(super) fn grant_error(&self, stack: &Stack) -> InterpError<'tcx> {
- let Operation::Retag(op) = &self.operation else {
- unreachable!("grant_error should only be called during a retag")
- };
- let perm =
- op.permission.expect("`start_grant` must be called before calling `grant_error`");
- let action = format!(
- "trying to retag from {:?} for {:?} permission at {:?}[{:#x}]",
- op.orig_tag,
- perm,
- self.history.id,
- self.offset.bytes(),
- );
- err_sb_ub(
- format!("{action}{}", error_cause(stack, op.orig_tag)),
- Some(operation_summary(&op.cause.summary(), self.history.id, op.range)),
- op.orig_tag.and_then(|orig_tag| self.get_logs_relevant_to(orig_tag, None)),
- )
- }
-
- /// Report a descriptive error when `access` is not permitted based on `tag`.
- #[inline(never)] // This is only called on fatal code paths
- pub(super) fn access_error(&self, stack: &Stack) -> InterpError<'tcx> {
- // Deallocation and retagging also perform an access as part of their operation, so handle that here, too.
- let op = match &self.operation {
- Operation::Access(op) => op,
- Operation::Retag(_) => return self.grant_error(stack),
- Operation::Dealloc(_) => return self.dealloc_error(stack),
- };
- let action = format!(
- "attempting a {access} using {tag:?} at {alloc_id:?}[{offset:#x}]",
- access = op.kind,
- tag = op.tag,
- alloc_id = self.history.id,
- offset = self.offset.bytes(),
- );
- err_sb_ub(
- format!("{action}{}", error_cause(stack, op.tag)),
- Some(operation_summary("an access", self.history.id, op.range)),
- op.tag.and_then(|tag| self.get_logs_relevant_to(tag, None)),
- )
- }
-
- #[inline(never)] // This is only called on fatal code paths
- pub(super) fn protector_error(&self, item: &Item, kind: ProtectorKind) -> InterpError<'tcx> {
- let protected = match kind {
- ProtectorKind::WeakProtector => "weakly protected",
- ProtectorKind::StrongProtector => "strongly protected",
- };
- let call_id = self
- .machine
- .threads
- .all_stacks()
- .flatten()
- .map(|frame| {
- frame.extra.stacked_borrows.as_ref().expect("we should have Stacked Borrows data")
- })
- .find(|frame| frame.protected_tags.contains(&item.tag()))
- .map(|frame| frame.call_id)
- .unwrap(); // FIXME: Surely we should find something, but a panic seems wrong here?
- match self.operation {
- Operation::Dealloc(_) =>
- err_sb_ub(
- format!("deallocating while item {item:?} is {protected} by call {call_id:?}",),
- None,
- None,
- ),
- Operation::Retag(RetagOp { orig_tag: tag, .. })
- | Operation::Access(AccessOp { tag, .. }) =>
- err_sb_ub(
- format!(
- "not granting access to tag {tag:?} because that would remove {item:?} which is {protected} because it is an argument of call {call_id:?}",
- ),
- None,
- tag.and_then(|tag| self.get_logs_relevant_to(tag, Some(item.tag()))),
- ),
- }
- }
-
- #[inline(never)] // This is only called on fatal code paths
- pub fn dealloc_error(&self, stack: &Stack) -> InterpError<'tcx> {
- let Operation::Dealloc(op) = &self.operation else {
- unreachable!("dealloc_error should only be called during a deallocation")
- };
- err_sb_ub(
- format!(
- "attempting deallocation using {tag:?} at {alloc_id:?}{cause}",
- tag = op.tag,
- alloc_id = self.history.id,
- cause = error_cause(stack, op.tag),
- ),
- None,
- op.tag.and_then(|tag| self.get_logs_relevant_to(tag, None)),
- )
- }
-
- #[inline(never)]
- pub fn check_tracked_tag_popped(&self, item: &Item, global: &GlobalStateInner) {
- if !global.tracked_pointer_tags.contains(&item.tag()) {
- return;
- }
- let summary = match self.operation {
- Operation::Dealloc(_) => None,
- Operation::Access(AccessOp { kind, tag, .. }) => Some((tag, kind)),
- Operation::Retag(RetagOp { orig_tag, permission, .. }) => {
- let kind = match permission
- .expect("start_grant should set the current permission before popping a tag")
- {
- Permission::SharedReadOnly => AccessKind::Read,
- Permission::Unique => AccessKind::Write,
- Permission::SharedReadWrite | Permission::Disabled => {
- panic!("Only SharedReadOnly and Unique retags can pop tags");
- }
- };
- Some((orig_tag, kind))
- }
- };
- self.machine.emit_diagnostic(NonHaltingDiagnostic::PoppedPointerTag(*item, summary));
- }
-}
-
-fn operation_summary(operation: &str, alloc_id: AllocId, alloc_range: AllocRange) -> String {
- format!("this error occurs as part of {operation} at {alloc_id:?}{alloc_range:?}")
-}
-
-fn error_cause(stack: &Stack, prov_extra: ProvenanceExtra) -> &'static str {
- if let ProvenanceExtra::Concrete(tag) = prov_extra {
- if (0..stack.len())
- .map(|i| stack.get(i).unwrap())
- .any(|item| item.tag() == tag && item.perm() != Permission::Disabled)
- {
- ", but that tag only grants SharedReadOnly permission for this location"
- } else {
- ", but that tag does not exist in the borrow stack for this location"
- }
- } else {
- ", but no exposed tags have suitable permission in the borrow stack for this location"
- }
-}
-
-impl RetagCause {
- fn summary(&self) -> String {
- match self {
- RetagCause::Normal => "retag",
- RetagCause::FnEntry => "FnEntry retag",
- RetagCause::FnReturn => "FnReturn retag",
- RetagCause::TwoPhase => "two-phase retag",
- }
- .to_string()
- }
-}
+++ /dev/null
-use crate::stacked_borrows::SbTag;
-use std::fmt;
-use std::num::NonZeroU64;
-
-/// An item in the per-location borrow stack.
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct Item(u64);
-
-// An Item contains 3 bitfields:
-// * Bits 0-60 store an SbTag
-// * Bits 61-62 store a Permission
-// * Bit 63 stores a flag which indicates if we have a protector
-const TAG_MASK: u64 = u64::MAX >> 3;
-const PERM_MASK: u64 = 0x3 << 61;
-const PROTECTED_MASK: u64 = 0x1 << 63;
-
-const PERM_SHIFT: u64 = 61;
-const PROTECTED_SHIFT: u64 = 63;
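-
-// Worked example of the packing (hypothetical values: tag <5>, SharedReadOnly, protected):
-// tag bits: 5
-// perm bits: 2 << 61 = 0x4000_0000_0000_0000
-// protected bit: 1 << 63 = 0x8000_0000_0000_0000
-// packed Item: 0xC000_0000_0000_0005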
-
-impl Item {
- pub fn new(tag: SbTag, perm: Permission, protected: bool) -> Self {
- assert!(tag.0.get() <= TAG_MASK);
- let packed_tag = tag.0.get();
- let packed_perm = perm.to_bits() << PERM_SHIFT;
- let packed_protected = u64::from(protected) << PROTECTED_SHIFT;
-
- let new = Self(packed_tag | packed_perm | packed_protected);
-
- debug_assert!(new.tag() == tag);
- debug_assert!(new.perm() == perm);
- debug_assert!(new.protected() == protected);
-
- new
- }
-
- /// The tag of the pointer(s) this permission is granted to.
- pub fn tag(self) -> SbTag {
- SbTag(NonZeroU64::new(self.0 & TAG_MASK).unwrap())
- }
-
- /// The permission this item grants.
- pub fn perm(self) -> Permission {
- Permission::from_bits((self.0 & PERM_MASK) >> PERM_SHIFT)
- }
-
- /// Whether or not there is a protector for this tag
- pub fn protected(self) -> bool {
- self.0 & PROTECTED_MASK > 0
- }
-
- /// Set the Permission stored in this Item
- pub fn set_permission(&mut self, perm: Permission) {
- // Clear the currently stored permission
- self.0 &= !PERM_MASK;
- // Write the new permission into the Permission bits
- self.0 |= perm.to_bits() << PERM_SHIFT;
- }
-}
-
-impl fmt::Debug for Item {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "[{:?} for {:?}]", self.perm(), self.tag())
- }
-}
-
-/// Indicates which permission is granted (by this item to some pointers)
-#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
-pub enum Permission {
- /// Grants unique mutable access.
- Unique,
- /// Grants shared mutable access.
- SharedReadWrite,
- /// Grants shared read-only access.
- SharedReadOnly,
- /// Grants no access, but separates two groups of SharedReadWrite so they are not
- /// all considered mutually compatible.
- Disabled,
-}
-
-impl Permission {
- const UNIQUE: u64 = 0;
- const SHARED_READ_WRITE: u64 = 1;
- const SHARED_READ_ONLY: u64 = 2;
- const DISABLED: u64 = 3;
-
- fn to_bits(self) -> u64 {
- match self {
- Permission::Unique => Self::UNIQUE,
- Permission::SharedReadWrite => Self::SHARED_READ_WRITE,
- Permission::SharedReadOnly => Self::SHARED_READ_ONLY,
- Permission::Disabled => Self::DISABLED,
- }
- }
-
- fn from_bits(perm: u64) -> Self {
- match perm {
- Self::UNIQUE => Permission::Unique,
- Self::SHARED_READ_WRITE => Permission::SharedReadWrite,
- Self::SHARED_READ_ONLY => Permission::SharedReadOnly,
- Self::DISABLED => Permission::Disabled,
- _ => unreachable!(),
- }
- }
-}
+++ /dev/null
-//! Implements "Stacked Borrows". See <https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md>
-//! for further information.
-
-use log::trace;
-use std::cell::RefCell;
-use std::cmp;
-use std::fmt;
-use std::fmt::Write;
-use std::num::NonZeroU64;
-
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_hir::Mutability;
-use rustc_middle::mir::RetagKind;
-use rustc_middle::ty::{
- self,
- layout::{HasParamEnv, LayoutOf},
-};
-use rustc_target::abi::Abi;
-use rustc_target::abi::Size;
-use smallvec::SmallVec;
-
-use crate::*;
-
-pub mod diagnostics;
-use diagnostics::{AllocHistory, DiagnosticCx, DiagnosticCxBuilder, RetagCause, TagHistory};
-
-mod item;
-pub use item::{Item, Permission};
-mod stack;
-pub use stack::Stack;
-
-pub type CallId = NonZeroU64;
-
-// Even reading memory can have effects on the stack, so we need a `RefCell` here.
-pub type AllocExtra = RefCell<Stacks>;
-
-/// Tracking pointer provenance
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub struct SbTag(NonZeroU64);
-
-impl SbTag {
- pub fn new(i: u64) -> Option<Self> {
- NonZeroU64::new(i).map(SbTag)
- }
-
- // The default to be used when SB is disabled
- #[allow(clippy::should_implement_trait)]
- pub fn default() -> Self {
- Self::new(1).unwrap()
- }
-}
-
-impl fmt::Debug for SbTag {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "<{}>", self.0)
- }
-}
-
-#[derive(Debug)]
-pub struct FrameExtra {
- /// The ID of the call this frame corresponds to.
- call_id: CallId,
-
- /// If this frame is protecting any tags, they are listed here. We use this list to do
- /// incremental updates of the global list of protected tags stored in the
- /// `stacked_borrows::GlobalState` upon function return, and if we attempt to pop a protected
- /// tag, to identify which call is responsible for protecting the tag.
- /// See `Stack::item_invalidated` for more explanation.
- ///
- /// This will contain one tag per reference passed to the function, so
- /// a size of 2 is enough for the vast majority of functions.
- protected_tags: SmallVec<[SbTag; 2]>,
-}
-
-impl VisitTags for FrameExtra {
- fn visit_tags(&self, _visit: &mut dyn FnMut(SbTag)) {
- // `protected_tags` are fine to GC.
- }
-}
-
-/// Extra per-allocation state.
-#[derive(Clone, Debug)]
-pub struct Stacks {
- // Even reading memory can have effects on the stack, so we need a `RefCell` here.
- stacks: RangeMap<Stack>,
- /// Stores past operations on this allocation
- history: AllocHistory,
- /// The set of tags that have been exposed inside this allocation.
- exposed_tags: FxHashSet<SbTag>,
- /// Whether this memory has been modified since the last time the tag GC ran
- modified_since_last_gc: bool,
-}
-
-/// The flavor of the protector.
-#[derive(Copy, Clone, Debug)]
-enum ProtectorKind {
- /// Protected against aliasing violations from other pointers.
- ///
- /// Items protected like this cause UB when they are invalidated, *but* the pointer itself may
- /// still be used to issue a deallocation.
- ///
- /// This is required for LLVM IR pointers that are `noalias` but *not* `dereferenceable`.
- WeakProtector,
-
- /// Protected against any kind of invalidation.
- ///
- /// Items protected like this cause UB when they are invalidated or the memory is deallocated.
- /// This is strictly stronger protection than `WeakProtector`.
- ///
- /// This is required for LLVM IR pointers that are `dereferenceable` (and also allows `noalias`).
- StrongProtector,
-}
-
-/// Extra global state, available to the memory access hooks.
-#[derive(Debug)]
-pub struct GlobalStateInner {
- /// Next unused pointer ID (tag).
- next_ptr_tag: SbTag,
- /// Table storing the "base" tag for each allocation.
- /// The base tag is the one used for the initial pointer.
- /// We need this in a separate table to handle cyclic statics.
- base_ptr_tags: FxHashMap<AllocId, SbTag>,
- /// Next unused call ID (for protectors).
- next_call_id: CallId,
- /// All currently protected tags, and the status of their protection.
- /// An item is protected if its tag is in this set, *and* it has the "protected" bit set.
- /// We add tags to this when they are created with a protector in `reborrow`, and
- /// we remove tags from this when the call which is protecting them returns, in
- /// `GlobalStateInner::end_call`. See `Stack::item_invalidated` for more details.
- protected_tags: FxHashMap<SbTag, ProtectorKind>,
- /// The pointer ids to trace
- tracked_pointer_tags: FxHashSet<SbTag>,
- /// The call ids to trace
- tracked_call_ids: FxHashSet<CallId>,
- /// Whether to recurse into datatypes when searching for pointers to retag.
- retag_fields: RetagFields,
-}
-
-#[derive(Copy, Clone, Debug)]
-pub enum RetagFields {
- /// Don't retag any fields.
- No,
- /// Retag all fields.
- Yes,
- /// Only retag fields of types with Scalar and ScalarPair layout,
- /// to match the LLVM `noalias` we generate.
- OnlyScalar,
-}
-
-impl VisitTags for GlobalStateInner {
- fn visit_tags(&self, _visit: &mut dyn FnMut(SbTag)) {
- // The only candidate is base_ptr_tags, and that does not need visiting since we don't ever
- // GC the bottommost tag.
- }
-}
-
-/// We need interior mutable access to the global state.
-pub type GlobalState = RefCell<GlobalStateInner>;
-
-/// Indicates which kind of access is being performed.
-#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
-pub enum AccessKind {
- Read,
- Write,
-}
-
-impl fmt::Display for AccessKind {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self {
- AccessKind::Read => write!(f, "read access"),
- AccessKind::Write => write!(f, "write access"),
- }
- }
-}
-
-/// Indicates which kind of reference is being created.
-/// Used by high-level `reborrow` to compute which permissions to grant to the
-/// new pointer.
-#[derive(Copy, Clone, Hash, PartialEq, Eq)]
-pub enum RefKind {
- /// `&mut` and `Box`.
- Unique { two_phase: bool },
- /// `&` with or without interior mutability.
- Shared,
- /// `*mut`/`*const` (raw pointers).
- Raw { mutable: bool },
-}
-
-impl fmt::Display for RefKind {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self {
- RefKind::Unique { two_phase: false } => write!(f, "unique reference"),
- RefKind::Unique { two_phase: true } => write!(f, "unique reference (two-phase)"),
- RefKind::Shared => write!(f, "shared reference"),
- RefKind::Raw { mutable: true } => write!(f, "raw (mutable) pointer"),
- RefKind::Raw { mutable: false } => write!(f, "raw (constant) pointer"),
- }
- }
-}
-
-/// Utilities for initialization and ID generation
-impl GlobalStateInner {
- pub fn new(
- tracked_pointer_tags: FxHashSet<SbTag>,
- tracked_call_ids: FxHashSet<CallId>,
- retag_fields: RetagFields,
- ) -> Self {
- GlobalStateInner {
- next_ptr_tag: SbTag(NonZeroU64::new(1).unwrap()),
- base_ptr_tags: FxHashMap::default(),
- next_call_id: NonZeroU64::new(1).unwrap(),
- protected_tags: FxHashMap::default(),
- tracked_pointer_tags,
- tracked_call_ids,
- retag_fields,
- }
- }
-
- /// Generates a new pointer tag. Remember to also check `tracked_pointer_tags` and log its creation!
- fn new_ptr(&mut self) -> SbTag {
- let id = self.next_ptr_tag;
- self.next_ptr_tag = SbTag(NonZeroU64::new(id.0.get() + 1).unwrap());
- id
- }
-
- pub fn new_frame(&mut self, machine: &MiriMachine<'_, '_>) -> FrameExtra {
- let call_id = self.next_call_id;
- trace!("new_frame: Assigning call ID {}", call_id);
- if self.tracked_call_ids.contains(&call_id) {
- machine.emit_diagnostic(NonHaltingDiagnostic::CreatedCallId(call_id));
- }
- self.next_call_id = NonZeroU64::new(call_id.get() + 1).unwrap();
- FrameExtra { call_id, protected_tags: SmallVec::new() }
- }
-
- pub fn end_call(&mut self, frame: &machine::FrameData<'_>) {
- for tag in &frame
- .stacked_borrows
- .as_ref()
- .expect("we should have Stacked Borrows data")
- .protected_tags
- {
- self.protected_tags.remove(tag);
- }
- }
-
- pub fn base_ptr_tag(&mut self, id: AllocId, machine: &MiriMachine<'_, '_>) -> SbTag {
- self.base_ptr_tags.get(&id).copied().unwrap_or_else(|| {
- let tag = self.new_ptr();
- if self.tracked_pointer_tags.contains(&tag) {
- machine.emit_diagnostic(NonHaltingDiagnostic::CreatedPointerTag(tag.0, None, None));
- }
- trace!("New allocation {:?} has base tag {:?}", id, tag);
- self.base_ptr_tags.try_insert(id, tag).unwrap();
- tag
- })
- }
-}
-
-/// Error reporting
-pub fn err_sb_ub<'tcx>(
- msg: String,
- help: Option<String>,
- history: Option<TagHistory>,
-) -> InterpError<'tcx> {
- err_machine_stop!(TerminationInfo::StackedBorrowsUb { msg, help, history })
-}
-
-// # Stacked Borrows Core Begin
-
-/// We need to make at least the following things true:
-///
-/// U1: After creating a `Uniq`, it is at the top.
-/// U2: If the top is `Uniq`, accesses must be through that `Uniq` or remove it.
-/// U3: If an access happens with a `Uniq`, it requires the `Uniq` to be in the stack.
-///
-/// F1: After creating a `&`, the parts outside `UnsafeCell` have our `SharedReadOnly` on top.
-/// F2: If a write access happens, it pops the `SharedReadOnly`. This has two pieces:
-/// F2a: If a write happens granted by an item below our `SharedReadOnly`, the `SharedReadOnly`
-/// gets popped.
-/// F2b: No `SharedReadWrite` or `Unique` will ever be added on top of our `SharedReadOnly`.
-/// F3: If an access happens with an `&` outside `UnsafeCell`,
-/// it requires the `SharedReadOnly` to still be in the stack.
-
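-// Worked example for F2a (hypothetical tags): with the stack [Unique<1>, SharedReadOnly<2>],
-// a write granted by Unique<1> finds its granting item at index 0 and pops everything
-// above it, leaving [Unique<1>]; a later read through <2> is then an error, because
-// <2> is no longer in the stack for this location.
-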
-/// Core relation on `Permission` to define which accesses are allowed
-impl Permission {
- /// This defines for a given permission, whether it permits the given kind of access.
- fn grants(self, access: AccessKind) -> bool {
- // Disabled grants nothing. Otherwise, all items grant read access, and except for SharedReadOnly they grant write access.
- self != Permission::Disabled
- && (access == AccessKind::Read || self != Permission::SharedReadOnly)
- }
-}
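-
-// Spelled out, `grants` yields the following access matrix:
-// Unique: read yes, write yes
-// SharedReadWrite: read yes, write yes
-// SharedReadOnly: read yes, write no
-// Disabled: read no, write no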
-
-/// Determines whether an item was invalidated by a conflicting access, or by deallocation.
-#[derive(Copy, Clone, Debug)]
-enum ItemInvalidationCause {
- Conflict,
- Dealloc,
-}
-
-/// Core per-location operations: access, dealloc, reborrow.
-impl<'tcx> Stack {
- /// Find the first write-incompatible item above the given one --
- /// i.e, find the height to which the stack will be truncated when writing to `granting`.
- fn find_first_write_incompatible(&self, granting: usize) -> usize {
- let perm = self.get(granting).unwrap().perm();
- match perm {
- Permission::SharedReadOnly => bug!("Cannot use SharedReadOnly for writing"),
- Permission::Disabled => bug!("Cannot use Disabled for anything"),
- Permission::Unique => {
- // On a write, everything above us is incompatible.
- granting + 1
- }
- Permission::SharedReadWrite => {
- // The SharedReadWrite *just* above us are compatible, so we skip those.
- let mut idx = granting + 1;
- while let Some(item) = self.get(idx) {
- if item.perm() == Permission::SharedReadWrite {
- // Go on.
- idx += 1;
- } else {
- // Found first incompatible!
- break;
- }
- }
- idx
- }
- }
- }
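-
- // Example (hypothetical stack): for
- // [SharedReadWrite<1>, SharedReadWrite<2>, SharedReadOnly<3>] with granting = 0,
- // the SRW group at indices 0 and 1 stays write-compatible, so the first
- // write-incompatible index is 2 (the SharedReadOnly).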
-
- /// Check whether the given item may be invalidated, returning an error (built via
- /// `dcx`) if it is protected and the invalidation is not allowed.
- ///
- /// The `cause` argument records whether the item is invalidated by a conflicting
- /// access or by deallocation; a weak protector only tolerates invalidation caused
- /// by deallocation, while a strong protector tolerates neither.
- fn item_invalidated(
- item: &Item,
- global: &GlobalStateInner,
- dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
- cause: ItemInvalidationCause,
- ) -> InterpResult<'tcx> {
- if !global.tracked_pointer_tags.is_empty() {
- dcx.check_tracked_tag_popped(item, global);
- }
-
- if !item.protected() {
- return Ok(());
- }
-
- // We store tags twice, once in global.protected_tags and once in each call frame.
- // We do this because consulting a single global set in this function is faster
- // than attempting to search all call frames in the program for the `FrameExtra`
- // (if any) which is protecting the popped tag.
- //
- // This duplication trades off making `end_call` slower to make this function faster. This
- // trade-off is profitable in practice for a combination of two reasons.
- // 1. A single protected tag can (and does in some programs) protect thousands of `Item`s.
- // Therefore, adding overhead in function call/return is profitable even if it only
- // saves a little work in this function.
- // 2. Most frames protect only one or two tags. So this duplicative global turns a search
- // which ends up about linear in the number of protected tags in the program into a
- // constant time check (and a slow linear, because the tags in the frames aren't contiguous).
- if let Some(&protector_kind) = global.protected_tags.get(&item.tag()) {
- // The only way this is okay is if the protector is weak and we are deallocating with
- // the right pointer.
- let allowed = matches!(cause, ItemInvalidationCause::Dealloc)
- && matches!(protector_kind, ProtectorKind::WeakProtector);
- if !allowed {
- return Err(dcx.protector_error(item, protector_kind).into());
- }
- }
- Ok(())
- }
-
- /// Test if a memory `access` using pointer tagged `tag` is granted.
- /// If yes, return the index of the item that granted it.
- /// `range` refers to the entire operation, and `offset` refers to the specific offset into the
- /// allocation that we are currently checking.
- fn access(
- &mut self,
- access: AccessKind,
- tag: ProvenanceExtra,
- global: &GlobalStateInner,
- dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
- exposed_tags: &FxHashSet<SbTag>,
- ) -> InterpResult<'tcx> {
- // Two main steps: Find granting item, remove incompatible items above.
-
- // Step 1: Find granting item.
- let granting_idx =
- self.find_granting(access, tag, exposed_tags).map_err(|()| dcx.access_error(self))?;
-
- // Step 2: Remove incompatible items above them. Make sure we do not remove protected
- // items. Behavior differs for reads and writes.
- // In case of wildcards/unknown matches, we remove everything that is *definitely* gone.
- if access == AccessKind::Write {
- // Remove everything above the write-compatible items, like a proper stack. This makes sure read-only and unique
- // pointers become invalid on write accesses (ensures F2a, and ensures U2 for write accesses).
- let first_incompatible_idx = if let Some(granting_idx) = granting_idx {
- // The granting_idx *might* be approximate, but any lower idx would remove more
- // things. Even if this is a Unique and the lower idx is an SRW (which removes
- // less), there is an SRW group boundary here so strictly more would get removed.
- self.find_first_write_incompatible(granting_idx)
- } else {
- // We are writing to something in the unknown part.
- // There is an SRW group boundary between the unknown and the known, so everything is incompatible.
- 0
- };
- self.pop_items_after(first_incompatible_idx, |item| {
- Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
- dcx.log_invalidation(item.tag());
- Ok(())
- })?;
- } else {
- // On a read, *disable* all `Unique` above the granting item. This ensures U2 for read accesses.
- // The reason this is not following the stack discipline (by removing the first Unique and
- // everything on top of it) is that in `let raw = &mut *x as *mut _; let _val = *x;`, the second statement
- // would pop the `Unique` from the reborrow of the first statement, and subsequently also pop the
- // `SharedReadWrite` for `raw`.
- // This pattern occurs a lot in the standard library: create a raw pointer, then also create a shared
- // reference and use that.
- // We *disable* instead of removing `Unique` to avoid "connecting" two neighbouring blocks of SRWs.
- let first_incompatible_idx = if let Some(granting_idx) = granting_idx {
- // The granting_idx *might* be approximate, but any lower idx would disable more things.
- granting_idx + 1
- } else {
- // We are reading from something in the unknown part. That means *all* `Unique` we know about are dead now.
- 0
- };
- self.disable_uniques_starting_at(first_incompatible_idx, |item| {
- Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
- dcx.log_invalidation(item.tag());
- Ok(())
- })?;
- }
-
- // If this was an approximate action, we now collapse everything into an unknown.
- if granting_idx.is_none() || matches!(tag, ProvenanceExtra::Wildcard) {
- // Compute the upper bound of the items that remain.
- // (This is why we did all the work above: to reduce the items we have to consider here.)
- let mut max = NonZeroU64::new(1).unwrap();
- for i in 0..self.len() {
- let item = self.get(i).unwrap();
- // Skip disabled items, they cannot be matched anyway.
- if !matches!(item.perm(), Permission::Disabled) {
- // We are looking for a strict upper bound, so add 1 to this tag.
- max = cmp::max(item.tag().0.checked_add(1).unwrap(), max);
- }
- }
- if let Some(unk) = self.unknown_bottom() {
- max = cmp::max(unk.0, max);
- }
- // Use `max` as new strict upper bound for everything.
- trace!(
- "access: forgetting stack to upper bound {max} due to wildcard or unknown access"
- );
- self.set_unknown_bottom(SbTag(max));
- }
-
- // Done.
- Ok(())
- }
-
- /// Deallocate a location: Like a write access, but also there must be no
- /// active protectors at all because we will remove all items.
- fn dealloc(
- &mut self,
- tag: ProvenanceExtra,
- global: &GlobalStateInner,
- dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
- exposed_tags: &FxHashSet<SbTag>,
- ) -> InterpResult<'tcx> {
- // Step 1: Make a write access.
- // As part of this we do regular protector checking, i.e. even weakly protected items cause UB when popped.
- self.access(AccessKind::Write, tag, global, dcx, exposed_tags)?;
-
- // Step 2: Pretend we remove the remaining items, checking if any are strongly protected.
- for idx in (0..self.len()).rev() {
- let item = self.get(idx).unwrap();
- Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Dealloc)?;
- }
-
- Ok(())
- }
-
- /// Derive a new pointer from one with the given tag.
- ///
- /// `access` indicates which kind of memory access this retag itself should correspond to.
- fn grant(
- &mut self,
- derived_from: ProvenanceExtra,
- new: Item,
- access: Option<AccessKind>,
- global: &GlobalStateInner,
- dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
- exposed_tags: &FxHashSet<SbTag>,
- ) -> InterpResult<'tcx> {
- dcx.start_grant(new.perm());
-
- // Compute where to put the new item.
- // Either way, we ensure that we insert the new item in a way such that between
- // `derived_from` and the new one, there are only items *compatible with* `derived_from`.
- let new_idx = if let Some(access) = access {
- // Simple case: We are just a regular memory access, and then push our thing on top,
- // like a regular stack.
- // This ensures F2b for `Unique`, by removing offending `SharedReadOnly`.
- self.access(access, derived_from, global, dcx, exposed_tags)?;
-
- // We insert "as far up as possible": We know only compatible items are remaining
- // on top of `derived_from`, and we want the new item at the top so that we
- // get the strongest possible guarantees.
- // This ensures U1 and F1.
- self.len()
- } else {
- // The tricky case: creating a new SRW permission without actually being an access.
- assert!(new.perm() == Permission::SharedReadWrite);
-
- // First we figure out which item grants our parent (`derived_from`) this kind of access.
- // We use that to determine where to put the new item.
- let granting_idx = self
- .find_granting(AccessKind::Write, derived_from, exposed_tags)
- .map_err(|()| dcx.grant_error(self))?;
-
- let (Some(granting_idx), ProvenanceExtra::Concrete(_)) = (granting_idx, derived_from) else {
- // The parent is a wildcard pointer or matched the unknown bottom.
- // This is approximate. Nobody knows what happened, so forget everything.
- // The new thing is SRW anyway, so we cannot push it "on top of the unknown part"
- // (for all we know, it might join an SRW group inside the unknown).
- trace!("reborrow: forgetting stack entirely due to SharedReadWrite reborrow from wildcard or unknown");
- self.set_unknown_bottom(global.next_ptr_tag);
- return Ok(());
- };
-
- // SharedReadWrite can coexist with "existing loans", meaning they don't act like a write
- // access. Instead of popping the stack, we insert the item at the place the stack would
- // be popped to (i.e., we insert it above all the write-compatible items).
- // This ensures F2b by adding the new item below any potentially existing `SharedReadOnly`.
- self.find_first_write_incompatible(granting_idx)
- };
-
- // Put the new item there.
- trace!("reborrow: adding item {:?}", new);
- self.insert(new_idx, new);
- Ok(())
- }
-}
-// # Stacked Borrows Core End
-
-/// Integration with the SbTag garbage collector
-impl Stacks {
- pub fn remove_unreachable_tags(&mut self, live_tags: &FxHashSet<SbTag>) {
- if self.modified_since_last_gc {
- for stack in self.stacks.iter_mut_all() {
- if stack.len() > 64 {
- stack.retain(live_tags);
- }
- }
- self.modified_since_last_gc = false;
- }
- }
-}
-
-impl VisitTags for Stacks {
- fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
- for tag in self.exposed_tags.iter().copied() {
- visit(tag);
- }
- }
-}
-
-/// Map per-stack operations to higher-level per-location-range operations.
-impl<'tcx> Stacks {
- /// Creates a new stack with an initial tag. For diagnostic purposes, we also need to know
- /// the [`AllocId`] of the allocation this is associated with.
- fn new(
- size: Size,
- perm: Permission,
- tag: SbTag,
- id: AllocId,
- machine: &MiriMachine<'_, '_>,
- ) -> Self {
- let item = Item::new(tag, perm, false);
- let stack = Stack::new(item);
-
- Stacks {
- stacks: RangeMap::new(size, stack),
- history: AllocHistory::new(id, item, machine),
- exposed_tags: FxHashSet::default(),
- modified_since_last_gc: false,
- }
- }
-
- /// Call `f` on every stack in the range.
- fn for_each(
- &mut self,
- range: AllocRange,
- mut dcx_builder: DiagnosticCxBuilder<'_, '_, 'tcx>,
- mut f: impl FnMut(
- &mut Stack,
- &mut DiagnosticCx<'_, '_, '_, 'tcx>,
- &mut FxHashSet<SbTag>,
- ) -> InterpResult<'tcx>,
- ) -> InterpResult<'tcx> {
- self.modified_since_last_gc = true;
- for (offset, stack) in self.stacks.iter_mut(range.start, range.size) {
- let mut dcx = dcx_builder.build(&mut self.history, offset);
- f(stack, &mut dcx, &mut self.exposed_tags)?;
- dcx_builder = dcx.unbuild();
- }
- Ok(())
- }
-}
-
-/// Glue code to connect with Miri Machine Hooks
-impl Stacks {
- pub fn new_allocation(
- id: AllocId,
- size: Size,
- state: &GlobalState,
- kind: MemoryKind<MiriMemoryKind>,
- machine: &MiriMachine<'_, '_>,
- ) -> Self {
- let mut extra = state.borrow_mut();
- let (base_tag, perm) = match kind {
- // New unique borrow. This tag is not accessible by the program,
- // so it will only ever be used when using the local directly (i.e.,
- // not through a pointer). That is, whenever we directly write to a local, this will pop
- // everything else off the stack, invalidating all previous pointers,
- // and in particular, *all* raw pointers.
- MemoryKind::Stack => (extra.base_ptr_tag(id, machine), Permission::Unique),
- // Everything else is shared by default.
- _ => (extra.base_ptr_tag(id, machine), Permission::SharedReadWrite),
- };
- Stacks::new(size, perm, base_tag, id, machine)
- }
-
- #[inline(always)]
- pub fn before_memory_read<'tcx, 'mir, 'ecx>(
- &mut self,
- alloc_id: AllocId,
- tag: ProvenanceExtra,
- range: AllocRange,
- machine: &'ecx MiriMachine<'mir, 'tcx>,
- ) -> InterpResult<'tcx>
- where
- 'tcx: 'ecx,
- {
- trace!(
- "read access with tag {:?}: {:?}, size {}",
- tag,
- Pointer::new(alloc_id, range.start),
- range.size.bytes()
- );
- let dcx = DiagnosticCxBuilder::read(machine, tag, range);
- let state = machine.stacked_borrows.as_ref().unwrap().borrow();
- self.for_each(range, dcx, |stack, dcx, exposed_tags| {
- stack.access(AccessKind::Read, tag, &state, dcx, exposed_tags)
- })
- }
-
- #[inline(always)]
- pub fn before_memory_write<'tcx>(
- &mut self,
- alloc_id: AllocId,
- tag: ProvenanceExtra,
- range: AllocRange,
- machine: &mut MiriMachine<'_, 'tcx>,
- ) -> InterpResult<'tcx> {
- trace!(
- "write access with tag {:?}: {:?}, size {}",
- tag,
- Pointer::new(alloc_id, range.start),
- range.size.bytes()
- );
- let dcx = DiagnosticCxBuilder::write(machine, tag, range);
- let state = machine.stacked_borrows.as_ref().unwrap().borrow();
- self.for_each(range, dcx, |stack, dcx, exposed_tags| {
- stack.access(AccessKind::Write, tag, &state, dcx, exposed_tags)
- })
- }
-
- #[inline(always)]
- pub fn before_memory_deallocation<'tcx>(
- &mut self,
- alloc_id: AllocId,
- tag: ProvenanceExtra,
- range: AllocRange,
- machine: &mut MiriMachine<'_, 'tcx>,
- ) -> InterpResult<'tcx> {
- trace!("deallocation with tag {:?}: {:?}, size {}", tag, alloc_id, range.size.bytes());
- let dcx = DiagnosticCxBuilder::dealloc(machine, tag);
- let state = machine.stacked_borrows.as_ref().unwrap().borrow();
- self.for_each(range, dcx, |stack, dcx, exposed_tags| {
- stack.dealloc(tag, &state, dcx, exposed_tags)
- })?;
- Ok(())
- }
-}
-
-/// Retagging/reborrowing. There is some policy in here, such as which permissions
-/// to grant for which references, and when to add protectors.
-impl<'mir: 'ecx, 'tcx: 'mir, 'ecx> EvalContextPrivExt<'mir, 'tcx, 'ecx>
- for crate::MiriInterpCx<'mir, 'tcx>
-{
-}
-trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'mir, 'tcx> {
- /// Returns the `AllocId` the reborrow was done in, if some actual borrow stack manipulation
- /// happened.
- fn reborrow(
- &mut self,
- place: &MPlaceTy<'tcx, Provenance>,
- size: Size,
- kind: RefKind,
- retag_cause: RetagCause, // What caused this retag, for diagnostics only
- new_tag: SbTag,
- protect: Option<ProtectorKind>,
- ) -> InterpResult<'tcx, Option<AllocId>> {
- let this = self.eval_context_mut();
-
- // It is crucial that this gets called on all code paths, to ensure we track tag creation.
- let log_creation = |this: &MiriInterpCx<'mir, 'tcx>,
- loc: Option<(AllocId, Size, ProvenanceExtra)>| // alloc_id, base_offset, orig_tag
- -> InterpResult<'tcx> {
- let global = this.machine.stacked_borrows.as_ref().unwrap().borrow();
- let ty = place.layout.ty;
- if global.tracked_pointer_tags.contains(&new_tag) {
- let mut kind_str = format!("{kind}");
- match kind {
- RefKind::Unique { two_phase: false }
- if !ty.is_unpin(*this.tcx, this.param_env()) =>
- {
- write!(kind_str, " (!Unpin pointee type {ty})").unwrap()
- },
- RefKind::Shared
- if !ty.is_freeze(*this.tcx, this.param_env()) =>
- {
- write!(kind_str, " (!Freeze pointee type {ty})").unwrap()
- },
- _ => write!(kind_str, " (pointee type {ty})").unwrap(),
- };
- this.emit_diagnostic(NonHaltingDiagnostic::CreatedPointerTag(
- new_tag.0,
- Some(kind_str),
- loc.map(|(alloc_id, base_offset, orig_tag)| (alloc_id, alloc_range(base_offset, size), orig_tag)),
- ));
- }
- drop(global); // don't hold that reference any longer than we have to
-
- let Some((alloc_id, base_offset, orig_tag)) = loc else {
- return Ok(())
- };
-
- let (_size, _align, alloc_kind) = this.get_alloc_info(alloc_id);
- match alloc_kind {
- AllocKind::LiveData => {
- // This should have alloc_extra data, but `get_alloc_extra` can still fail
- // if converting this alloc_id from a global to a local one
- // uncovers a non-supported `extern static`.
- let extra = this.get_alloc_extra(alloc_id)?;
- let mut stacked_borrows = extra
- .stacked_borrows
- .as_ref()
- .expect("we should have Stacked Borrows data")
- .borrow_mut();
- // Note that we create a *second* `DiagnosticCxBuilder` below for the actual retag.
- // FIXME: can this be done cleaner?
- let dcx = DiagnosticCxBuilder::retag(
- &this.machine,
- retag_cause,
- new_tag,
- orig_tag,
- alloc_range(base_offset, size),
- );
- let mut dcx = dcx.build(&mut stacked_borrows.history, base_offset);
- dcx.log_creation();
- if protect.is_some() {
- dcx.log_protector();
- }
- }
- AllocKind::Function | AllocKind::VTable | AllocKind::Dead => {
- // No stacked borrows on these allocations.
- }
- }
- Ok(())
- };
-
- if size == Size::ZERO {
- trace!(
- "reborrow of size 0: {} reference {:?} derived from {:?} (pointee {})",
- kind,
- new_tag,
- place.ptr,
- place.layout.ty,
- );
- // Don't update any stacks for a zero-sized access; borrow stacks are per-byte and this
- // touches no bytes so there is no stack to put this tag in.
- // However, if the pointer for this operation points at a real allocation we still
- // record where it was created so that we can issue a helpful diagnostic if there is an
- // attempt to use it for a non-zero-sized access.
- // Dangling slices are a common case here: it's valid to query their length, but under
- // raw pointer tagging, for example, every call to `get_unchecked` on them is invalid.
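- // (Think `slice::from_raw_parts(ptr, 0)` with a dangling but aligned `ptr`: asking the
- // resulting slice for its length is fine, but reading an element through it is not.)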
- if let Ok((alloc_id, base_offset, orig_tag)) = this.ptr_try_get_alloc_id(place.ptr) {
- log_creation(this, Some((alloc_id, base_offset, orig_tag)))?;
- return Ok(Some(alloc_id));
- }
- // This pointer doesn't come with an AllocId. :shrug:
- log_creation(this, None)?;
- return Ok(None);
- }
-
- let (alloc_id, base_offset, orig_tag) = this.ptr_get_alloc_id(place.ptr)?;
- log_creation(this, Some((alloc_id, base_offset, orig_tag)))?;
-
- // Ensure we bail out if the pointer goes out-of-bounds (see miri#1050).
- let (alloc_size, _) = this.get_live_alloc_size_and_align(alloc_id)?;
- if base_offset + size > alloc_size {
- throw_ub!(PointerOutOfBounds {
- alloc_id,
- alloc_size,
- ptr_offset: this.machine_usize_to_isize(base_offset.bytes()),
- ptr_size: size,
- msg: CheckInAllocMsg::InboundsTest
- });
- }
-
- trace!(
- "reborrow: {} reference {:?} derived from {:?} (pointee {}): {:?}, size {}",
- kind,
- new_tag,
- orig_tag,
- place.layout.ty,
- Pointer::new(alloc_id, base_offset),
- size.bytes()
- );
-
- if let Some(protect) = protect {
- // See comment in `Stack::item_invalidated` for why we store the tag twice.
- this.frame_mut().extra.stacked_borrows.as_mut().unwrap().protected_tags.push(new_tag);
- this.machine
- .stacked_borrows
- .as_mut()
- .unwrap()
- .get_mut()
- .protected_tags
- .insert(new_tag, protect);
- }
-
- // Update the stacks.
- // Make sure that raw pointers and mutable shared references are reborrowed "weak":
- // There could be existing unique pointers reborrowed from them that should remain valid!
- let (perm, access) = match kind {
- RefKind::Unique { two_phase } => {
- // Permission is Unique only if the type is `Unpin` and this is not a two-phase borrow
- let perm = if !two_phase && place.layout.ty.is_unpin(*this.tcx, this.param_env()) {
- Permission::Unique
- } else {
- Permission::SharedReadWrite
- };
- // We do an access for all full borrows, even if `!Unpin`.
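- // (For a two-phase borrow, performing the write access here would be wrong: until the
- // borrow is activated, the parent pointers may legitimately still be used to read.)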
- let access = if !two_phase { Some(AccessKind::Write) } else { None };
- (perm, access)
- }
- RefKind::Raw { mutable: true } => {
- // Creating a raw ptr does not count as an access
- (Permission::SharedReadWrite, None)
- }
- RefKind::Shared | RefKind::Raw { mutable: false } => {
- // Shared references and *const are a whole different kind of game, the
- // permission is not uniform across the entire range!
- // We need a frozen-sensitive reborrow.
- // We have to use shared references to alloc/memory_extra here since
- // `visit_freeze_sensitive` needs to access the global state.
- let alloc_extra = this.get_alloc_extra(alloc_id)?;
- let mut stacked_borrows = alloc_extra
- .stacked_borrows
- .as_ref()
- .expect("we should have Stacked Borrows data")
- .borrow_mut();
- this.visit_freeze_sensitive(place, size, |mut range, frozen| {
- // Adjust range.
- range.start += base_offset;
- // We are only ever `SharedReadOnly` inside the frozen bits.
- let (perm, access) = if frozen {
- (Permission::SharedReadOnly, Some(AccessKind::Read))
- } else {
- // Inside UnsafeCell, this does *not* count as an access, as there
- // might actually be mutable references further up the stack that
- // we have to keep alive.
- (Permission::SharedReadWrite, None)
- };
- let protected = if frozen {
- protect.is_some()
- } else {
- // We do not protect inside UnsafeCell.
- // This fixes https://github.com/rust-lang/rust/issues/55005.
- false
- };
- let item = Item::new(new_tag, perm, protected);
- let global = this.machine.stacked_borrows.as_ref().unwrap().borrow();
- let dcx = DiagnosticCxBuilder::retag(
- &this.machine,
- retag_cause,
- new_tag,
- orig_tag,
- alloc_range(base_offset, size),
- );
- stacked_borrows.for_each(range, dcx, |stack, dcx, exposed_tags| {
- stack.grant(orig_tag, item, access, &global, dcx, exposed_tags)
- })?;
- drop(global);
- if let Some(access) = access {
- assert_eq!(access, AccessKind::Read);
- // Make sure the data race model also knows about this.
- if let Some(data_race) = alloc_extra.data_race.as_ref() {
- data_race.read(alloc_id, range, &this.machine)?;
- }
- }
- Ok(())
- })?;
- return Ok(Some(alloc_id));
- }
- };
-
- // Here we can avoid `borrow()` calls because we have mutable references.
- // Note that this asserts that the allocation is mutable -- but since we are creating a
- // mutable pointer, that seems reasonable.
- let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc_id)?;
- let stacked_borrows = alloc_extra
- .stacked_borrows
- .as_mut()
- .expect("we should have Stacked Borrows data")
- .get_mut();
- let item = Item::new(new_tag, perm, protect.is_some());
- let range = alloc_range(base_offset, size);
- let global = machine.stacked_borrows.as_ref().unwrap().borrow();
- let dcx = DiagnosticCxBuilder::retag(
- machine,
- retag_cause,
- new_tag,
- orig_tag,
- alloc_range(base_offset, size),
- );
- stacked_borrows.for_each(range, dcx, |stack, dcx, exposed_tags| {
- stack.grant(orig_tag, item, access, &global, dcx, exposed_tags)
- })?;
- drop(global);
- if let Some(access) = access {
- assert_eq!(access, AccessKind::Write);
- // Make sure the data race model also knows about this.
- if let Some(data_race) = alloc_extra.data_race.as_mut() {
- data_race.write(alloc_id, range, machine)?;
- }
- }
-
- Ok(Some(alloc_id))
- }
-
- /// Retags an individual pointer, returning the retagged version.
- /// `kind` indicates what kind of reference is being created; pass `RefKind::Raw`
- /// to make this a raw pointer.
- fn retag_reference(
- &mut self,
- val: &ImmTy<'tcx, Provenance>,
- kind: RefKind,
- retag_cause: RetagCause, // What caused this retag, for diagnostics only
- protect: Option<ProtectorKind>,
- ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> {
- let this = self.eval_context_mut();
- // We want a place for where the ptr *points to*, so we get one.
- let place = this.ref_to_mplace(val)?;
- let size = this.size_and_align_of_mplace(&place)?.map(|(size, _)| size);
- // FIXME: If we cannot determine the size (because the unsized tail is an `extern type`),
- // bail out -- we cannot reasonably figure out which memory range to reborrow.
- // See https://github.com/rust-lang/unsafe-code-guidelines/issues/276.
- let size = match size {
- Some(size) => size,
- None => return Ok(val.clone()),
- };
-
- // Compute new borrow.
- let new_tag = this.machine.stacked_borrows.as_mut().unwrap().get_mut().new_ptr();
-
- // Reborrow.
- let alloc_id = this.reborrow(&place, size, kind, retag_cause, new_tag, protect)?;
-
- // Adjust pointer.
- let new_place = place.map_provenance(|p| {
- p.map(|prov| {
- match alloc_id {
- Some(alloc_id) => {
- // If `reborrow` could figure out the AllocId of this ptr, hard-code it into the new one.
- // Even if we started out with a wildcard, this newly retagged pointer is tied to that allocation.
- Provenance::Concrete { alloc_id, sb: new_tag }
- }
- None => {
- // Looks like this has to stay a wildcard pointer.
- assert!(matches!(prov, Provenance::Wildcard));
- Provenance::Wildcard
- }
- }
- })
- });
-
- // Return new pointer.
- Ok(ImmTy::from_immediate(new_place.to_ref(this), val.layout))
- }
-}
-
-impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
-pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
- fn retag(&mut self, kind: RetagKind, place: &PlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
- let this = self.eval_context_mut();
- let retag_fields = this.machine.stacked_borrows.as_mut().unwrap().get_mut().retag_fields;
- let retag_cause = match kind {
- RetagKind::TwoPhase { .. } => RetagCause::TwoPhase,
- RetagKind::FnEntry => RetagCause::FnEntry,
- RetagKind::Raw | RetagKind::Default => RetagCause::Normal,
- };
- let mut visitor = RetagVisitor { ecx: this, kind, retag_cause, retag_fields };
- return visitor.visit_value(place);
-
- // The actual visitor.
- struct RetagVisitor<'ecx, 'mir, 'tcx> {
- ecx: &'ecx mut MiriInterpCx<'mir, 'tcx>,
- kind: RetagKind,
- retag_cause: RetagCause,
- retag_fields: RetagFields,
- }
- impl<'ecx, 'mir, 'tcx> RetagVisitor<'ecx, 'mir, 'tcx> {
- #[inline(always)] // yes this helps in our benchmarks
- fn retag_place(
- &mut self,
- place: &PlaceTy<'tcx, Provenance>,
- ref_kind: RefKind,
- retag_cause: RetagCause,
- protector: Option<ProtectorKind>,
- ) -> InterpResult<'tcx> {
- let val = self.ecx.read_immediate(&self.ecx.place_to_op(place)?)?;
- let val = self.ecx.retag_reference(&val, ref_kind, retag_cause, protector)?;
- self.ecx.write_immediate(*val, place)?;
- Ok(())
- }
- }
- impl<'ecx, 'mir, 'tcx> MutValueVisitor<'mir, 'tcx, MiriMachine<'mir, 'tcx>>
- for RetagVisitor<'ecx, 'mir, 'tcx>
- {
- type V = PlaceTy<'tcx, Provenance>;
-
- #[inline(always)]
- fn ecx(&mut self) -> &mut MiriInterpCx<'mir, 'tcx> {
- self.ecx
- }
-
- fn visit_box(&mut self, place: &PlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
- // Boxes get a weak protector, since they may be deallocated.
- self.retag_place(
- place,
- RefKind::Unique { two_phase: false },
- self.retag_cause,
- /*protector*/
- (self.kind == RetagKind::FnEntry).then_some(ProtectorKind::WeakProtector),
- )
- }
-
- fn visit_value(&mut self, place: &PlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
- // If this place is smaller than a pointer, we know that it can't contain any
- // pointers we need to retag, so we can stop recursion early.
- // This optimization is crucial for ZSTs, because they can contain way more fields
- // than we can ever visit.
- if place.layout.is_sized() && place.layout.size < self.ecx.pointer_size() {
- return Ok(());
- }
-
- // Check the type of this value to see what to do with it (retag, or recurse).
- match place.layout.ty.kind() {
- ty::Ref(_, _, mutbl) => {
- let ref_kind = match mutbl {
- Mutability::Mut =>
- RefKind::Unique { two_phase: self.kind == RetagKind::TwoPhase },
- Mutability::Not => RefKind::Shared,
- };
- self.retag_place(
- place,
- ref_kind,
- self.retag_cause,
- /*protector*/
- (self.kind == RetagKind::FnEntry)
- .then_some(ProtectorKind::StrongProtector),
- )?;
- }
- ty::RawPtr(tym) => {
- // We definitely do *not* want to recurse into raw pointers -- wide raw
- // pointers have fields, and for dyn Trait pointees those can have reference
- // type!
- if self.kind == RetagKind::Raw {
- // Raw pointers are only retagged when raw-pointer retagging was explicitly
- // requested (i.e., under `RetagKind::Raw`).
- self.retag_place(
- place,
- RefKind::Raw { mutable: tym.mutbl == Mutability::Mut },
- self.retag_cause,
- /*protector*/ None,
- )?;
- }
- }
- _ if place.layout.ty.ty_adt_def().is_some_and(|adt| adt.is_box()) => {
- // Recurse for boxes, they require some tricky handling and will end up in `visit_box` above.
- // (Yes this means we technically also recursively retag the allocator itself
- // even if field retagging is not enabled. *shrug*)
- self.walk_value(place)?;
- }
- _ => {
- // Not a reference/pointer/box. Only recurse if configured appropriately.
- let recurse = match self.retag_fields {
- RetagFields::No => false,
- RetagFields::Yes => true,
- RetagFields::OnlyScalar => {
- // Matching `ArgAbi::new` at the time of writing, only fields of
- // `Scalar` and `ScalarPair` ABI are considered.
- matches!(place.layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..))
- }
- };
- if recurse {
- self.walk_value(place)?;
- }
- }
- }
-
- Ok(())
- }
- }
- }
-
- /// After a stack frame got pushed, retag the return place so that we are sure
- /// it does not alias with anything.
- ///
- /// This is a HACK because there is nothing in MIR that would make the retag
- /// explicit. Also see <https://github.com/rust-lang/rust/issues/71117>.
- fn retag_return_place(&mut self) -> InterpResult<'tcx> {
- let this = self.eval_context_mut();
- let return_place = &this.frame().return_place;
- if return_place.layout.is_zst() {
- // There may not be any memory here, nothing to do.
- return Ok(());
- }
- // We need this to be in-memory to use tagged pointers.
- let return_place = this.force_allocation(&return_place.clone())?;
-
- // We have to turn the place into a pointer to use the existing code.
- // (The pointer type does not matter, so we use a raw pointer.)
- let ptr_layout = this.layout_of(this.tcx.mk_mut_ptr(return_place.layout.ty))?;
- let val = ImmTy::from_immediate(return_place.to_ref(this), ptr_layout);
- // Reborrow it. With protection! That is part of the point.
- let val = this.retag_reference(
- &val,
- RefKind::Unique { two_phase: false },
- RetagCause::FnReturn,
- /*protector*/ Some(ProtectorKind::StrongProtector),
- )?;
- // And use reborrowed pointer for return place.
- let return_place = this.ref_to_mplace(&val)?;
- this.frame_mut().return_place = return_place.into();
-
- Ok(())
- }
-
- /// Mark the given tag as exposed. It was found on a pointer with the given AllocId.
- fn expose_tag(&mut self, alloc_id: AllocId, tag: SbTag) -> InterpResult<'tcx> {
- let this = self.eval_context_mut();
-
- // Function pointers and dead objects don't have an alloc_extra so we ignore them.
- // This is okay because accessing them is UB anyway, no need for any Stacked Borrows checks.
- // NOT using `get_alloc_extra_mut` since this might be a read-only allocation!
- let (_size, _align, kind) = this.get_alloc_info(alloc_id);
- match kind {
- AllocKind::LiveData => {
- // This should have alloc_extra data, but `get_alloc_extra` can still fail
- // if converting this alloc_id from a global to a local one
- // uncovers a non-supported `extern static`.
- let alloc_extra = this.get_alloc_extra(alloc_id)?;
- trace!("Stacked Borrows tag {tag:?} exposed in {alloc_id:?}");
- alloc_extra.stacked_borrows.as_ref().unwrap().borrow_mut().exposed_tags.insert(tag);
- }
- AllocKind::Function | AllocKind::VTable | AllocKind::Dead => {
- // No stacked borrows on these allocations.
- }
- }
- Ok(())
- }
-
- fn print_stacks(&mut self, alloc_id: AllocId) -> InterpResult<'tcx> {
- let this = self.eval_context_mut();
- let alloc_extra = this.get_alloc_extra(alloc_id)?;
- let stacks = alloc_extra.stacked_borrows.as_ref().unwrap().borrow();
- for (range, stack) in stacks.stacks.iter_all() {
- print!("{range:?}: [");
- if let Some(bottom) = stack.unknown_bottom() {
- print!(" unknown-bottom(..{bottom:?})");
- }
- for i in 0..stack.len() {
- let item = stack.get(i).unwrap();
- print!(" {:?}{:?}", item.perm(), item.tag());
- }
- println!(" ]");
- }
- Ok(())
- }
-}
+++ /dev/null
-#[cfg(feature = "stack-cache")]
-use std::ops::Range;
-
-use rustc_data_structures::fx::FxHashSet;
-
-use crate::stacked_borrows::{AccessKind, Item, Permission, SbTag};
-use crate::ProvenanceExtra;
-
- /// Exactly what cache size we should use is a difficult tradeoff. There will always be some
- /// workload whose `SbTag` working set exceeds the size of the cache and thus frequently
- /// falls back to linear searches of the borrow stack.
-/// The cost of making this value too large is that the loop in `Stack::insert` which ensures the
-/// entries in the cache stay correct after an insert becomes expensive.
-#[cfg(feature = "stack-cache")]
-const CACHE_LEN: usize = 32;
-
-/// Extra per-location state.
-#[derive(Clone, Debug)]
-pub struct Stack {
- /// Used *mostly* as a stack; never empty.
- /// Invariants:
- /// * Above a `SharedReadOnly` there can only be more `SharedReadOnly`.
- /// * Except for `Untagged`, no tag occurs in the stack more than once.
- borrows: Vec<Item>,
- /// If this is `Some(id)`, then the actual current stack is unknown. This can happen when
- /// wildcard pointers are used to access this location. What we do know is that `borrows` are at
- /// the top of the stack, and below it are arbitrarily many items whose `tag` is strictly less
- /// than `id`.
- /// When the bottom is unknown, `borrows` always has a `SharedReadOnly` or `Unique` at the bottom;
- /// we never have the unknown-to-known boundary in an SRW group.
- unknown_bottom: Option<SbTag>,
-
- /// A small LRU cache of searches of the borrow stack.
- #[cfg(feature = "stack-cache")]
- cache: StackCache,
- /// On a read, we need to disable all `Unique` above the granting item. We can avoid most of
- /// this scan by keeping track of the region of the borrow stack that may contain `Unique`s.
- #[cfg(feature = "stack-cache")]
- unique_range: Range<usize>,
-}
-
-impl Stack {
- pub fn retain(&mut self, tags: &FxHashSet<SbTag>) {
- let mut first_removed = None;
-
- // We never consider removing the bottom-most tag. For stacks without an unknown
- // bottom this preserves the base tag.
- // Note that the algorithm below is based on considering the tag at read_idx - 1,
- // so precisely considering the tag at index 0 for removal when we have an unknown
- // bottom would complicate the implementation. The simplification of not considering
- // it does not have a significant impact on the degree to which the GC mitigates
- // memory growth.
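- // This loop is an in-place compaction: `read_idx` scans every item while `write_idx`
- // tracks the position the next retained item is copied to.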
- let mut read_idx = 1;
- let mut write_idx = read_idx;
- while read_idx < self.borrows.len() {
- let left = self.borrows[read_idx - 1];
- let this = self.borrows[read_idx];
- let should_keep = match this.perm() {
- // SharedReadWrite is the simplest case: if it's unreachable, we can just remove it.
- Permission::SharedReadWrite => tags.contains(&this.tag()),
- // Only retain a Disabled tag if it is terminating a SharedReadWrite block.
- Permission::Disabled => left.perm() == Permission::SharedReadWrite,
- // Unique and SharedReadOnly can terminate a SharedReadWrite block, so only remove
- // them if they are both unreachable and not directly after a SharedReadWrite.
- Permission::Unique | Permission::SharedReadOnly =>
- left.perm() == Permission::SharedReadWrite || tags.contains(&this.tag()),
- };
-
- if should_keep {
- if read_idx != write_idx {
- self.borrows[write_idx] = self.borrows[read_idx];
- }
- write_idx += 1;
- } else if first_removed.is_none() {
- first_removed = Some(read_idx);
- }
-
- read_idx += 1;
- }
- self.borrows.truncate(write_idx);
-
- #[cfg(not(feature = "stack-cache"))]
- drop(first_removed); // This is only needed for the stack-cache
-
- #[cfg(feature = "stack-cache")]
- if let Some(first_removed) = first_removed {
- // Either end of unique_range may have shifted; all we really know is that we can't
- // have introduced a new Unique.
- if !self.unique_range.is_empty() {
- self.unique_range = 0..self.len();
- }
-
- // Replace any Items which have been collected with the base item, a known-good value.
- for i in 0..CACHE_LEN {
- if self.cache.idx[i] >= first_removed {
- self.cache.items[i] = self.borrows[0];
- self.cache.idx[i] = 0;
- }
- }
- }
- }
-}
-
-/// A very small cache of searches of a borrow stack, mapping `Item`s to their position in said stack.
-///
-/// It may seem like maintaining this cache is a waste for small stacks, but
-/// (a) iterating over small fixed-size arrays is super fast, and (b) empirically this helps *a lot*,
-/// probably because runtime is dominated by large stacks.
-#[cfg(feature = "stack-cache")]
-#[derive(Clone, Debug)]
-struct StackCache {
- items: [Item; CACHE_LEN], // Hot in find_granting
- idx: [usize; CACHE_LEN], // Hot in grant
-}
-
-#[cfg(feature = "stack-cache")]
-impl StackCache {
- /// When a tag is used, we call this function to add or refresh it in the cache.
- ///
- /// We use the position in the cache to represent how recently a tag was used; the first position
- /// is the most recently used tag. So an add shifts every element towards the end, and inserts
- /// the new element at the start. We lose the last element.
- /// This strategy is effective at keeping the most-accessed items in the cache, but it costs a
- /// linear shift across the entire cache when we add a new tag.
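- /// For example, with `CACHE_LEN = 4`, adding `e` to a cache holding `[a, b, c, d]`
- /// yields `[e, a, b, c]`; the least recently refreshed entry `d` is evicted.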
- fn add(&mut self, idx: usize, item: Item) {
- self.items.copy_within(0..CACHE_LEN - 1, 1);
- self.items[0] = item;
- self.idx.copy_within(0..CACHE_LEN - 1, 1);
- self.idx[0] = idx;
- }
-}
-
-impl PartialEq for Stack {
- fn eq(&self, other: &Self) -> bool {
- // All the semantics of Stack are in self.borrows, everything else is caching
- self.borrows == other.borrows
- }
-}
-
-impl Eq for Stack {}
-
-impl<'tcx> Stack {
- /// Panics if any of the caching invariants are broken:
- /// - the `StackCache` indices must refer to the corresponding items, and
- /// - no `Unique` item may occur outside of `unique_range`.
- #[cfg(all(feature = "stack-cache", debug_assertions))]
- fn verify_cache_consistency(&self) {
- // Only a full cache needs to be valid. Also see the comments in find_granting_cache
- // and set_unknown_bottom.
- if self.borrows.len() >= CACHE_LEN {
- for (tag, stack_idx) in self.cache.items.iter().zip(self.cache.idx.iter()) {
- assert_eq!(self.borrows[*stack_idx], *tag);
- }
- }
-
- // Check that all Unique items fall within unique_range.
- for (idx, item) in self.borrows.iter().enumerate() {
- if item.perm() == Permission::Unique {
- assert!(
- self.unique_range.contains(&idx),
- "{:?} {:?}",
- self.unique_range,
- self.borrows
- );
- }
- }
-
- // Check that the unique_range is a valid index into the borrow stack.
- // This asserts that the unique_range's start <= end.
- let _uniques = &self.borrows[self.unique_range.clone()];
-
- // We cannot assert that the unique range is precise.
- // Both ends may shift around when `Stack::retain` is called. Additionally,
- // when we pop items within the unique range, setting the end of the range precisely
- // requires doing a linear search of the borrow stack, which is exactly the kind of
- // operation that all this caching exists to avoid.
- }
-
- /// Find the item granting the given kind of access to the given tag, and return where
- /// it is on the stack. For wildcard tags, the given index is approximate, but if *no*
- /// index is given it means the match was *not* in the known part of the stack.
- /// `Ok(None)` indicates it matched the "unknown" part of the stack.
- /// `Err` indicates it was not found.
- pub(super) fn find_granting(
- &mut self,
- access: AccessKind,
- tag: ProvenanceExtra,
- exposed_tags: &FxHashSet<SbTag>,
- ) -> Result<Option<usize>, ()> {
- #[cfg(all(feature = "stack-cache", debug_assertions))]
- self.verify_cache_consistency();
-
- let ProvenanceExtra::Concrete(tag) = tag else {
- // Handle the wildcard case.
- // Go search the stack for an exposed tag.
- if let Some(idx) =
- self.borrows
- .iter()
- .enumerate() // we also need to know *where* in the stack
- .rev() // search top-to-bottom
- .find_map(|(idx, item)| {
- // If the item fits and *might* be this wildcard, use it.
- if item.perm().grants(access) && exposed_tags.contains(&item.tag()) {
- Some(idx)
- } else {
- None
- }
- })
- {
- return Ok(Some(idx));
- }
- // If we couldn't find it in the stack, check the unknown bottom.
- return if self.unknown_bottom.is_some() { Ok(None) } else { Err(()) };
- };
-
- if let Some(idx) = self.find_granting_tagged(access, tag) {
- return Ok(Some(idx));
- }
-
- // Couldn't find it in the stack; but if there is an unknown bottom it might be there.
- let found = self.unknown_bottom.is_some_and(|unknown_limit| {
- tag.0 < unknown_limit.0 // unknown_limit is an upper bound for what can be in the unknown bottom.
- });
- if found { Ok(None) } else { Err(()) }
- }
-
- fn find_granting_tagged(&mut self, access: AccessKind, tag: SbTag) -> Option<usize> {
- #[cfg(feature = "stack-cache")]
- if let Some(idx) = self.find_granting_cache(access, tag) {
- return Some(idx);
- }
-
- // If we didn't find the tag in the cache, fall back to a linear search of the
- // whole stack, and add the tag to the cache.
- for (stack_idx, item) in self.borrows.iter().enumerate().rev() {
- if tag == item.tag() && item.perm().grants(access) {
- #[cfg(feature = "stack-cache")]
- self.cache.add(stack_idx, *item);
- return Some(stack_idx);
- }
- }
- None
- }
-
- #[cfg(feature = "stack-cache")]
- fn find_granting_cache(&mut self, access: AccessKind, tag: SbTag) -> Option<usize> {
- // This looks like a common-sense optimization (do a linear search of whichever is shorter,
- // the cache or the borrow stack), but the optimization is minuscule; what this check
- // actually ensures is that we never access an invalid cache.
- // When a stack is created and when items are removed from the top of the borrow stack, we
- // need some valid value to populate the cache. In both cases, we try to use the bottom
- // item. But when the stack is cleared in `set_unknown_bottom`, there is nothing correct we
- // could place in the cache. However, due to the way we populate the cache in
- // `StackCache::add`, we know that once the borrow stack has grown larger than the cache,
- // every slot in the cache is valid.
- if self.borrows.len() <= CACHE_LEN {
- return None;
- }
- // Search the cache for the tag we're looking up
- let cache_idx = self.cache.items.iter().position(|t| t.tag() == tag)?;
- let stack_idx = self.cache.idx[cache_idx];
- // If we found the tag, look up its position in the stack to see if it grants
- // the required permission
- if self.cache.items[cache_idx].perm().grants(access) {
- // If it does, and it's not already in the most-recently-used position, re-insert it at
- // the most-recently-used position. This technically reduces the efficiency of the
- // cache by duplicating elements, but current benchmarks do not seem to benefit from
- // avoiding this duplication.
- // If the tag is in position 1, though, avoiding the duplicating add is trivial: it is
- // equivalent to a swap, so we just do that.
- if cache_idx == 1 {
- self.cache.items.swap(0, 1);
- self.cache.idx.swap(0, 1);
- } else if cache_idx > 1 {
- self.cache.add(stack_idx, self.cache.items[cache_idx]);
- }
- Some(stack_idx)
- } else {
- // Tag is in the cache, but it doesn't grant the required permission
- None
- }
- }
-
- pub fn insert(&mut self, new_idx: usize, new: Item) {
- self.borrows.insert(new_idx, new);
-
- #[cfg(feature = "stack-cache")]
- self.insert_cache(new_idx, new);
- }
-
- #[cfg(feature = "stack-cache")]
- fn insert_cache(&mut self, new_idx: usize, new: Item) {
- // Adjust the possibly-unique range if an insert occurs before or within it
- if self.unique_range.start >= new_idx {
- self.unique_range.start += 1;
- }
- if self.unique_range.end >= new_idx {
- self.unique_range.end += 1;
- }
- if new.perm() == Permission::Unique {
- // If this is the only Unique, set the range to contain just the new item.
- if self.unique_range.is_empty() {
- self.unique_range = new_idx..new_idx + 1;
- } else {
- // We already have other Unique items, expand the range to include the new item
- self.unique_range.start = self.unique_range.start.min(new_idx);
- self.unique_range.end = self.unique_range.end.max(new_idx + 1);
- }
- }
-
- // The above insert changes the meaning of every index in the cache >= new_idx, so now
- // we need to find every one of those indexes and increment it.
- // But if the insert is at the end (equivalent to a push), we can skip this step because
- // it didn't change the position of any other items.
- if new_idx != self.borrows.len() - 1 {
- for idx in &mut self.cache.idx {
- if *idx >= new_idx {
- *idx += 1;
- }
- }
- }
-
- // This primes the cache for the next access, which is almost always the just-added tag.
- self.cache.add(new_idx, new);
-
- #[cfg(debug_assertions)]
- self.verify_cache_consistency();
- }
-
- /// Construct a new `Stack` using the passed `Item` as the base tag.
- pub fn new(item: Item) -> Self {
- Stack {
- borrows: vec![item],
- unknown_bottom: None,
- #[cfg(feature = "stack-cache")]
- cache: StackCache { idx: [0; CACHE_LEN], items: [item; CACHE_LEN] },
- #[cfg(feature = "stack-cache")]
- unique_range: if item.perm() == Permission::Unique { 0..1 } else { 0..0 },
- }
- }
-
- pub fn get(&self, idx: usize) -> Option<Item> {
- self.borrows.get(idx).cloned()
- }
-
- #[allow(clippy::len_without_is_empty)] // Stacks are never empty
- pub fn len(&self) -> usize {
- self.borrows.len()
- }
-
- pub fn unknown_bottom(&self) -> Option<SbTag> {
- self.unknown_bottom
- }
-
- pub fn set_unknown_bottom(&mut self, tag: SbTag) {
- // We clear the borrow stack but the lookup cache doesn't support clearing per se. Instead,
- // there is a check explained in `find_granting_cache` which protects against accessing the
- // cache when it has been cleared and not yet refilled.
- self.borrows.clear();
- self.unknown_bottom = Some(tag);
- #[cfg(feature = "stack-cache")]
- {
- self.unique_range = 0..0;
- }
- }
-
- /// Find all `Unique` elements in this borrow stack above `disable_start`, pass a copy of them
- /// to the `visitor`, then set their `Permission` to `Disabled`.
- pub fn disable_uniques_starting_at(
- &mut self,
- disable_start: usize,
- mut visitor: impl FnMut(Item) -> crate::InterpResult<'tcx>,
- ) -> crate::InterpResult<'tcx> {
- #[cfg(feature = "stack-cache")]
- let unique_range = self.unique_range.clone();
- #[cfg(not(feature = "stack-cache"))]
- let unique_range = 0..self.len();
-
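- // Items outside `unique_range` are known not to be `Unique`, so we only need to scan
- // the overlap of that range with `disable_start..`.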
- if disable_start <= unique_range.end {
- let lower = unique_range.start.max(disable_start);
- let upper = unique_range.end;
- for item in &mut self.borrows[lower..upper] {
- if item.perm() == Permission::Unique {
- log::trace!("access: disabling item {:?}", item);
- visitor(*item)?;
- item.set_permission(Permission::Disabled);
- // Also update all copies of this item in the cache.
- #[cfg(feature = "stack-cache")]
- for it in &mut self.cache.items {
- if it.tag() == item.tag() {
- it.set_permission(Permission::Disabled);
- }
- }
- }
- }
- }
-
- #[cfg(feature = "stack-cache")]
- if disable_start <= self.unique_range.start {
- // We disabled all Unique items
- self.unique_range.start = 0;
- self.unique_range.end = 0;
- } else {
- // Truncate the range to only include items up to the index that we started disabling
- // at.
- self.unique_range.end = self.unique_range.end.min(disable_start);
- }
-
- #[cfg(all(feature = "stack-cache", debug_assertions))]
- self.verify_cache_consistency();
-
- Ok(())
- }
-
- /// Pops all `Item`s at index `start` and above off this `Stack` (top of the stack first),
- /// passing each popped `Item` to `visitor` before it is removed.
- pub fn pop_items_after<V: FnMut(Item) -> crate::InterpResult<'tcx>>(
- &mut self,
- start: usize,
- mut visitor: V,
- ) -> crate::InterpResult<'tcx> {
- while self.borrows.len() > start {
- let item = self.borrows.pop().unwrap();
- visitor(item)?;
- }
-
- #[cfg(feature = "stack-cache")]
- if !self.borrows.is_empty() {
- // After removing from the borrow stack, every aspect of our caching may be invalid, but it
- // is also possible that the whole cache is still valid. So instead of resetting the cache
- // to a trivially valid default state, we repair only the aspects that are now invalid.
- let base_tag = self.borrows[0];
- let mut removed = 0;
- let mut cursor = 0;
- // Remove invalid entries from the cache by rotating them to the end of the cache, then
- // keep track of how many invalid elements there are and overwrite them with the base tag.
- // The base tag here serves as a harmless default value.
- for _ in 0..CACHE_LEN - 1 {
- if self.cache.idx[cursor] >= start {
- self.cache.idx[cursor..CACHE_LEN - removed].rotate_left(1);
- self.cache.items[cursor..CACHE_LEN - removed].rotate_left(1);
- removed += 1;
- } else {
- cursor += 1;
- }
- }
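- // Reset the rotated-out entries to the base item. Starting at `CACHE_LEN - removed - 1`
- // rather than `CACHE_LEN - removed` is deliberate: the scan above runs only
- // `CACHE_LEN - 1` times, so exactly one slot may have gone unexamined, and resetting
- // that slot too keeps the cache trivially valid.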
- for i in CACHE_LEN - removed - 1..CACHE_LEN {
- self.cache.idx[i] = 0;
- self.cache.items[i] = base_tag;
- }
-
- if start <= self.unique_range.start {
- // We removed all the Unique items
- self.unique_range = 0..0;
- } else {
- // Ensure the range doesn't extend past the new top of the stack
- self.unique_range.end = self.unique_range.end.min(start);
- }
- } else {
- self.unique_range = 0..0;
- }
-
- #[cfg(all(feature = "stack-cache", debug_assertions))]
- self.verify_cache_consistency();
- Ok(())
- }
-}