//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-null)
//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the empty case!
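//!
//! For illustration (a sketch of that calling convention, not a doctest): a
//! zero-sized access at a dangling-but-aligned integer address is allowed, so the
//! alignment check must not be skipped just because `size == 0`:
//!
//!     check_align(addr, align)?; // must come first ...
//!     if size.bytes() == 0 {
//!         return Ok(()); // ... only then may the empty case short-circuit.
//!     }
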
use std::assert_matches::assert_matches;
use std::borrow::Cow;
use std::collections::VecDeque;
use std::fmt;
use std::ptr;

use rustc_ast::Mutability;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_middle::mir::display_allocation;
use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TyCtxt};
use rustc_target::abi::{Align, HasDataLayout, Size};

use crate::const_eval::CheckAlignment;

use super::{
    alloc_range, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, GlobalAlloc, InterpCx,
    InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance, Scalar,
};

#[derive(Debug, PartialEq, Copy, Clone)]
pub enum MemoryKind<T> {
    /// Stack memory. Error if deallocated except during a stack pop.
    Stack,
    /// Memory allocated by the `caller_location` intrinsic. Error if ever deallocated.
    CallerLocation,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
    Machine(T),
}

impl<T: MayLeak> MayLeak for MemoryKind<T> {
    #[inline]
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::CallerLocation => true,
            MemoryKind::Machine(k) => k.may_leak(),
        }
    }
}

impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MemoryKind::Stack => write!(f, "stack variable"),
            MemoryKind::CallerLocation => write!(f, "caller location"),
            MemoryKind::Machine(m) => write!(f, "{}", m),
        }
    }
}

/// The return value of `get_alloc_info` indicates the "kind" of the allocation.
pub enum AllocKind {
    /// A regular live data allocation.
    LiveData,
    /// A function allocation (that fn ptrs point to).
    Function,
    /// A (symbolic) vtable allocation.
    VTable,
    /// A dead allocation.
    Dead,
}

/// The value of a function pointer.
#[derive(Debug, Copy, Clone)]
pub enum FnVal<'tcx, Other> {
    Instance(Instance<'tcx>),
    Other(Other),
}

impl<'tcx, Other> FnVal<'tcx, Other> {
    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
        match self {
            FnVal::Instance(instance) => Ok(instance),
            FnVal::Other(_) => {
                throw_unsup_format!("'foreign' function pointers are not supported in this context")
            }
        }
    }
}

// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    /// Allocations local to this instance of the miri engine. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// global and looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a global (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a global, doing so will
    /// create a copy of the global allocation here.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) alloc_map: M::MemoryMap,

    /// Map for "extra" function pointers.
    extra_fn_ptr_map: FxHashMap<AllocId, M::ExtraFnVal>,

    /// To be able to compare pointers with null, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist anymore.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,
}

/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
#[derive(Copy, Clone)]
pub struct AllocRef<'a, 'tcx, Prov: Provenance, Extra> {
    alloc: &'a Allocation<Prov, Extra>,
    range: AllocRange,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
}

/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
pub struct AllocRefMut<'a, 'tcx, Prov: Provenance, Extra> {
    alloc: &'a mut Allocation<Prov, Extra>,
    range: AllocRange,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    pub fn new() -> Self {
        Memory {
            alloc_map: M::MemoryMap::default(),
            extra_fn_ptr_map: FxHashMap::default(),
            dead_alloc_map: FxHashMap::default(),
        }
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda)
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the machine pointer to the allocation. Must never be used
    /// for any other pointers, nor for TLS statics.
    ///
    /// Using the resulting pointer represents a *direct* access to that memory
    /// (e.g. by directly using a `static`),
    /// as opposed to access through a pointer that was created by the program.
    ///
    /// This function can fail only if `ptr` points to an `extern static`.
    pub fn global_base_pointer(
        &self,
        ptr: Pointer<AllocId>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let alloc_id = ptr.provenance;
        // We need to handle `extern static`.
        match self.tcx.try_get_global_alloc(alloc_id) {
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
                bug!("global memory cannot point to thread-local static")
            }
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
                return M::extern_static_base_pointer(self, def_id);
            }
            _ => {}
        }
        // And we need to get the provenance.
        Ok(M::adjust_alloc_base_pointer(self, ptr))
    }

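    // Sketch of a typical call (illustrative only): this is how an `AllocId`
    // obtained from the `tcx` becomes a pointer the machine can use, e.g. when a
    // `static` is referenced directly.
    //
    //     let ptr = ecx.global_base_pointer(Pointer::from(alloc_id))?;
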
    pub fn create_fn_alloc_ptr(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
    ) -> Pointer<M::Provenance> {
        let id = match fn_val {
            FnVal::Instance(instance) => self.tcx.create_fn_alloc(instance),
            FnVal::Other(extra) => {
                // FIXME(RalfJung): Should we have a cache here?
                let id = self.tcx.reserve_alloc_id();
                let old = self.memory.extra_fn_ptr_map.insert(id, extra);
                assert!(old.is_none());
                id
            }
        };
        // Functions are global allocations, so make sure we get the right base pointer.
        // We know this is not an `extern static` so this cannot fail.
        self.global_base_pointer(Pointer::from(id)).unwrap()
    }

    pub fn allocate_ptr(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let alloc = Allocation::uninit(size, align, M::PANIC_ON_ALLOC_FAIL)?;
        // We can `unwrap` since `alloc` contains no pointers.
        Ok(self.allocate_raw_ptr(alloc, kind).unwrap())
    }

    pub fn allocate_bytes_ptr(
        &mut self,
        bytes: &[u8],
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
        mutability: Mutability,
    ) -> Pointer<M::Provenance> {
        let alloc = Allocation::from_bytes(bytes, align, mutability);
        // We can `unwrap` since `alloc` contains no pointers.
        self.allocate_raw_ptr(alloc, kind).unwrap()
    }

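    // Sketch of typical usage (illustrative only; `MemoryKind::Stack` chosen as an
    // example kind): allocate, use, then deallocate with the *same* kind, since
    // `deallocate_ptr` checks that the kinds match.
    //
    //     let ptr = ecx.allocate_ptr(size, align, MemoryKind::Stack)?;
    //     // ... write through `ptr` ...
    //     ecx.deallocate_ptr(ptr.into(), None, MemoryKind::Stack)?;
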
    /// This can fail only if `alloc` contains provenance.
    pub fn allocate_raw_ptr(
        &mut self,
        alloc: Allocation,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let id = self.tcx.reserve_alloc_id();
        debug_assert_ne!(
            Some(kind),
            M::GLOBAL_KIND.map(MemoryKind::Machine),
            "dynamically allocating global memory"
        );
        let alloc = M::adjust_allocation(self, id, Cow::Owned(alloc), Some(kind))?;
        self.memory.alloc_map.insert(id, (kind, alloc.into_owned()));
        Ok(M::adjust_alloc_base_pointer(self, Pointer::from(id)))
    }

    pub fn reallocate_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        old_size_and_align: Option<(Size, Align)>,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr)?;
        if offset.bytes() != 0 {
            throw_ub_format!(
                "reallocating {:?} which does not point to the beginning of an object",
                ptr
            );
        }

        // For simplicity's sake, we implement `reallocate` as "alloc, copy, dealloc".
        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
        let new_ptr = self.allocate_ptr(new_size, new_align, kind)?;
        let old_size = match old_size_and_align {
            Some((size, _align)) => size,
            None => self.get_alloc_raw(alloc_id)?.size(),
        };
        // This will also call the access hooks.
        self.mem_copy(
            ptr,
            Align::ONE,
            new_ptr.into(),
            Align::ONE,
            old_size.min(new_size),
            /*nonoverlapping*/ true,
        )?;
        self.deallocate_ptr(ptr, old_size_and_align, kind)?;

        Ok(new_ptr)
    }

    #[instrument(skip(self), level = "debug")]
    pub fn deallocate_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        old_size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx> {
        let (alloc_id, offset, prov) = self.ptr_get_alloc_id(ptr)?;
        trace!("deallocating: {alloc_id:?}");

        if offset.bytes() != 0 {
            throw_ub_format!(
                "deallocating {:?} which does not point to the beginning of an object",
                ptr
            );
        }

        let Some((alloc_kind, mut alloc)) = self.memory.alloc_map.remove(&alloc_id) else {
            // Deallocating global memory -- always an error
            return Err(match self.tcx.try_get_global_alloc(alloc_id) {
                Some(GlobalAlloc::Function(..)) => {
                    err_ub_format!("deallocating {alloc_id:?}, which is a function")
                }
                Some(GlobalAlloc::VTable(..)) => {
                    err_ub_format!("deallocating {alloc_id:?}, which is a vtable")
                }
                Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
                    err_ub_format!("deallocating {alloc_id:?}, which is static memory")
                }
                None => err_ub!(PointerUseAfterFree(alloc_id)),
            }
            .into());
        };

        if alloc.mutability == Mutability::Not {
            throw_ub_format!("deallocating immutable allocation {alloc_id:?}");
        }
        if alloc_kind != kind {
            throw_ub_format!(
                "deallocating {alloc_id:?}, which is {alloc_kind} memory, using {kind} deallocation operation"
            );
        }
        if let Some((size, align)) = old_size_and_align {
            if size != alloc.size() || align != alloc.align {
                throw_ub_format!(
                    "incorrect layout on deallocation: {alloc_id:?} has size {} and alignment {}, but gave size {} and alignment {}",
                    alloc.size().bytes(),
                    alloc.align.bytes(),
                    size.bytes(),
                    align.bytes(),
                )
            }
        }

        // Let the machine take some extra action
        let size = alloc.size();
        M::before_memory_deallocation(
            *self.tcx,
            &mut self.machine,
            &mut alloc.extra,
            (alloc_id, prov),
            alloc_range(Size::ZERO, size),
        )?;

        // Don't forget to remember size and align of this now-dead allocation
        let old = self.memory.dead_alloc_map.insert(alloc_id, (size, alloc.align));
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        Ok(())
    }

    /// Internal helper function to determine the allocation and offset of a pointer (if any).
    fn get_ptr_access(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Option<(AllocId, Size, M::ProvenanceExtra)>> {
        self.check_and_deref_ptr(
            ptr,
            size,
            align,
            M::enforce_alignment(self),
            CheckInAllocMsg::MemoryAccessTest,
            |alloc_id, offset, prov| {
                let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
                Ok((size, align, (alloc_id, offset, prov)))
            },
        )
    }

    /// Check if the given pointer points to live memory of given `size` and `align`
    /// (ignoring `M::enforce_alignment`). The caller can control the error message for the
    /// out-of-bounds case.
    pub fn check_ptr_access_align(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
        align: Align,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx> {
        self.check_and_deref_ptr(
            ptr,
            size,
            align,
            CheckAlignment::Error,
            msg,
            |alloc_id, _offset, _prov| {
                let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
                Ok((size, align, ()))
            },
        )?;
        Ok(())
    }

    /// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
    /// to the allocation it points to. Supports both shared and mutable references, as the actual
    /// checking is offloaded to a helper closure. `align` defines whether and which alignment check
    /// is done. Returns `None` for size 0, and otherwise `Some` of what `alloc_size` returned.
    fn check_and_deref_ptr<T>(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
        align: Align,
        check: CheckAlignment,
        msg: CheckInAllocMsg,
        alloc_size: impl FnOnce(
            AllocId,
            Size,
            M::ProvenanceExtra,
        ) -> InterpResult<'tcx, (Size, Align, T)>,
    ) -> InterpResult<'tcx, Option<T>> {
        Ok(match self.ptr_try_get_alloc_id(ptr) {
            Err(addr) => {
                // We couldn't get a proper allocation. This is only okay if the access size is 0,
                // and the address is not null.
                if size.bytes() > 0 || addr == 0 {
                    throw_ub!(DanglingIntPointer(addr, msg));
                }
                // Must be aligned.
                if check.should_check() {
                    self.check_offset_align(addr, align, check)?;
                }
                None
            }
            Ok((alloc_id, offset, prov)) => {
                let (alloc_size, alloc_align, ret_val) = alloc_size(alloc_id, offset, prov)?;
                // Test bounds. This also ensures non-null.
                // It is sufficient to check this for the end pointer. Also check for overflow!
                if offset.checked_add(size, &self.tcx).map_or(true, |end| end > alloc_size) {
                    throw_ub!(PointerOutOfBounds {
                        alloc_id,
                        alloc_size,
                        ptr_offset: self.machine_usize_to_isize(offset.bytes()),
                        ptr_size: size,
                        msg,
                    })
                }
                // Ensure we never consider the null pointer dereferenceable.
                if M::Provenance::OFFSET_IS_ADDR {
                    assert_ne!(ptr.addr(), Size::ZERO);
                }
                // Test align. Check this last; if both bounds and alignment are violated
                // we want the error to be about the bounds.
                if check.should_check() {
                    if M::use_addr_for_alignment_check(self) {
                        // `use_addr_for_alignment_check` can only be true if `OFFSET_IS_ADDR` is true.
                        self.check_offset_align(ptr.addr().bytes(), align, check)?;
                    } else {
                        // Check allocation alignment and offset alignment.
                        if alloc_align.bytes() < align.bytes() {
                            M::alignment_check_failed(self, alloc_align, align, check)?;
                        }
                        self.check_offset_align(offset.bytes(), align, check)?;
                    }
                }

                // We can still be zero-sized in this branch, in which case we have to
                // return `None`.
                if size.bytes() == 0 { None } else { Some(ret_val) }
            }
        })
    }

    fn check_offset_align(
        &self,
        offset: u64,
        align: Align,
        check: CheckAlignment,
    ) -> InterpResult<'tcx> {
        if offset % align.bytes() == 0 {
            Ok(())
        } else {
            // The biggest power of two through which `offset` is divisible.
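            // (Worked example, for illustration: offset 24 = 0b11000 has 3 trailing
            // zeros, so `offset_pow2` = 8 -- the offset is 8-aligned but not
            // 16-aligned, and the error reports the found alignment of 8.)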
            let offset_pow2 = 1 << offset.trailing_zeros();
            M::alignment_check_failed(self, Align::from_bytes(offset_pow2).unwrap(), align, check)
        }
    }
}

/// Allocation accessors
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Helper function to obtain a global (tcx) allocation.
    /// This attempts to return a reference to an existing allocation if
    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
    /// this machine use the same pointer provenance, so it is indirected through
    /// `M::adjust_allocation`.
    fn get_global_alloc(
        &self,
        id: AllocId,
        is_write: bool,
    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra>>> {
        let (alloc, def_id) = match self.tcx.try_get_global_alloc(id) {
            Some(GlobalAlloc::Memory(mem)) => {
                // Memory of a constant or promoted or anonymous memory referenced by a static.
                (mem, None)
            }
            Some(GlobalAlloc::Function(..)) => throw_ub!(DerefFunctionPointer(id)),
            Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)),
            None => throw_ub!(PointerUseAfterFree(id)),
            Some(GlobalAlloc::Static(def_id)) => {
                assert!(self.tcx.is_static(def_id));
                assert!(!self.tcx.is_thread_local_static(def_id));
                // Notice that every static has two `AllocId`s that will resolve to the same
                // thing here: one maps to `GlobalAlloc::Static`, this is the "lazy" ID,
                // and the other one maps to `GlobalAlloc::Memory`, this is returned by
                // `eval_static_initializer` and it is the "resolved" ID.
                // The resolved ID is never used by the interpreted program, it is hidden.
                // This is relied upon for soundness of const-patterns; a pointer to the resolved
                // ID would "sidestep" the checks that make sure consts do not point to statics!
                // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
                // contains a reference to memory that was created during its evaluation (i.e., not
                // to another static), those inner references only exist in "resolved" form.
                if self.tcx.is_foreign_item(def_id) {
                    // This is unreachable in Miri, but can happen in CTFE where we actually *do* support
                    // referencing arbitrary (declared) extern statics.
                    throw_unsup!(ReadExternStatic(def_id));
                }

                // We don't give a span -- statics don't need that, they cannot be generic or associated.
                let val = self.ctfe_query(None, |tcx| tcx.eval_static_initializer(def_id))?;
                (val, Some(def_id))
            }
        };
        M::before_access_global(*self.tcx, &self.machine, id, alloc, def_id, is_write)?;
        // We got tcx memory. Let the machine initialize its "extra" stuff.
        M::adjust_allocation(
            self,
            id, // always use the ID we got as input, not the "hidden" one.
            Cow::Borrowed(alloc.inner()),
            M::GLOBAL_KIND.map(MemoryKind::Machine),
        )
    }

    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    ///
    /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
    fn get_alloc_raw(
        &self,
        id: AllocId,
    ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra>> {
        // The error type of the inner closure here is somewhat funny. We have two
        // ways of "erroring": An actual error, or because we got a reference from
        // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
        // So the error type is `InterpResult<'tcx, &Allocation<M::Provenance>>`.
        let a = self.memory.alloc_map.get_or(id, || {
            let alloc = self.get_global_alloc(id, /*is_write*/ false).map_err(Err)?;
            match alloc {
                Cow::Borrowed(alloc) => {
                    // We got a ref, cheaply return that as an "error" so that the
                    // map does not get mutated.
                    Err(Ok(alloc))
                }
                Cow::Owned(alloc) => {
                    // Need to put it into the map and return a ref to that
                    let kind = M::GLOBAL_KIND.expect(
                        "I got a global allocation that I have to copy but the machine does \
                         not expect that to happen",
                    );
                    Ok((MemoryKind::Machine(kind), alloc))
                }
            }
        });
        // Now unpack that funny error type
        match a {
            Ok(a) => Ok(&a.1),
            Err(a) => a,
        }
    }

568 /// "Safe" (bounds and align-checked) allocation access.
569 pub fn get_ptr_alloc<'a>(
571 ptr: Pointer<Option<M::Provenance>>,
574 ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra>>> {
575 let ptr_and_alloc = self.check_and_deref_ptr(
579 M::enforce_alignment(self),
580 CheckInAllocMsg::MemoryAccessTest,
581 |alloc_id, offset, prov| {
582 let alloc = self.get_alloc_raw(alloc_id)?;
583 Ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
586 if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
587 let range = alloc_range(offset, size);
588 M::before_memory_read(*self.tcx, &self.machine, &alloc.extra, (alloc_id, prov), range)?;
589 Ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
591 // Even in this branch we have to be sure that we actually access the allocation, in
592 // order to ensure that `static FOO: Type = FOO;` causes a cycle error instead of
593 // magically pulling *any* ZST value from the ether. However, the `get_raw` above is
594 // always called when `ptr` has an `AllocId`.
    /// Return the `extra` field of the given allocation.
    pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
        Ok(&self.get_alloc_raw(id)?.extra)
    }

    /// Return the `mutability` field of the given allocation.
    pub fn get_alloc_mutability<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, Mutability> {
        Ok(self.get_alloc_raw(id)?.mutability)
    }

    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    ///
    /// Also returns a ptr to `self.extra` so that the caller can use it in parallel with the
    /// allocation.
    fn get_alloc_raw_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra>, &mut M)> {
        // We have "NLL problem case #3" here, which cannot be worked around without loss of
        // efficiency even for the common case where the key is in the map.
        // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
        // (Cannot use `get_mut_or` since `get_global_alloc` needs `&self`.)
        if self.memory.alloc_map.get_mut(id).is_none() {
            // Allocation not found locally, so go look it up globally.
            let alloc = self.get_global_alloc(id, /*is_write*/ true)?;
            let kind = M::GLOBAL_KIND.expect(
                "I got a global allocation that I have to copy but the machine does \
                 not expect that to happen",
            );
            self.memory.alloc_map.insert(id, (MemoryKind::Machine(kind), alloc.into_owned()));
        }

        let (_kind, alloc) = self.memory.alloc_map.get_mut(id).unwrap();
        if alloc.mutability == Mutability::Not {
            throw_ub!(WriteToReadOnly(id))
        }
        Ok((alloc, &mut self.machine))
    }

640 /// "Safe" (bounds and align-checked) allocation access.
641 pub fn get_ptr_alloc_mut<'a>(
643 ptr: Pointer<Option<M::Provenance>>,
646 ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra>>> {
647 let parts = self.get_ptr_access(ptr, size, align)?;
648 if let Some((alloc_id, offset, prov)) = parts {
650 // FIXME: can we somehow avoid looking up the allocation twice here?
651 // We cannot call `get_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`.
652 let (alloc, machine) = self.get_alloc_raw_mut(alloc_id)?;
653 let range = alloc_range(offset, size);
654 M::before_memory_write(tcx, machine, &mut alloc.extra, (alloc_id, prov), range)?;
655 Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id }))
661 /// Return the `extra` field of the given allocation.
662 pub fn get_alloc_extra_mut<'a>(
665 ) -> InterpResult<'tcx, (&'a mut M::AllocExtra, &'a mut M)> {
666 let (alloc, machine) = self.get_alloc_raw_mut(id)?;
667 Ok((&mut alloc.extra, machine))
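    // Sketch of how the safe accessors compose (illustrative only; mirrors the
    // `AllocRef`/`AllocRefMut` read/write helpers defined further below):
    //
    //     if let Some(alloc) = ecx.get_ptr_alloc(ptr, size, align)? {
    //         let val = alloc.read_integer(alloc_range(Size::ZERO, size))?;
    //     } // `None` means the access was zero-sized and there is nothing to read.
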
    /// Obtain the size and alignment of an allocation, even if that allocation has
    /// been deallocated.
    pub fn get_alloc_info(&self, id: AllocId) -> (Size, Align, AllocKind) {
        // # Regular allocations
        // Don't use `self.get_raw` here as that will
        // a) cause cycles in case `id` refers to a static
        // b) duplicate a global's allocation in miri
        if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
            return (alloc.size(), alloc.align, AllocKind::LiveData);
        }

        // # Function pointers
        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
        if self.get_fn_alloc(id).is_some() {
            return (Size::ZERO, Align::ONE, AllocKind::Function);
        }

        // # Statics
        // Can't do this in the match argument, we may get cycle errors since the lock would
        // be held throughout the match.
        match self.tcx.try_get_global_alloc(id) {
            Some(GlobalAlloc::Static(def_id)) => {
                assert!(self.tcx.is_static(def_id));
                assert!(!self.tcx.is_thread_local_static(def_id));
                // Use size and align of the type.
                let ty = self.tcx.type_of(def_id);
                let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
                assert!(layout.is_sized());
                (layout.size, layout.align.abi, AllocKind::LiveData)
            }
            Some(GlobalAlloc::Memory(alloc)) => {
                // Need to duplicate the logic here, because the global allocations have
                // different associated types than the interpreter-local ones.
                let alloc = alloc.inner();
                (alloc.size(), alloc.align, AllocKind::LiveData)
            }
            Some(GlobalAlloc::Function(_)) => bug!("We already checked function pointers above"),
            Some(GlobalAlloc::VTable(..)) => {
                // No data to be accessed here. But vtables are pointer-aligned.
                return (Size::ZERO, self.tcx.data_layout.pointer_align.abi, AllocKind::VTable);
            }
            // The rest must be dead.
            None => {
                // Deallocated pointers are allowed, we should be able to find
                // their size and alignment in `dead_alloc_map`.
                let (size, align) = *self
                    .memory
                    .dead_alloc_map
                    .get(&id)
                    .expect("deallocated pointers should all be recorded in `dead_alloc_map`");
                (size, align, AllocKind::Dead)
            }
        }
    }

    /// Obtain the size and alignment of a live allocation.
    pub fn get_live_alloc_size_and_align(&self, id: AllocId) -> InterpResult<'tcx, (Size, Align)> {
        let (size, align, kind) = self.get_alloc_info(id);
        if matches!(kind, AllocKind::Dead) {
            throw_ub!(PointerUseAfterFree(id))
        }
        Ok((size, align))
    }

    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
        if let Some(extra) = self.memory.extra_fn_ptr_map.get(&id) {
            Some(FnVal::Other(*extra))
        } else {
            match self.tcx.try_get_global_alloc(id) {
                Some(GlobalAlloc::Function(instance)) => Some(FnVal::Instance(instance)),
                _ => None,
            }
        }
    }

    pub fn get_ptr_fn(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        trace!("get_ptr_fn({:?})", ptr);
        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr)?;
        if offset.bytes() != 0 {
            throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
        }
        self.get_fn_alloc(alloc_id)
            .ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))).into())
    }

    pub fn get_ptr_vtable(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> InterpResult<'tcx, (Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>)> {
        trace!("get_ptr_vtable({:?})", ptr);
        let (alloc_id, offset, _tag) = self.ptr_get_alloc_id(ptr)?;
        if offset.bytes() != 0 {
            throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
        }
        match self.tcx.try_get_global_alloc(alloc_id) {
            Some(GlobalAlloc::VTable(ty, trait_ref)) => Ok((ty, trait_ref)),
            _ => throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset))),
        }
    }

    pub fn alloc_mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
        self.get_alloc_raw_mut(id)?.0.mutability = Mutability::Not;
        Ok(())
    }

    /// Create a lazy debug printer that prints the given allocation and all allocations it points
    /// to, recursively.
    pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'mir, 'tcx, M> {
        self.dump_allocs(vec![id])
    }

    /// Create a lazy debug printer for a list of allocations and all allocations they point to,
    /// recursively.
    pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'mir, 'tcx, M> {
        allocs.sort();
        allocs.dedup();
        DumpAllocs { ecx: self, allocs }
    }

    /// Print leaked memory. Allocations reachable from `static_roots` or a `Global` allocation
    /// are not considered leaked. Leaks whose kind `may_leak()` returns true are not reported.
    pub fn leak_report(&self, static_roots: &[AllocId]) -> usize {
        // Collect the set of allocations that are *reachable* from `Global` allocations.
        let reachable = {
            let mut reachable = FxHashSet::default();
            let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
            let mut todo: Vec<_> =
                self.memory.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
                    if Some(kind) == global_kind { Some(id) } else { None }
                });
            todo.extend(static_roots);
            while let Some(id) = todo.pop() {
                if reachable.insert(id) {
                    // This is a new allocation, add the allocations it points to, to `todo`.
                    if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
                        todo.extend(
                            alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id()),
                        );
                    }
                }
            }
            reachable
        };

        // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
        let leaks: Vec<_> = self.memory.alloc_map.filter_map_collect(|&id, &(kind, _)| {
            if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
        });
        let n = leaks.len();
        if n > 0 {
            eprintln!("The following memory was leaked: {:?}", self.dump_allocs(leaks));
        }
        n
    }
}

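// For illustration (hypothetical driver code, not part of this module): a machine
// can call `leak_report` once execution finishes and fail if anything leaked.
//
//     let num_leaked = ecx.leak_report(&[]);
//     assert_eq!(num_leaked, 0);
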
/// There's no way to use this directly, it's just a helper struct for the `dump_alloc(s)` methods.
pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    ecx: &'a InterpCx<'mir, 'tcx, M>,
    allocs: Vec<AllocId>,
}

impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Cannot be a closure because it is generic in `Prov`, `Extra`.
        fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra>(
            fmt: &mut std::fmt::Formatter<'_>,
            tcx: TyCtxt<'tcx>,
            allocs_to_print: &mut VecDeque<AllocId>,
            alloc: &Allocation<Prov, Extra>,
        ) -> std::fmt::Result {
            for alloc_id in alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id())
            {
                allocs_to_print.push_back(alloc_id);
            }
            write!(fmt, "{}", display_allocation(tcx, alloc))
        }

        let mut allocs_to_print: VecDeque<_> = self.allocs.iter().copied().collect();
        // `allocs_printed` contains all allocations that we have already printed.
        let mut allocs_printed = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            if !allocs_printed.insert(id) {
                // Already printed, so skip this.
                continue;
            }

            write!(fmt, "{id:?}")?;
            match self.ecx.memory.alloc_map.get(id) {
                Some(&(kind, ref alloc)) => {
                    // normal alloc
                    write!(fmt, " ({}, ", kind)?;
                    write_allocation_track_relocs(
                        &mut *fmt,
                        *self.ecx.tcx,
                        &mut allocs_to_print,
                        alloc,
                    )?;
                }
                None => {
                    // global alloc
                    match self.ecx.tcx.try_get_global_alloc(id) {
                        Some(GlobalAlloc::Memory(alloc)) => {
                            write!(fmt, " (unchanged global, ")?;
                            write_allocation_track_relocs(
                                &mut *fmt,
                                *self.ecx.tcx,
                                &mut allocs_to_print,
                                alloc.inner(),
                            )?;
                        }
                        Some(GlobalAlloc::Function(func)) => {
                            write!(fmt, " (fn: {func})")?;
                        }
                        Some(GlobalAlloc::VTable(ty, Some(trait_ref))) => {
                            write!(fmt, " (vtable: impl {trait_ref} for {ty})")?;
                        }
                        Some(GlobalAlloc::VTable(ty, None)) => {
                            write!(fmt, " (vtable: impl <auto trait> for {ty})")?;
                        }
                        Some(GlobalAlloc::Static(did)) => {
                            write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?;
                        }
                        None => {
                            write!(fmt, " (deallocated)")?;
                        }
                    }
                }
            }
            writeln!(fmt)?;
        }
        Ok(())
    }
}

/// Reading and writing.
impl<'tcx, 'a, Prov: Provenance, Extra> AllocRefMut<'a, 'tcx, Prov, Extra> {
    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn write_scalar(&mut self, range: AllocRange, val: Scalar<Prov>) -> InterpResult<'tcx> {
        let range = self.range.subrange(range);
        debug!("write_scalar at {:?}{range:?}: {val:?}", self.alloc_id);
        Ok(self
            .alloc
            .write_scalar(&self.tcx, range, val)
            .map_err(|e| e.to_interp_error(self.alloc_id))?)
    }

    /// `offset` is relative to this allocation reference, not the base of the allocation.
    pub fn write_ptr_sized(&mut self, offset: Size, val: Scalar<Prov>) -> InterpResult<'tcx> {
        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val)
    }

    /// Mark the entire referenced range as uninitialized
    pub fn write_uninit(&mut self) -> InterpResult<'tcx> {
        Ok(self
            .alloc
            .write_uninit(&self.tcx, self.range)
            .map_err(|e| e.to_interp_error(self.alloc_id))?)
    }
}

impl<'tcx, 'a, Prov: Provenance, Extra> AllocRef<'a, 'tcx, Prov, Extra> {
    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn read_scalar(
        &self,
        range: AllocRange,
        read_provenance: bool,
    ) -> InterpResult<'tcx, Scalar<Prov>> {
        let range = self.range.subrange(range);
        let res = self
            .alloc
            .read_scalar(&self.tcx, range, read_provenance)
            .map_err(|e| e.to_interp_error(self.alloc_id))?;
        debug!("read_scalar at {:?}{range:?}: {res:?}", self.alloc_id);
        Ok(res)
    }

    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn read_integer(&self, range: AllocRange) -> InterpResult<'tcx, Scalar<Prov>> {
        self.read_scalar(range, /*read_provenance*/ false)
    }

    /// `offset` is relative to this allocation reference, not the base of the allocation.
    pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, Scalar<Prov>> {
        self.read_scalar(
            alloc_range(offset, self.tcx.data_layout().pointer_size),
            /*read_provenance*/ true,
        )
    }

    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn get_bytes_strip_provenance<'b>(&'b self) -> InterpResult<'tcx, &'a [u8]> {
        Ok(self
            .alloc
            .get_bytes_strip_provenance(&self.tcx, self.range)
            .map_err(|e| e.to_interp_error(self.alloc_id))?)
    }

    /// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
    pub(crate) fn has_provenance(&self) -> bool {
        !self.alloc.provenance().range_empty(self.range, &self.tcx)
    }
}

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Reads the given number of bytes from memory, and strips their provenance if possible.
    /// Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
    pub fn read_bytes_ptr_strip_provenance(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
    ) -> InterpResult<'tcx, &[u8]> {
        let Some(alloc_ref) = self.get_ptr_alloc(ptr, size, Align::ONE)? else {
            // zero-sized access
            return Ok(&[]);
        };
        // Side-step AllocRef and directly access the underlying bytes more efficiently.
        // (We are staying inside the bounds here so all is good.)
        Ok(alloc_ref
            .alloc
            .get_bytes_strip_provenance(&alloc_ref.tcx, alloc_ref.range)
            .map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?)
    }

    /// Writes the given stream of bytes into memory.
    ///
    /// Performs appropriate bounds checks.
    pub fn write_bytes_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        src: impl IntoIterator<Item = u8>,
    ) -> InterpResult<'tcx> {
        let mut src = src.into_iter();
        let (lower, upper) = src.size_hint();
        let len = upper.expect("can only write bounded iterators");
        assert_eq!(lower, len, "can only write iterators with a precise length");

        let size = Size::from_bytes(len);
        let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size, Align::ONE)? else {
            // zero-sized access
            assert_matches!(
                src.next(),
                None,
                "iterator said it was empty but returned an element"
            );
            return Ok(());
        };

        // Side-step AllocRef and directly access the underlying bytes more efficiently.
        // (We are staying inside the bounds here so all is good.)
        let alloc_id = alloc_ref.alloc_id;
        let bytes = alloc_ref
            .alloc
            .get_bytes_mut(&alloc_ref.tcx, alloc_ref.range)
            .map_err(move |e| e.to_interp_error(alloc_id))?;
        // `zip` would stop when the first iterator ends; we want to definitely
        // cover all of `bytes`.
        for dest in bytes {
            *dest = src.next().expect("iterator was shorter than it said it would be");
        }
        assert_matches!(src.next(), None, "iterator was longer than it said it would be");
        Ok(())
    }

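    // Sketch of a typical call (illustrative buffer): the source iterator must
    // report an exact length via `size_hint`, as the asserts above require; slice
    // iterators qualify.
    //
    //     ecx.write_bytes_ptr(ptr, b"hello".iter().copied())?;
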
    pub fn mem_copy(
        &mut self,
        src: Pointer<Option<M::Provenance>>,
        src_align: Align,
        dest: Pointer<Option<M::Provenance>>,
        dest_align: Align,
        size: Size,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        self.mem_copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
    }

    pub fn mem_copy_repeatedly(
        &mut self,
        src: Pointer<Option<M::Provenance>>,
        src_align: Align,
        dest: Pointer<Option<M::Provenance>>,
        dest_align: Align,
        size: Size,
        num_copies: u64,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        let tcx = self.tcx;
        // We need to do our own bounds-checks.
        let src_parts = self.get_ptr_access(src, size, src_align)?;
        let dest_parts = self.get_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication

        // FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
        // and once below to get the underlying `&[mut] Allocation`.

        // Source alloc preparations and access hooks.
        let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
            // Zero-sized *source*, that means dest is also zero-sized and we have nothing to do.
            return Ok(());
        };
        let src_alloc = self.get_alloc_raw(src_alloc_id)?;
        let src_range = alloc_range(src_offset, size);
        M::before_memory_read(
            *tcx,
            &self.machine,
            &src_alloc.extra,
            (src_alloc_id, src_prov),
            src_range,
        )?;
        // We need the `dest` ptr for the next operation, so we get it now.
        // We already did the source checks and called the hooks so we are good to return early.
        let Some((dest_alloc_id, dest_offset, dest_prov)) = dest_parts else {
            // Zero-sized *destination*.
            return Ok(());
        };

        // Prepare getting source provenance.
        let src_bytes = src_alloc.get_bytes_unchecked(src_range).as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
        // first copy the provenance to a temporary buffer, because
        // `get_bytes_mut` will clear the provenance, which is correct,
        // since we don't want to keep any provenance at the target.
        // This will also error if copying partial provenance is not supported.
        let provenance = src_alloc
            .provenance()
            .prepare_copy(src_range, dest_offset, num_copies, self)
            .map_err(|e| e.to_interp_error(dest_alloc_id))?;
        // Prepare a copy of the initialization mask.
        let init = src_alloc.init_mask().prepare_copy(src_range);

        // Destination alloc preparations and access hooks.
        let (dest_alloc, extra) = self.get_alloc_raw_mut(dest_alloc_id)?;
        let dest_range = alloc_range(dest_offset, size * num_copies);
        M::before_memory_write(
            *tcx,
            extra,
            &mut dest_alloc.extra,
            (dest_alloc_id, dest_prov),
            dest_range,
        )?;
        let dest_bytes = dest_alloc
            .get_bytes_mut_ptr(&tcx, dest_range)
            .map_err(|e| e.to_interp_error(dest_alloc_id))?
            .as_mut_ptr();

        if init.no_bytes_init() {
            // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
            // is marked as uninitialized but we otherwise omit changing the byte representation which may
            // be arbitrary for uninitialized bytes.
            // This also avoids writing to the target bytes so that the backing allocation is never
            // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
            // operating systems this can avoid physically allocating the page.
            dest_alloc
                .write_uninit(&tcx, dest_range)
                .map_err(|e| e.to_interp_error(dest_alloc_id))?;
            // We can forget about the provenance, this is all not initialized anyway.
            return Ok(());
        }

        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        // The pointers above remain valid even if the `HashMap` table is moved around because they
        // point into the `Vec` storing the bytes.
        unsafe {
            if src_alloc_id == dest_alloc_id {
                if nonoverlapping {
                    // `Size` additions
                    if (src_offset <= dest_offset && src_offset + size > dest_offset)
                        || (dest_offset <= src_offset && dest_offset + size > src_offset)
                    {
                        throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
                    }
                }
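                // (Worked example of the overlap test, for illustration: with
                // `src_offset = 0`, `dest_offset = 4`, and `size = 8`, the first
                // clause is `0 <= 4 && 0 + 8 > 4`, so the ranges [0, 8) and
                // [4, 12) overlap and the copy is rejected.)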
                for i in 0..num_copies {
                    ptr::copy(
                        src_bytes,
                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
                        size.bytes_usize(),
                    );
                }
            } else {
                for i in 0..num_copies {
                    ptr::copy_nonoverlapping(
                        src_bytes,
                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
                        size.bytes_usize(),
                    );
                }
            }
        }

        // now fill in all the "init" data
        dest_alloc.init_mask_apply_copy(
            init,
            alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
            num_copies,
        );
        // copy the provenance to the destination
        dest_alloc.provenance_apply_copy(provenance);

        Ok(())
    }
}

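// For illustration (hypothetical sizes): with `size = 4` and `num_copies = 3`,
// `mem_copy_repeatedly` copies the 4 source bytes to `dest`, `dest + 4`, and
// `dest + 8`, replicating the init mask and provenance for each of the 3 copies;
// `mem_copy` is simply the `num_copies = 1` special case.
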
/// Machine pointer introspection.
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Test if this value might be null.
    /// If the machine does not support ptr-to-int casts, this is conservative.
    pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
        Ok(match scalar.try_to_int() {
            Ok(int) => int.is_null(),
            Err(_) => {
                // Can only happen during CTFE.
                let ptr = scalar.to_pointer(self)?;
                match self.ptr_try_get_alloc_id(ptr) {
                    Ok((alloc_id, offset, _)) => {
                        let (size, _align, _kind) = self.get_alloc_info(alloc_id);
                        // If the pointer is out-of-bounds, it may be null.
                        // Note that one-past-the-end (offset == size) is still inbounds, and never null.
                        offset > size
                    }
                    Err(_offset) => bug!("a non-int scalar is always a pointer"),
                }
            }
        })
    }

    /// Turning a "maybe pointer" into a proper pointer (and some information
    /// about where it points), or an absolute address.
    pub fn ptr_try_get_alloc_id(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> Result<(AllocId, Size, M::ProvenanceExtra), u64> {
        match ptr.into_pointer_or_addr() {
            Ok(ptr) => match M::ptr_get_alloc(self, ptr) {
                Some((alloc_id, offset, extra)) => Ok((alloc_id, offset, extra)),
                None => {
                    assert!(M::Provenance::OFFSET_IS_ADDR);
                    let (_, addr) = ptr.into_parts();
                    Err(addr.bytes())
                }
            },
            Err(addr) => Err(addr.bytes()),
        }
    }

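    // For illustration (hypothetical values): a pointer carrying provenance yields
    // `Ok((alloc_id, offset, extra))`, while a plain integer address (e.g. one that
    // survived a ptr-to-int round-trip) yields `Err(absolute_address)`.
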
    /// Turning a "maybe pointer" into a proper pointer (and some information about where it points).
    #[inline(always)]
    pub fn ptr_get_alloc_id(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> InterpResult<'tcx, (AllocId, Size, M::ProvenanceExtra)> {
        self.ptr_try_get_alloc_id(ptr).map_err(|offset| {
            err_ub!(DanglingIntPointer(offset, CheckInAllocMsg::InboundsTest)).into()
        })
    }
}