//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-NULL)
//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the empty case!
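//!
//! For instance, a zero-sized access may legitimately be given an integer address.
//! A minimal sketch of what that looks like from the caller's side (assumes a
//! `memory: Memory<'_, '_, M>` in scope; illustrative, not a compilable doctest):
//!
//! ```ignore (illustrative sketch)
//! // A zero-sized access at address 4: fine as long as 4 is aligned enough.
//! // The alignment/NULL checks still run before the `size == 0` short-circuit.
//! let addr = Scalar::from_machine_usize(4, &memory);
//! let ptr = memory.check_ptr_access(addr, Size::ZERO, Align::from_bytes(4).unwrap())?;
//! assert!(ptr.is_none()); // zero-sized accesses need no actual `Pointer`
//! ```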

use std::borrow::Cow;
use std::collections::VecDeque;
use std::convert::TryFrom;
use std::fmt;
use std::ptr;

use rustc_ast::ast::Mutability;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_hir::def_id::DefId;
use rustc_middle::ty::{self, Instance, ParamEnv, TyCtxt};
use rustc_target::abi::{Align, HasDataLayout, Size, TargetDataLayout};

use super::{
    AllocId, AllocMap, Allocation, AllocationExtra, CheckInAllocMsg, GlobalAlloc, GlobalId,
    InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Scalar,
};

use crate::util::pretty;

#[derive(Debug, PartialEq, Copy, Clone)]
pub enum MemoryKind<T> {
    /// Stack memory. Error if deallocated except during a stack pop.
    Stack,
    /// Memory backing vtables. Error if ever deallocated.
    Vtable,
    /// Memory allocated by the `caller_location` intrinsic. Error if ever deallocated.
    CallerLocation,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
    Machine(T),
}

impl<T: MayLeak> MayLeak for MemoryKind<T> {
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::Vtable => true,
            MemoryKind::CallerLocation => true,
            MemoryKind::Machine(k) => k.may_leak(),
        }
    }
}

impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MemoryKind::Stack => write!(f, "stack variable"),
            MemoryKind::Vtable => write!(f, "vtable"),
            MemoryKind::CallerLocation => write!(f, "caller location"),
            MemoryKind::Machine(m) => write!(f, "{}", m),
        }
    }
}

/// Used by `get_size_and_align` to indicate whether the allocation needs to be live.
#[derive(Debug, Copy, Clone)]
pub enum AllocCheck {
    /// Allocation must be live and not a function pointer.
    Dereferenceable,
    /// Allocation must be live, but may be a function pointer.
    Live,
    /// Allocation may be dead.
    MaybeDead,
}

/// The value of a function pointer.
#[derive(Debug, Copy, Clone)]
pub enum FnVal<'tcx, Other> {
    Instance(Instance<'tcx>),
    Other(Other),
}

impl<'tcx, Other> FnVal<'tcx, Other> {
    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
        match self {
            FnVal::Instance(instance) => Ok(instance),
            FnVal::Other(_) => {
                throw_unsup_format!("'foreign' function pointers are not supported in this context")
            }
        }
    }
}

// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    /// Allocations local to this instance of the miri engine. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// global and looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a global (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a global, doing so will
    /// create a copy of the global allocation here.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) alloc_map: M::MemoryMap,

    /// Map for "extra" function pointers.
    extra_fn_ptr_map: FxHashMap<AllocId, M::ExtraFnVal>,

    /// To be able to compare pointers with NULL, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist any more.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,

    /// Extra data added by the machine.
    pub extra: M::MemoryExtra,

    /// Lets us implement `HasDataLayout`, which is awfully convenient.
    pub tcx: TyCtxt<'tcx>,
}

/// Return the `tcx` allocation containing the initial value of the given static.
pub fn get_static<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> InterpResult<'tcx, &'tcx Allocation> {
    trace!("get_static: Need to compute {:?}", def_id);
    let instance = Instance::mono(tcx, def_id);
    let gid = GlobalId { instance, promoted: None };
    // Use the raw query here to break validation cycles. Later uses of the static
    // will call the full query anyway.
    let raw_const = tcx.const_eval_raw(ty::ParamEnv::reveal_all().and(gid))?;
    Ok(tcx.global_alloc(raw_const.alloc_id).unwrap_memory())
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for Memory<'mir, 'tcx, M> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.tcx.data_layout
    }
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    pub fn new(tcx: TyCtxt<'tcx>, extra: M::MemoryExtra) -> Self {
        Memory {
            alloc_map: M::MemoryMap::default(),
            extra_fn_ptr_map: FxHashMap::default(),
            dead_alloc_map: FxHashMap::default(),
            extra,
            tcx,
        }
    }

    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the machine pointer to the allocation. Must never be used
    /// for any other pointers, nor for TLS statics.
    ///
    /// Using the resulting pointer represents a *direct* access to that memory
    /// (e.g. by directly using a `static`),
    /// as opposed to access through a pointer that was created by the program.
    ///
    /// This function can fail only if `ptr` points to an `extern static`.
    pub fn global_base_pointer(
        &self,
        mut ptr: Pointer,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        // We need to handle `extern static`.
        let ptr = match self.tcx.get_global_alloc(ptr.alloc_id) {
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
                bug!("global memory cannot point to thread-local static")
            }
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
                ptr.alloc_id = M::extern_static_alloc_id(self, def_id)?;
                ptr
            }
            _ => {
                // No need to change the `AllocId`.
                ptr
            }
        };
        // And we need to get the tag.
        let tag = M::tag_global_base_pointer(&self.extra, ptr.alloc_id);
        Ok(ptr.with_tag(tag))
    }

    pub fn create_fn_alloc(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
    ) -> Pointer<M::PointerTag> {
        let id = match fn_val {
            FnVal::Instance(instance) => self.tcx.create_fn_alloc(instance),
            FnVal::Other(extra) => {
                // FIXME(RalfJung): Should we have a cache here?
                let id = self.tcx.reserve_alloc_id();
                let old = self.extra_fn_ptr_map.insert(id, extra);
                assert!(old.is_none());
                id
            }
        };
        // Functions are global allocations, so make sure we get the right base pointer.
        // We know this is not an `extern static` so this cannot fail.
        self.global_base_pointer(Pointer::from(id)).unwrap()
    }

    pub fn allocate(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
    ) -> Pointer<M::PointerTag> {
        let alloc = Allocation::uninit(size, align);
        self.allocate_with(alloc, kind)
    }

    pub fn allocate_bytes(
        &mut self,
        bytes: &[u8],
        kind: MemoryKind<M::MemoryKind>,
    ) -> Pointer<M::PointerTag> {
        let alloc = Allocation::from_byte_aligned_bytes(bytes);
        self.allocate_with(alloc, kind)
    }

    pub fn allocate_with(
        &mut self,
        alloc: Allocation,
        kind: MemoryKind<M::MemoryKind>,
    ) -> Pointer<M::PointerTag> {
        let id = self.tcx.reserve_alloc_id();
        debug_assert_ne!(
            Some(kind),
            M::GLOBAL_KIND.map(MemoryKind::Machine),
            "dynamically allocating global memory"
        );
        // This is a new allocation, not a new global one, so no `global_base_ptr`.
        let (alloc, tag) = M::init_allocation_extra(&self.extra, id, Cow::Owned(alloc), Some(kind));
        self.alloc_map.insert(id, (kind, alloc.into_owned()));
        Pointer::from(id).with_tag(tag)
    }

    pub fn reallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        old_size_and_align: Option<(Size, Align)>,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        if ptr.offset.bytes() != 0 {
            throw_ub_format!(
                "reallocating {:?} which does not point to the beginning of an object",
                ptr
            );
        }

        // For simplicity's sake, we implement `reallocate` as "alloc, copy, dealloc".
        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
        let new_ptr = self.allocate(new_size, new_align, kind);
        let old_size = match old_size_and_align {
            Some((size, _align)) => size,
            None => self.get_raw(ptr.alloc_id)?.size,
        };
        self.copy(ptr, new_ptr, old_size.min(new_size), /*nonoverlapping*/ true)?;
        self.deallocate(ptr, old_size_and_align, kind)?;

        Ok(new_ptr)
    }

    /// Deallocate a local, or do nothing if that local has been made into a global.
    pub fn deallocate_local(&mut self, ptr: Pointer<M::PointerTag>) -> InterpResult<'tcx> {
        // The allocation might already have been removed by global interning.
        // This can only really happen in the CTFE instance, not in miri.
        if self.alloc_map.contains_key(&ptr.alloc_id) {
            self.deallocate(ptr, None, MemoryKind::Stack)
        } else {
            Ok(())
        }
    }

    pub fn deallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        old_size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx> {
        trace!("deallocating: {}", ptr.alloc_id);

        if ptr.offset.bytes() != 0 {
            throw_ub_format!(
                "deallocating {:?} which does not point to the beginning of an object",
                ptr
            );
        }

        M::before_deallocation(&mut self.extra, ptr.alloc_id)?;

        let (alloc_kind, mut alloc) = match self.alloc_map.remove(&ptr.alloc_id) {
            Some(alloc) => alloc,
            None => {
                // Deallocating global memory -- always an error
                return Err(match self.tcx.get_global_alloc(ptr.alloc_id) {
                    Some(GlobalAlloc::Function(..)) => err_ub_format!("deallocating a function"),
                    Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
                        err_ub_format!("deallocating static memory")
                    }
                    None => err_ub!(PointerUseAfterFree(ptr.alloc_id)),
                }
                .into());
            }
        };

        if alloc_kind != kind {
            throw_ub_format!(
                "deallocating {} memory using {} deallocation operation",
                alloc_kind,
                kind
            );
        }
        if let Some((size, align)) = old_size_and_align {
            if size != alloc.size || align != alloc.align {
                throw_ub_format!(
                    "incorrect layout on deallocation: allocation has size {} and alignment {}, but gave size {} and alignment {}",
                    alloc.size.bytes(),
                    alloc.align.bytes(),
                    size.bytes(),
                    align.bytes(),
                );
            }
        }

        // Let the machine take some extra action.
        let size = alloc.size;
        AllocationExtra::memory_deallocated(&mut alloc, ptr, size)?;

        // Don't forget to remember the size and align of this now-dead allocation.
        let old = self.dead_alloc_map.insert(ptr.alloc_id, (alloc.size, alloc.align));
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        Ok(())
    }

    /// Check if the given scalar is allowed to do a memory access of given `size`
    /// and `align`. On success, returns `None` for zero-sized accesses (where
    /// nothing else is left to do) and a `Pointer` to use for the actual access otherwise.
    /// Crucially, if the input is a `Pointer`, we will test it for liveness
    /// *even if* the size is 0.
    ///
    /// Everyone accessing memory based on a `Scalar` should use this method to get the
    /// `Pointer` they need. And even if you already have a `Pointer`, call this method
    /// to make sure it is sufficiently aligned and not dangling. Not doing that may
    /// cause ICEs.
    ///
    /// Most of the time you should use `check_mplace_access`, but when you just have a pointer,
    /// this method is still appropriate.
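    ///
    /// A usage sketch (assumes a `memory` and an `sptr: Scalar<M::PointerTag>` in
    /// scope; illustrative, not a compilable doctest):
    /// ```ignore (illustrative sketch)
    /// // Check a 4-byte access at 4-byte alignment; `None` means the access
    /// // was zero-sized and there is nothing left to do.
    /// let size = Size::from_bytes(4);
    /// let align = Align::from_bytes(4).unwrap();
    /// if let Some(ptr) = memory.check_ptr_access(sptr, size, align)? {
    ///     // ... perform the actual access through `ptr` ...
    /// }
    /// ```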
    pub fn check_ptr_access(
        &self,
        sptr: Scalar<M::PointerTag>,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
        let align = M::enforce_alignment(&self.extra).then_some(align);
        self.check_ptr_access_align(sptr, size, align, CheckInAllocMsg::MemoryAccessTest)
    }

    /// Like `check_ptr_access`, but *definitely* checks alignment when `align`
    /// is `Some` (overriding `M::enforce_alignment`). Also lets the caller control
    /// the error message for the out-of-bounds case.
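    ///
    /// The alignment check reports the largest power of two that divides the
    /// offset as the alignment the pointer "has". A sketch of that arithmetic
    /// (plain Rust, illustrative):
    /// ```ignore (illustrative sketch)
    /// let offset: u64 = 12;
    /// // 12 = 0b1100, so the biggest power of two dividing it is 4:
    /// assert_eq!(1u64 << offset.trailing_zeros(), 4);
    /// // Hence a required alignment of 8 fails with `has = 4`.
    /// ```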
    pub fn check_ptr_access_align(
        &self,
        sptr: Scalar<M::PointerTag>,
        size: Size,
        align: Option<Align>,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
        fn check_offset_align(offset: u64, align: Align) -> InterpResult<'static> {
            if offset % align.bytes() == 0 {
                Ok(())
            } else {
                // The biggest power of two through which `offset` is divisible.
                let offset_pow2 = 1 << offset.trailing_zeros();
                throw_ub!(AlignmentCheckFailed {
                    has: Align::from_bytes(offset_pow2).unwrap(),
                    required: align,
                })
            }
        }

        // Normalize to a `Pointer` if we definitely need one.
        let normalized = if size.bytes() == 0 {
            // Can be an integer, just take what we got. We do NOT `force_bits` here;
            // if this is already a `Pointer` we want to do the bounds checks!
            sptr
        } else {
            // A "real" access, we must get a pointer.
            Scalar::from(self.force_ptr(sptr)?)
        };
        Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) {
            Ok(bits) => {
                let bits = u64::try_from(bits).unwrap(); // it's ptr-sized
                assert!(size.bytes() == 0);
                // Must be non-NULL.
                if bits == 0 {
                    throw_ub!(DanglingIntPointer(0, msg))
                }
                // Must be aligned.
                if let Some(align) = align {
                    check_offset_align(bits, align)?;
                }
                None
            }
            Err(ptr) => {
                let (allocation_size, alloc_align) =
                    self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?;
                // Test bounds. This also ensures non-NULL.
                // It is sufficient to check this for the end pointer. The addition
                // checks for overflow.
                let end_ptr = ptr.offset(size, self)?;
                if end_ptr.offset > allocation_size {
                    // equal is okay!
                    throw_ub!(PointerOutOfBounds { ptr: end_ptr.erase_tag(), msg, allocation_size })
                }
                // Test align. Check this last; if both bounds and alignment are violated
                // we want the error to be about the bounds.
                if let Some(align) = align {
                    if alloc_align.bytes() < align.bytes() {
                        // The allocation itself is not aligned enough.
                        // FIXME: Alignment check is too strict, depending on the base address that
                        // got picked we might be aligned even if this check fails.
                        // We instead have to fall back to converting to an integer and checking
                        // the "real" alignment.
                        throw_ub!(AlignmentCheckFailed { has: alloc_align, required: align });
                    }
                    check_offset_align(ptr.offset.bytes(), align)?;
                }

                // We can still be zero-sized in this branch, in which case we have to
                // return `None` even though we do have a pointer.
                if size.bytes() == 0 { None } else { Some(ptr) }
            }
        })
    }

    /// Test if the pointer might be NULL.
    pub fn ptr_may_be_null(&self, ptr: Pointer<M::PointerTag>) -> bool {
        let (size, _align) = self
            .get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)
            .expect("alloc info with MaybeDead cannot fail");
        // If the pointer is out-of-bounds, it may be null.
        // Note that one-past-the-end (offset == size) is still inbounds, and never null.
        ptr.offset > size
    }
}

/// Allocation accessors
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    /// Helper function to obtain a global (tcx) allocation.
    /// This attempts to return a reference to an existing allocation if
    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
    /// this machine use the same pointer tag, so it is indirected through
    /// `M::tag_allocation`.
    fn get_global_alloc(
        memory_extra: &M::MemoryExtra,
        tcx: TyCtxt<'tcx>,
        id: AllocId,
        is_write: bool,
    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
        let (alloc, def_id) = match tcx.get_global_alloc(id) {
            Some(GlobalAlloc::Memory(mem)) => {
                // Memory of a constant or promoted or anonymous memory referenced by a static.
                (mem, None)
            }
            Some(GlobalAlloc::Function(..)) => throw_ub!(DerefFunctionPointer(id)),
            None => throw_ub!(PointerUseAfterFree(id)),
            Some(GlobalAlloc::Static(def_id)) => {
                assert!(tcx.is_static(def_id));
                assert!(!tcx.is_thread_local_static(def_id));
                // Notice that every static has two `AllocId`s that will resolve to the same
                // thing here: one maps to `GlobalAlloc::Static`, this is the "lazy" ID,
                // and the other one maps to `GlobalAlloc::Memory`, this is returned by
                // `const_eval_raw` and it is the "resolved" ID.
                // The resolved ID is never used by the interpreted program, it is hidden.
                // This is relied upon for soundness of const-patterns; a pointer to the resolved
                // ID would "sidestep" the checks that make sure consts do not point to statics!
                // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
                // contains a reference to memory that was created during its evaluation (i.e., not
                // to another static), those inner references only exist in "resolved" form.
                if tcx.is_foreign_item(def_id) {
                    throw_unsup!(ReadExternStatic(def_id));
                }

                (get_static(tcx, def_id)?, Some(def_id))
            }
        };
        M::before_access_global(memory_extra, id, alloc, def_id, is_write)?;
        let alloc = Cow::Borrowed(alloc);
        // We got tcx memory. Let the machine initialize its "extra" stuff.
        let (alloc, tag) = M::init_allocation_extra(
            memory_extra,
            id, // always use the ID we got as input, not the "hidden" one.
            alloc,
            M::GLOBAL_KIND.map(MemoryKind::Machine),
        );
        // Sanity check that this is the same pointer we would have gotten via `global_base_pointer`.
        debug_assert_eq!(tag, M::tag_global_base_pointer(memory_extra, id));
        Ok(alloc)
    }

    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
    pub fn get_raw(
        &self,
        id: AllocId,
    ) -> InterpResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
        // The error type of the inner closure here is somewhat funny. We have two
        // ways of "erroring": An actual error, or because we got a reference from
        // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
        // So the error type is `InterpResult<'tcx, &Allocation<M::PointerTag>>`.
        let a = self.alloc_map.get_or(id, || {
            let alloc = Self::get_global_alloc(&self.extra, self.tcx, id, /*is_write*/ false)
                .map_err(Err)?;
            match alloc {
                Cow::Borrowed(alloc) => {
                    // We got a ref, cheaply return that as an "error" so that the
                    // map does not get mutated.
                    Err(Ok(alloc))
                }
                Cow::Owned(alloc) => {
                    // Need to put it into the map and return a ref to that.
                    let kind = M::GLOBAL_KIND.expect(
                        "I got a global allocation that I have to copy but the machine does \
                         not expect that to happen",
                    );
                    Ok((MemoryKind::Machine(kind), alloc))
                }
            }
        });
        // Now unpack that funny error type.
        match a {
            Ok(a) => Ok(&a.1),
            Err(a) => a,
        }
    }

    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
    pub fn get_raw_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
        let tcx = self.tcx;
        let memory_extra = &self.extra;
        let a = self.alloc_map.get_mut_or(id, || {
            // Need to make a copy, even if `get_global_alloc` is able
            // to give us a cheap reference.
            let alloc = Self::get_global_alloc(memory_extra, tcx, id, /*is_write*/ true)?;
            if alloc.mutability == Mutability::Not {
                throw_ub!(WriteToReadOnly(id))
            }
            let kind = M::GLOBAL_KIND.expect(
                "I got a global allocation that I have to copy but the machine does \
                 not expect that to happen",
            );
            Ok((MemoryKind::Machine(kind), alloc.into_owned()))
        });
        // Unpack the error type manually because type inference doesn't
        // work otherwise (and we cannot help it because of `impl Trait`).
        match a {
            Err(e) => Err(e),
            Ok(a) => {
                let a = &mut a.1;
                if a.mutability == Mutability::Not {
                    throw_ub!(WriteToReadOnly(id))
                }
                Ok(a)
            }
        }
    }

    /// Obtain the size and alignment of an allocation, even if that allocation has
    /// been deallocated.
    ///
    /// If `liveness` is `AllocCheck::MaybeDead`, this function always returns `Ok`.
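    ///
    /// A usage sketch (assumes a `memory` and an `id: AllocId` for a deallocated
    /// allocation; illustrative, not a compilable doctest):
    /// ```ignore (illustrative sketch)
    /// // With `MaybeDead`, even dead allocations report the size and alignment
    /// // recorded in `dead_alloc_map` when they were deallocated.
    /// let (size, align) = memory.get_size_and_align(id, AllocCheck::MaybeDead)?;
    /// ```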
    pub fn get_size_and_align(
        &self,
        id: AllocId,
        liveness: AllocCheck,
    ) -> InterpResult<'static, (Size, Align)> {
        // # Regular allocations
        // Don't use `self.get_raw` here as that will
        // a) cause cycles in case `id` refers to a static
        // b) duplicate a global's allocation in miri
        if let Some((_, alloc)) = self.alloc_map.get(id) {
            return Ok((alloc.size, alloc.align));
        }

        // # Function pointers
        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
        if self.get_fn_alloc(id).is_some() {
            return if let AllocCheck::Dereferenceable = liveness {
                // The caller requested no function pointers.
                throw_ub!(DerefFunctionPointer(id))
            } else {
                Ok((Size::ZERO, Align::from_bytes(1).unwrap()))
            };
        }

        // # Statics
        // Can't do this in the match argument, we may get cycle errors since the lock would
        // be held throughout the match.
        match self.tcx.get_global_alloc(id) {
            Some(GlobalAlloc::Static(did)) => {
                assert!(!self.tcx.is_thread_local_static(did));
                // Use size and align of the type.
                let ty = self.tcx.type_of(did);
                let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
                Ok((layout.size, layout.align.abi))
            }
            Some(GlobalAlloc::Memory(alloc)) => {
                // Need to duplicate the logic here, because the global allocations have
                // different associated types than the interpreter-local ones.
                Ok((alloc.size, alloc.align))
            }
            Some(GlobalAlloc::Function(_)) => bug!("We already checked function pointers above"),
            // The rest must be dead.
            None => {
                if let AllocCheck::MaybeDead = liveness {
                    // Deallocated pointers are allowed, we should be able to find
                    // them in the map.
                    Ok(*self
                        .dead_alloc_map
                        .get(&id)
                        .expect("deallocated pointers should all be recorded in `dead_alloc_map`"))
                } else {
                    throw_ub!(PointerUseAfterFree(id))
                }
            }
        }
    }

    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
        trace!("reading fn ptr: {}", id);
        if let Some(extra) = self.extra_fn_ptr_map.get(&id) {
            Some(FnVal::Other(*extra))
        } else {
            match self.tcx.get_global_alloc(id) {
                Some(GlobalAlloc::Function(instance)) => Some(FnVal::Instance(instance)),
                _ => None,
            }
        }
    }

    pub fn get_fn(
        &self,
        ptr: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        let ptr = self.force_ptr(ptr)?; // We definitely need a pointer value.
        if ptr.offset.bytes() != 0 {
            throw_ub!(InvalidFunctionPointer(ptr.erase_tag()))
        }
        self.get_fn_alloc(ptr.alloc_id)
            .ok_or_else(|| err_ub!(InvalidFunctionPointer(ptr.erase_tag())).into())
    }

    pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
        self.get_raw_mut(id)?.mutability = Mutability::Not;
        Ok(())
    }

    /// Print an allocation and all allocations it points to, recursively.
    /// This prints directly to stderr, ignoring RUSTC_LOG! It is up to the caller
    /// to take that into account.
    pub fn dump_alloc(&self, id: AllocId) {
        self.dump_allocs(vec![id]);
    }

    /// Print a list of allocations and all allocations they point to, recursively.
    /// This prints directly to stderr, ignoring RUSTC_LOG! It is up to the caller
    /// to take that into account.
    pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
        // Cannot be a closure because it is generic in `Tag`, `Extra`.
        fn write_allocation_track_relocs<'tcx, Tag: Copy + fmt::Debug, Extra>(
            tcx: TyCtxt<'tcx>,
            allocs_to_print: &mut VecDeque<AllocId>,
            alloc: &Allocation<Tag, Extra>,
        ) {
            for &(_, target_id) in alloc.relocations().values() {
                allocs_to_print.push_back(target_id);
            }
            pretty::write_allocation(tcx, alloc, &mut std::io::stderr()).unwrap();
        }

        allocs.sort();
        allocs.dedup();
        let mut allocs_to_print = VecDeque::from(allocs);
        // `allocs_printed` contains all allocations that we have already printed.
        let mut allocs_printed = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            if !allocs_printed.insert(id) {
                // Already printed, so skip this.
                continue;
            }

            eprint!("{}", id);
            match self.alloc_map.get(id) {
                Some(&(kind, ref alloc)) => {
                    // normal alloc
                    eprint!(" ({}, ", kind);
                    write_allocation_track_relocs(self.tcx, &mut allocs_to_print, alloc);
                }
                None => {
                    // global alloc
                    match self.tcx.get_global_alloc(id) {
                        Some(GlobalAlloc::Memory(alloc)) => {
                            eprint!(" (unchanged global, ");
                            write_allocation_track_relocs(self.tcx, &mut allocs_to_print, alloc);
                        }
                        Some(GlobalAlloc::Function(func)) => {
                            eprint!(" (fn: {})", func);
                        }
                        Some(GlobalAlloc::Static(did)) => {
                            eprint!(" (static: {})", self.tcx.def_path_str(did));
                        }
                        None => {
                            eprint!(" (deallocated)");
                        }
                    }
                }
            }
            eprintln!();
        }
    }

    /// Print leaked memory. Allocations reachable from `static_roots` or a `Global` allocation
    /// are not considered leaked. Leaks whose kind `may_leak()` returns true are not reported.
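    ///
    /// Returns the number of leaked allocations. A sketch (illustrative, not a
    /// compilable doctest):
    /// ```ignore (illustrative sketch)
    /// let num_leaks = memory.leak_report(&[]);
    /// assert_eq!(num_leaks, 0, "interpreter leaked memory");
    /// ```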
    pub fn leak_report(&self, static_roots: &[AllocId]) -> usize {
        // Collect the set of allocations that are *reachable* from `Global` allocations.
        let reachable = {
            let mut reachable = FxHashSet::default();
            let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
            let mut todo: Vec<_> = self.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
                if Some(kind) == global_kind { Some(id) } else { None }
            });
            todo.extend(static_roots);
            while let Some(id) = todo.pop() {
                if reachable.insert(id) {
                    // This is a new allocation, add its relocations to `todo`.
                    if let Some((_, alloc)) = self.alloc_map.get(id) {
                        todo.extend(alloc.relocations().values().map(|&(_, target_id)| target_id));
                    }
                }
            }
            reachable
        };

        // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
        let leaks: Vec<_> = self.alloc_map.filter_map_collect(|&id, &(kind, _)| {
            if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
        });
        let n = leaks.len();
        if n > 0 {
            eprintln!("The following memory was leaked:");
            self.dump_allocs(leaks);
        }
        n
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda).
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}

/// Reading and writing.
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    /// Reads the given number of bytes from memory. Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
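    ///
    /// A usage sketch (assumes a `memory` and a valid `ptr: Scalar<M::PointerTag>`;
    /// illustrative, not a compilable doctest):
    /// ```ignore (illustrative sketch)
    /// let bytes = memory.read_bytes(ptr, Size::from_bytes(4))?;
    /// assert_eq!(bytes.len(), 4);
    /// ```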
    pub fn read_bytes(&self, ptr: Scalar<M::PointerTag>, size: Size) -> InterpResult<'tcx, &[u8]> {
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
            Some(ptr) => ptr,
            None => return Ok(&[]), // zero-sized access
        };
        self.get_raw(ptr.alloc_id)?.get_bytes(self, ptr, size)
    }

    /// Reads a 0-terminated sequence of bytes from memory. Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
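    ///
    /// For example, if memory at `ptr` contains `b"hi\0"`, this returns `b"hi"`
    /// (the terminator is not included). A sketch (illustrative, not a doctest):
    /// ```ignore (illustrative sketch)
    /// let s = memory.read_c_str(ptr)?;
    /// assert_eq!(s, b"hi");
    /// ```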
    pub fn read_c_str(&self, ptr: Scalar<M::PointerTag>) -> InterpResult<'tcx, &[u8]> {
        let ptr = self.force_ptr(ptr)?; // We need to read at least 1 byte, so we *need* a ptr.
        self.get_raw(ptr.alloc_id)?.read_c_str(self, ptr)
    }

    /// Reads a 0x0000-terminated sequence of `u16`s from memory. Returns them as a `Vec<u16>`.
    /// The terminator 0x0000 is not included in the returned `Vec<u16>`.
    ///
    /// Performs appropriate bounds checks.
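    ///
    /// For example, little-endian memory containing `48 00 69 00 00 00` decodes
    /// to the UTF-16 string "Hi". A sketch (illustrative, not a doctest):
    /// ```ignore (illustrative sketch)
    /// let units = memory.read_wide_str(ptr)?;
    /// assert_eq!(String::from_utf16_lossy(&units), "Hi");
    /// ```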
    pub fn read_wide_str(&self, ptr: Scalar<M::PointerTag>) -> InterpResult<'tcx, Vec<u16>> {
        let size_2bytes = Size::from_bytes(2);
        let align_2bytes = Align::from_bytes(2).unwrap();
        // We need to read at least 2 bytes, so we *need* a ptr.
        let mut ptr = self.force_ptr(ptr)?;
        let allocation = self.get_raw(ptr.alloc_id)?;
        let mut u16_seq = Vec::new();

        loop {
            ptr = self
                .check_ptr_access(ptr.into(), size_2bytes, align_2bytes)?
                .expect("cannot be a ZST");
            let single_u16 = allocation.read_scalar(self, ptr, size_2bytes)?.to_u16()?;
            if single_u16 != 0x0000 {
                u16_seq.push(single_u16);
                ptr = ptr.offset(size_2bytes, self)?;
            } else {
                break;
            }
        }
        Ok(u16_seq)
    }

    /// Writes the given stream of bytes into memory.
    ///
    /// Performs appropriate bounds checks.
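    ///
    /// The iterator's `size_hint` lower bound determines the size of the access,
    /// so in practice the iterator must know its exact length. A sketch
    /// (illustrative, not a doctest):
    /// ```ignore (illustrative sketch)
    /// memory.write_bytes(ptr, vec![1u8, 2, 3].into_iter())?;
    /// ```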
    pub fn write_bytes(
        &mut self,
        ptr: Scalar<M::PointerTag>,
        src: impl IntoIterator<Item = u8>,
    ) -> InterpResult<'tcx> {
        let mut src = src.into_iter();
        let size = Size::from_bytes(src.size_hint().0);
        // `write_bytes` checks that this lower bound `size` matches the upper bound and reality.
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
            Some(ptr) => ptr,
            None => {
                // zero-sized access
                src.next().expect_none("iterator said it was empty but returned an element");
                return Ok(());
            }
        };
        let tcx = self.tcx;
        self.get_raw_mut(ptr.alloc_id)?.write_bytes(&tcx, ptr, src)
    }

    /// Writes the given stream of u16s into memory.
    ///
    /// Performs appropriate bounds checks.
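    ///
    /// Unlike `write_bytes`, the iterator must report a precise length via
    /// `size_hint`, or this method panics. A sketch (illustrative, not a doctest):
    /// ```ignore (illustrative sketch)
    /// // Writes 4 bytes: two u16 values at 2-byte alignment.
    /// memory.write_u16s(ptr, [0x0048u16, 0x0069].iter().copied())?;
    /// ```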
    pub fn write_u16s(
        &mut self,
        ptr: Scalar<M::PointerTag>,
        src: impl IntoIterator<Item = u16>,
    ) -> InterpResult<'tcx> {
        let mut src = src.into_iter();
        let (lower, upper) = src.size_hint();
        let len = upper.expect("can only write bounded iterators");
        assert_eq!(lower, len, "can only write iterators with a precise length");

        let size = Size::from_bytes(lower) * 2; // `Size` multiplication: each u16 takes two bytes
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(2).unwrap())? {
            Some(ptr) => ptr,
            None => {
                // zero-sized access
                src.next().expect_none("iterator said it was empty but returned an element");
                return Ok(());
            }
        };
        let tcx = self.tcx;
        let allocation = self.get_raw_mut(ptr.alloc_id)?;

        for idx in 0..len {
            let val = Scalar::from_u16(
                src.next().expect("iterator was shorter than it said it would be"),
            );
            let offset_ptr = ptr.offset(Size::from_bytes(idx) * 2, &tcx)?; // `Size` multiplication
            allocation.write_scalar(&tcx, offset_ptr, val.into(), Size::from_bytes(2))?;
        }
        src.next().expect_none("iterator was longer than it said it would be");
        Ok(())
    }

    /// Expects the caller to have checked bounds and alignment.
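    ///
    /// A sketch of the expected call pattern (illustrative, not a doctest):
    /// ```ignore (illustrative sketch)
    /// let src = memory.check_ptr_access(src_scalar, size, align)?.expect("non-ZST");
    /// let dest = memory.check_ptr_access(dest_scalar, size, align)?.expect("non-ZST");
    /// memory.copy(src, dest, size, /*nonoverlapping*/ true)?;
    /// ```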
    pub fn copy(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        self.copy_repeatedly(src, dest, size, 1, nonoverlapping)
    }

    /// Expects the caller to have checked bounds and alignment.
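    ///
    /// Copies `size` bytes from `src` into `length` back-to-back ranges starting
    /// at `dest`, so `dest` must have room for `size * length` bytes. A sketch
    /// (illustrative, not a doctest):
    /// ```ignore (illustrative sketch)
    /// // Replicate one 4-byte pattern 3 times: writes 12 bytes at `dest`.
    /// memory.copy_repeatedly(src, dest, Size::from_bytes(4), 3, true)?;
    /// ```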
    pub fn copy_repeatedly(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        length: u64,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        // First copy the relocations to a temporary buffer, because
        // `get_bytes_mut` will clear the relocations, which is correct,
        // since we don't want to keep any relocations at the target.
        // (`get_bytes_with_undef_and_ptr` below checks that there are no
        // relocations overlapping the edges; those would not be handled correctly.)
        let relocations =
            self.get_raw(src.alloc_id)?.prepare_relocation_copy(self, src, size, dest, length);

        let tcx = self.tcx;

        // This checks relocation edges on the src.
        let src_bytes =
            self.get_raw(src.alloc_id)?.get_bytes_with_undef_and_ptr(&tcx, src, size)?.as_ptr();
        let dest_bytes =
            self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, size * length)?; // `Size` multiplication

        // If `dest_bytes` is empty we just optimize to not run anything for ZSTs.
        if dest_bytes.is_empty() {
            return Ok(());
        }

        let dest_bytes = dest_bytes.as_mut_ptr();

        // Prepare a copy of the initialization mask.
        let compressed = self.get_raw(src.alloc_id)?.compress_undef_range(src, size);

        if compressed.no_bytes_init() {
            // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
            // is marked as uninitialized but we otherwise omit changing the byte representation, which
            // may be arbitrary for uninitialized bytes.
            // This also avoids writing to the target bytes so that the backing allocation is never
            // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
            // operating systems this can avoid physically allocating the page.
            let dest_alloc = self.get_raw_mut(dest.alloc_id)?;
            dest_alloc.mark_init(dest, size * length, false); // `Size` multiplication
            dest_alloc.mark_relocation_range(relocations);
            return Ok(());
        }

        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        // The pointers above remain valid even if the `HashMap` table is moved around because they
        // point into the `Vec` storing the bytes.
        unsafe {
            if src.alloc_id == dest.alloc_id {
                if nonoverlapping {
                    // `Size` additions
                    if (src.offset <= dest.offset && src.offset + size > dest.offset)
                        || (dest.offset <= src.offset && dest.offset + size > src.offset)
                    {
                        throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
                    }
                }

                for i in 0..length {
                    ptr::copy(
                        src_bytes,
                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
                        size.bytes_usize(),
                    );
                }
            } else {
                for i in 0..length {
                    ptr::copy_nonoverlapping(
                        src_bytes,
                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
                        size.bytes_usize(),
                    );
                }
            }
        }

        // Now fill in all the data.
        self.get_raw_mut(dest.alloc_id)?.mark_compressed_init_range(
            &compressed,
            dest,
            size,
            length,
        );

        // Copy the relocations to the destination.
        self.get_raw_mut(dest.alloc_id)?.mark_relocation_range(relocations);

        Ok(())
    }
}

/// Machine pointer introspection.
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    pub fn force_ptr(
        &self,
        scalar: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        match scalar {
            Scalar::Ptr(ptr) => Ok(ptr),
            _ => M::int_to_ptr(&self, scalar.to_machine_usize(self)?),
        }
    }

    pub fn force_bits(
        &self,
        scalar: Scalar<M::PointerTag>,
        size: Size,
    ) -> InterpResult<'tcx, u128> {
        match scalar.to_bits_or_ptr(size, self) {
            Ok(bits) => Ok(bits),
            Err(ptr) => Ok(M::ptr_to_int(&self, ptr)?.into()),
        }
    }
}