//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-NULL)
//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the empty case!
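//!
//! A sketch of what this convention allows (hypothetical placeholder values, not
//! code from this module): a zero-sized access through a plain integer address is
//! fine as long as that address is non-NULL and sufficiently aligned.
//!
//! ```rust,ignore
//! // Address 8 is non-NULL and 4-aligned, so a zero-sized access succeeds even
//! // though no allocation lives there; `None` means there is nothing left to do.
//! let ptr = memory.check_ptr_access(int_addr_8, Size::ZERO, four_byte_align)?;
//! assert!(ptr.is_none());
//! ```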

use std::borrow::Cow;
use std::collections::VecDeque;
use std::convert::TryFrom;
use std::fmt;
use std::ptr;

use rustc_ast::ast::Mutability;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_middle::ty::{self, query::TyCtxtAt, Instance, ParamEnv};
use rustc_target::abi::{Align, HasDataLayout, Size, TargetDataLayout};

use super::{
    AllocId, AllocMap, Allocation, AllocationExtra, CheckInAllocMsg, GlobalAlloc, GlobalId,
    InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Scalar,
};
use crate::util::pretty;

#[derive(Debug, PartialEq, Copy, Clone)]
pub enum MemoryKind<T> {
    /// Stack memory. Error if deallocated except during a stack pop.
    Stack,
    /// Memory backing vtables. Error if ever deallocated.
    Vtable,
    /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
    CallerLocation,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
    Machine(T),
}

impl<T: MayLeak> MayLeak for MemoryKind<T> {
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::Vtable => true,
            MemoryKind::CallerLocation => true,
            MemoryKind::Machine(k) => k.may_leak(),
        }
    }
}

impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MemoryKind::Stack => write!(f, "stack variable"),
            MemoryKind::Vtable => write!(f, "vtable"),
            MemoryKind::CallerLocation => write!(f, "caller location"),
            MemoryKind::Machine(m) => write!(f, "{}", m),
        }
    }
}

/// Used by `get_size_and_align` to indicate whether the allocation needs to be live.
#[derive(Debug, Copy, Clone)]
pub enum AllocCheck {
    /// Allocation must be live and not a function pointer.
    Dereferenceable,
    /// Allocation needs to be live, but may be a function pointer.
    Live,
    /// Allocation may be dead.
    MaybeDead,
}

/// The value of a function pointer.
#[derive(Debug, Copy, Clone)]
pub enum FnVal<'tcx, Other> {
    Instance(Instance<'tcx>),
    Other(Other),
}

impl<'tcx, Other> FnVal<'tcx, Other> {
    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
        match self {
            FnVal::Instance(instance) => Ok(instance),
            FnVal::Other(_) => {
                throw_unsup_format!("'foreign' function pointers are not supported in this context")
            }
        }
    }
}

// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    /// Allocations local to this instance of the miri engine. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// global and looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a global (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a global, doing so will
    /// create a copy of the global allocation here.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) alloc_map: M::MemoryMap,

    /// Map for "extra" function pointers.
    extra_fn_ptr_map: FxHashMap<AllocId, M::ExtraFnVal>,

    /// To be able to compare pointers with NULL, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist any more.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,

    /// Extra data added by the machine.
    pub extra: M::MemoryExtra,

    /// Lets us implement `HasDataLayout`, which is awfully convenient.
    pub tcx: TyCtxtAt<'tcx>,
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for Memory<'mir, 'tcx, M> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.tcx.data_layout
    }
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    pub fn new(tcx: TyCtxtAt<'tcx>, extra: M::MemoryExtra) -> Self {
        Memory {
            alloc_map: M::MemoryMap::default(),
            extra_fn_ptr_map: FxHashMap::default(),
            dead_alloc_map: FxHashMap::default(),
            extra,
            tcx,
        }
    }

    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the *canonical* machine pointer to the allocation. Must never be used
    /// for any other pointers!
    ///
    /// This represents a *direct* access to that memory, as opposed to access
    /// through a pointer that was created by the program.
    pub fn tag_global_base_pointer(&self, ptr: Pointer) -> Pointer<M::PointerTag> {
        let id = M::canonical_alloc_id(self, ptr.alloc_id);
        ptr.with_tag(M::tag_global_base_pointer(&self.extra, id))
    }

    pub fn create_fn_alloc(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
    ) -> Pointer<M::PointerTag> {
        let id = match fn_val {
            FnVal::Instance(instance) => self.tcx.create_fn_alloc(instance),
            FnVal::Other(extra) => {
                // FIXME(RalfJung): Should we have a cache here?
                let id = self.tcx.reserve_alloc_id();
                let old = self.extra_fn_ptr_map.insert(id, extra);
                assert!(old.is_none());
                id
            }
        };
        self.tag_global_base_pointer(Pointer::from(id))
    }

    pub fn allocate(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
    ) -> Pointer<M::PointerTag> {
        let alloc = Allocation::undef(size, align);
        self.allocate_with(alloc, kind)
    }

    pub fn allocate_bytes(
        &mut self,
        bytes: &[u8],
        kind: MemoryKind<M::MemoryKind>,
    ) -> Pointer<M::PointerTag> {
        let alloc = Allocation::from_byte_aligned_bytes(bytes);
        self.allocate_with(alloc, kind)
    }

    pub fn allocate_with(
        &mut self,
        alloc: Allocation,
        kind: MemoryKind<M::MemoryKind>,
    ) -> Pointer<M::PointerTag> {
        let id = self.tcx.reserve_alloc_id();
        debug_assert_ne!(
            Some(kind),
            M::GLOBAL_KIND.map(MemoryKind::Machine),
            "dynamically allocating global memory"
        );
        let (alloc, tag) = M::init_allocation_extra(&self.extra, id, Cow::Owned(alloc), Some(kind));
        self.alloc_map.insert(id, (kind, alloc.into_owned()));
        Pointer::from(id).with_tag(tag)
    }

    pub fn reallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        old_size_and_align: Option<(Size, Align)>,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        if ptr.offset.bytes() != 0 {
            throw_ub_format!(
                "reallocating {:?} which does not point to the beginning of an object",
                ptr
            );
        }

        // For simplicity's sake, we implement `reallocate` as "alloc, copy, dealloc".
        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
        let new_ptr = self.allocate(new_size, new_align, kind);
        let old_size = match old_size_and_align {
            Some((size, _align)) => size,
            None => self.get_raw(ptr.alloc_id)?.size,
        };
        self.copy(ptr, new_ptr, old_size.min(new_size), /*nonoverlapping*/ true)?;
        self.deallocate(ptr, old_size_and_align, kind)?;

        Ok(new_ptr)
    }

    /// Deallocate a local, or do nothing if that local has been made into a global.
    pub fn deallocate_local(&mut self, ptr: Pointer<M::PointerTag>) -> InterpResult<'tcx> {
        // The allocation might already have been removed by global interning.
        // This can only really happen in the CTFE instance, not in miri.
        if self.alloc_map.contains_key(&ptr.alloc_id) {
            self.deallocate(ptr, None, MemoryKind::Stack)
        } else {
            Ok(())
        }
    }

    pub fn deallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        old_size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx> {
        trace!("deallocating: {}", ptr.alloc_id);

        if ptr.offset.bytes() != 0 {
            throw_ub_format!(
                "deallocating {:?} which does not point to the beginning of an object",
                ptr
            );
        }

        M::before_deallocation(&mut self.extra, ptr.alloc_id)?;

        let (alloc_kind, mut alloc) = match self.alloc_map.remove(&ptr.alloc_id) {
            Some(alloc) => alloc,
            None => {
                // Deallocating global memory -- always an error
                return Err(match self.tcx.get_global_alloc(ptr.alloc_id) {
                    Some(GlobalAlloc::Function(..)) => err_ub_format!("deallocating a function"),
                    Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
                        err_ub_format!("deallocating static memory")
                    }
                    None => err_ub!(PointerUseAfterFree(ptr.alloc_id)),
                }
                .into());
            }
        };

        if alloc_kind != kind {
            throw_ub_format!(
                "deallocating {} memory using {} deallocation operation",
                alloc_kind,
                kind
            );
        }
        if let Some((size, align)) = old_size_and_align {
            if size != alloc.size || align != alloc.align {
                throw_ub_format!(
                    "incorrect layout on deallocation: allocation has size {} and alignment {}, but gave size {} and alignment {}",
                    alloc.size.bytes(),
                    alloc.align.bytes(),
                    size.bytes(),
                    align.bytes(),
                );
            }
        }

        // Let the machine take some extra action
        let size = alloc.size;
        AllocationExtra::memory_deallocated(&mut alloc, ptr, size)?;

        // Don't forget to remember size and align of this now-dead allocation
        let old = self.dead_alloc_map.insert(ptr.alloc_id, (alloc.size, alloc.align));
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        Ok(())
    }

    /// Check if the given scalar is allowed to do a memory access of given `size`
    /// and `align`. On success, returns `None` for zero-sized accesses (where
    /// nothing else is left to do) and a `Pointer` to use for the actual access otherwise.
    /// Crucially, if the input is a `Pointer`, we will test it for liveness
    /// *even if* the size is 0.
    ///
    /// Everyone accessing memory based on a `Scalar` should use this method to get the
    /// `Pointer` they need. And even if you already have a `Pointer`, call this method
    /// to make sure it is sufficiently aligned and not dangling. Not doing that may
    /// cause ICEs.
    ///
    /// Most of the time you should use `check_mplace_access`, but when you just have a pointer,
    /// this method is still appropriate.
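    ///
    /// A sketch of the intended call pattern (`memory`, `sptr`, `size`, and `align`
    /// are placeholders, not items from this file):
    ///
    /// ```rust,ignore
    /// match memory.check_ptr_access(sptr, size, align)? {
    ///     Some(ptr) => { /* non-empty access: use `ptr` to actually touch memory */ }
    ///     None => { /* zero-sized access: nothing to do, but alignment was checked */ }
    /// }
    /// ```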
    pub fn check_ptr_access(
        &self,
        sptr: Scalar<M::PointerTag>,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
        let align = M::enforce_alignment(&self.extra).then_some(align);
        self.check_ptr_access_align(sptr, size, align, CheckInAllocMsg::MemoryAccessTest)
    }

    /// Like `check_ptr_access`, but *definitely* checks alignment when `align`
    /// is `Some` (overriding `M::enforce_alignment`). Also lets the caller control
    /// the error message for the out-of-bounds case.
    pub fn check_ptr_access_align(
        &self,
        sptr: Scalar<M::PointerTag>,
        size: Size,
        align: Option<Align>,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
        fn check_offset_align(offset: u64, align: Align) -> InterpResult<'static> {
            if offset % align.bytes() == 0 {
                Ok(())
            } else {
                // The biggest power of two through which `offset` is divisible.
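                // (E.g., `offset == 12`, binary 0b1100, has two trailing zeros, so
                // the reported alignment below is `1 << 2 == 4`.)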
                let offset_pow2 = 1 << offset.trailing_zeros();
                throw_ub!(AlignmentCheckFailed {
                    has: Align::from_bytes(offset_pow2).unwrap(),
                    required: align,
                })
            }
        }

        // Normalize to a `Pointer` if we definitely need one.
        let normalized = if size.bytes() == 0 {
            // Can be an integer, just take what we got. We do NOT `force_bits` here;
            // if this is already a `Pointer` we want to do the bounds checks!
            sptr
        } else {
            // A "real" access, we must get a pointer.
            Scalar::from(self.force_ptr(sptr)?)
        };
        Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) {
            Ok(bits) => {
                let bits = u64::try_from(bits).unwrap(); // it's ptr-sized
                assert!(size.bytes() == 0);
                // Must be non-NULL.
                if bits == 0 {
                    throw_ub!(DanglingIntPointer(0, msg))
                }
                // Must be aligned.
                if let Some(align) = align {
                    check_offset_align(bits, align)?;
                }
                None
            }
            Err(ptr) => {
                let (allocation_size, alloc_align) =
                    self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?;
                // Test bounds. This also ensures non-NULL.
                // It is sufficient to check this for the end pointer. The addition
                // checks for overflow.
                let end_ptr = ptr.offset(size, self)?;
                if end_ptr.offset > allocation_size {
                    // equal is okay!
                    throw_ub!(PointerOutOfBounds { ptr: end_ptr.erase_tag(), msg, allocation_size })
                }
                // Test align. Check this last; if both bounds and alignment are violated
                // we want the error to be about the bounds.
                if let Some(align) = align {
                    if alloc_align.bytes() < align.bytes() {
                        // The allocation itself is not aligned enough.
                        // FIXME: Alignment check is too strict, depending on the base address that
                        // got picked we might be aligned even if this check fails.
                        // We instead have to fall back to converting to an integer and checking
                        // the "real" alignment.
                        throw_ub!(AlignmentCheckFailed { has: alloc_align, required: align });
                    }
                    check_offset_align(ptr.offset.bytes(), align)?;
                }

                // We can still be zero-sized in this branch, in which case we have to
                // return `None`.
                if size.bytes() == 0 { None } else { Some(ptr) }
            }
        })
    }

    /// Test if the pointer might be NULL.
    pub fn ptr_may_be_null(&self, ptr: Pointer<M::PointerTag>) -> bool {
        let (size, _align) = self
            .get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)
            .expect("alloc info with MaybeDead cannot fail");
        // If the pointer is out-of-bounds, it may be null.
        // Note that one-past-the-end (offset == size) is still inbounds, and never null.
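        // (E.g., for a 4-byte allocation, offsets 0 through 4 are all inbounds,
        // with 4 being the one-past-the-end offset; only offsets strictly greater
        // than 4 make this return `true`.)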
        ptr.offset > size
    }
}

/// Allocation accessors
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    /// Helper function to obtain a global (tcx) allocation.
    /// This attempts to return a reference to an existing allocation if
    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
    /// this machine use the same pointer tag, so it is indirected through
    /// `M::tag_allocation`.
    fn get_global_alloc(
        memory_extra: &M::MemoryExtra,
        tcx: TyCtxtAt<'tcx>,
        id: AllocId,
        is_write: bool,
    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
        let (alloc, def_id) = match tcx.get_global_alloc(id) {
            Some(GlobalAlloc::Memory(mem)) => {
                // Memory of a constant or promoted or anonymous memory referenced by a static.
                (mem, None)
            }
            Some(GlobalAlloc::Function(..)) => throw_ub!(DerefFunctionPointer(id)),
            None => throw_ub!(PointerUseAfterFree(id)),
            Some(GlobalAlloc::Static(def_id)) => {
                // Notice that every static has two `AllocId` that will resolve to the same
                // thing here: one maps to `GlobalAlloc::Static`, this is the "lazy" ID,
                // and the other one maps to `GlobalAlloc::Memory`, this is returned by
                // `const_eval_raw` and it is the "resolved" ID.
                // The resolved ID is never used by the interpreted program, it is hidden.
                // This is relied upon for soundness of const-patterns; a pointer to the resolved
                // ID would "sidestep" the checks that make sure consts do not point to statics!
                // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
                // contains a reference to memory that was created during its evaluation (i.e., not
                // to another static), those inner references only exist in "resolved" form.
                //
                // Assumes `id` is already canonical.
                if tcx.is_foreign_item(def_id) {
                    trace!("get_global_alloc: foreign item {:?}", def_id);
                    throw_unsup!(ReadForeignStatic(def_id))
                }
                trace!("get_global_alloc: Need to compute {:?}", def_id);
                let instance = Instance::mono(tcx.tcx, def_id);
                let gid = GlobalId { instance, promoted: None };
                // Use the raw query here to break validation cycles. Later uses of the static
                // will call the full query anyway.
                let raw_const =
                    tcx.const_eval_raw(ty::ParamEnv::reveal_all().and(gid)).map_err(|err| {
                        // no need to report anything, the const_eval call takes care of that
                        // for statics
                        assert!(tcx.is_static(def_id));
                        err
                    })?;
                // Make sure we use the ID of the resolved memory, not the lazy one!
                let id = raw_const.alloc_id;
                let allocation = tcx.global_alloc(id).unwrap_memory();

                (allocation, Some(def_id))
            }
        };
        M::before_access_global(memory_extra, id, alloc, def_id, is_write)?;
        let alloc = Cow::Borrowed(alloc);
        // We got tcx memory. Let the machine initialize its "extra" stuff.
        let (alloc, tag) = M::init_allocation_extra(
            memory_extra,
            id, // always use the ID we got as input, not the "hidden" one.
            alloc,
            M::GLOBAL_KIND.map(MemoryKind::Machine),
        );
        debug_assert_eq!(tag, M::tag_global_base_pointer(memory_extra, id));
        Ok(alloc)
    }

    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
    pub fn get_raw(
        &self,
        id: AllocId,
    ) -> InterpResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
        let id = M::canonical_alloc_id(self, id);
        // The error type of the inner closure here is somewhat funny. We have two
        // ways of "erroring": An actual error, or because we got a reference from
        // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
        // So the error type is `InterpResult<'tcx, &Allocation<M::PointerTag>>`.
        let a = self.alloc_map.get_or(id, || {
            let alloc = Self::get_global_alloc(&self.extra, self.tcx, id, /*is_write*/ false)
                .map_err(Err)?;
            match alloc {
                Cow::Borrowed(alloc) => {
                    // We got a ref, cheaply return that as an "error" so that the
                    // map does not get mutated.
                    Err(Ok(alloc))
                }
                Cow::Owned(alloc) => {
                    // Need to put it into the map and return a ref to that
                    let kind = M::GLOBAL_KIND.expect(
                        "I got a global allocation that I have to copy but the machine does \
                         not expect that to happen",
                    );
                    Ok((MemoryKind::Machine(kind), alloc))
                }
            }
        });
        // Now unpack that funny error type
        match a {
            Ok(a) => Ok(&a.1),
            Err(a) => a,
        }
    }

    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
    pub fn get_raw_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
        let id = M::canonical_alloc_id(self, id);
        let tcx = self.tcx;
        let memory_extra = &self.extra;
        let a = self.alloc_map.get_mut_or(id, || {
            // Need to make a copy, even if `get_global_alloc` is able
            // to give us a cheap reference.
            let alloc = Self::get_global_alloc(memory_extra, tcx, id, /*is_write*/ true)?;
            if alloc.mutability == Mutability::Not {
                throw_ub!(WriteToReadOnly(id))
            }
            let kind = M::GLOBAL_KIND.expect(
                "I got a global allocation that I have to copy but the machine does \
                 not expect that to happen",
            );
            Ok((MemoryKind::Machine(kind), alloc.into_owned()))
        });
        // Unpack the error type manually because type inference doesn't
        // work otherwise (and we cannot help it because `impl Trait`)
        match a {
            Err(e) => Err(e),
            Ok(a) => {
                let a = &mut a.1;
                if a.mutability == Mutability::Not {
                    throw_ub!(WriteToReadOnly(id))
                }
                Ok(a)
            }
        }
    }

    /// Obtain the size and alignment of an allocation, even if that allocation has
    /// been deallocated.
    ///
    /// If `liveness` is `AllocCheck::MaybeDead`, this function always returns `Ok`.
    pub fn get_size_and_align(
        &self,
        id: AllocId,
        liveness: AllocCheck,
    ) -> InterpResult<'static, (Size, Align)> {
        let id = M::canonical_alloc_id(self, id);
        // # Regular allocations
        // Don't use `self.get_raw` here as that will
        // a) cause cycles in case `id` refers to a static
        // b) duplicate a global's allocation in miri
        if let Some((_, alloc)) = self.alloc_map.get(id) {
            return Ok((alloc.size, alloc.align));
        }

        // # Function pointers
        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
        if self.get_fn_alloc(id).is_some() {
            return if let AllocCheck::Dereferenceable = liveness {
                // The caller requested no function pointers.
                throw_ub!(DerefFunctionPointer(id))
            } else {
                Ok((Size::ZERO, Align::from_bytes(1).unwrap()))
            };
        }

        // # Statics
        // Can't do this in the match argument, we may get cycle errors since the lock would
        // be held throughout the match.
        match self.tcx.get_global_alloc(id) {
            Some(GlobalAlloc::Static(did)) => {
                // Use size and align of the type.
                let ty = self.tcx.type_of(did);
                let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
                Ok((layout.size, layout.align.abi))
            }
            Some(GlobalAlloc::Memory(alloc)) => {
                // Need to duplicate the logic here, because the global allocations have
                // different associated types than the interpreter-local ones.
                Ok((alloc.size, alloc.align))
            }
            Some(GlobalAlloc::Function(_)) => bug!("We already checked function pointers above"),
            // The rest must be dead.
            None => {
                if let AllocCheck::MaybeDead = liveness {
                    // Deallocated pointers are allowed, we should be able to find
                    // them in the map.
                    Ok(*self
                        .dead_alloc_map
                        .get(&id)
                        .expect("deallocated pointers should all be recorded in `dead_alloc_map`"))
                } else {
                    throw_ub!(PointerUseAfterFree(id))
                }
            }
        }
    }

    /// Assumes `id` is already canonical.
    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
        trace!("reading fn ptr: {}", id);
        if let Some(extra) = self.extra_fn_ptr_map.get(&id) {
            Some(FnVal::Other(*extra))
        } else {
            match self.tcx.get_global_alloc(id) {
                Some(GlobalAlloc::Function(instance)) => Some(FnVal::Instance(instance)),
                _ => None,
            }
        }
    }

    pub fn get_fn(
        &self,
        ptr: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        let ptr = self.force_ptr(ptr)?; // We definitely need a pointer value.
        if ptr.offset.bytes() != 0 {
            throw_ub!(InvalidFunctionPointer(ptr.erase_tag()))
        }
        let id = M::canonical_alloc_id(self, ptr.alloc_id);
        self.get_fn_alloc(id).ok_or_else(|| err_ub!(InvalidFunctionPointer(ptr.erase_tag())).into())
    }

    pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
        self.get_raw_mut(id)?.mutability = Mutability::Not;
        Ok(())
    }

    /// Print an allocation and all allocations it points to, recursively.
    /// This prints directly to stderr, ignoring RUSTC_LOG! It is up to the caller to
    /// control for this.
    pub fn dump_alloc(&self, id: AllocId) {
        self.dump_allocs(vec![id]);
    }

    /// Print a list of allocations and all allocations they point to, recursively.
    /// This prints directly to stderr, ignoring RUSTC_LOG! It is up to the caller to
    /// control for this.
    pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
        // Cannot be a closure because it is generic in `Tag`, `Extra`.
        fn write_allocation_track_relocs<'tcx, Tag: Copy + fmt::Debug, Extra>(
            tcx: TyCtxtAt<'tcx>,
            allocs_to_print: &mut VecDeque<AllocId>,
            alloc: &Allocation<Tag, Extra>,
        ) {
            for &(_, target_id) in alloc.relocations().values() {
                allocs_to_print.push_back(target_id);
            }
            pretty::write_allocation(tcx.tcx, alloc, &mut std::io::stderr()).unwrap();
        }

        allocs.sort();
        allocs.dedup();
        let mut allocs_to_print = VecDeque::from(allocs);
        // `allocs_printed` contains all allocations that we have already printed.
        let mut allocs_printed = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            if !allocs_printed.insert(id) {
                // Already printed, so skip this.
                continue;
            }

            eprint!("{}", id);
            match self.alloc_map.get(id) {
                Some(&(kind, ref alloc)) => {
                    // normal alloc
                    eprint!(" ({}, ", kind);
                    write_allocation_track_relocs(self.tcx, &mut allocs_to_print, alloc);
                }
                None => {
                    // global alloc
                    match self.tcx.get_global_alloc(id) {
                        Some(GlobalAlloc::Memory(alloc)) => {
                            eprint!(" (unchanged global, ");
                            write_allocation_track_relocs(self.tcx, &mut allocs_to_print, alloc);
                        }
                        Some(GlobalAlloc::Function(func)) => {
                            eprint!(" (fn: {})", func);
                        }
                        Some(GlobalAlloc::Static(did)) => {
                            eprint!(" (static: {})", self.tcx.def_path_str(did));
                        }
                        None => {
                            eprint!(" (deallocated)");
                        }
                    }
                }
            }
            eprintln!();
        }
    }
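
    /// Reports (to stderr) all allocations that are still alive but neither reachable
    /// from a global allocation nor allowed to leak; returns how many such allocations
    /// exist.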
    pub fn leak_report(&self) -> usize {
        // Collect the set of allocations that are *reachable* from `Global` allocations.
        let reachable = {
            let mut reachable = FxHashSet::default();
            let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
            let mut todo: Vec<_> = self.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
                if Some(kind) == global_kind { Some(id) } else { None }
            });
            while let Some(id) = todo.pop() {
                if reachable.insert(id) {
                    // This is a new allocation, add its relocations to `todo`.
                    if let Some((_, alloc)) = self.alloc_map.get(id) {
                        todo.extend(alloc.relocations().values().map(|&(_, target_id)| target_id));
                    }
                }
            }
            reachable
        };

        // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
        let leaks: Vec<_> = self.alloc_map.filter_map_collect(|&id, &(kind, _)| {
            if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
        });
        let n = leaks.len();
        if n > 0 {
            eprintln!("The following memory was leaked:");
            self.dump_allocs(leaks);
        }
        n
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda)
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}

/// Reading and writing.
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    /// Reads the given number of bytes from memory. Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
    pub fn read_bytes(&self, ptr: Scalar<M::PointerTag>, size: Size) -> InterpResult<'tcx, &[u8]> {
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
            Some(ptr) => ptr,
            None => return Ok(&[]), // zero-sized access
        };
        self.get_raw(ptr.alloc_id)?.get_bytes(self, ptr, size)
    }

    /// Reads a 0-terminated sequence of bytes from memory. Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
    pub fn read_c_str(&self, ptr: Scalar<M::PointerTag>) -> InterpResult<'tcx, &[u8]> {
        let ptr = self.force_ptr(ptr)?; // We need to read at least 1 byte, so we *need* a ptr.
        self.get_raw(ptr.alloc_id)?.read_c_str(self, ptr)
    }

    /// Reads a 0x0000-terminated sequence of `u16`s from memory. Returns them as a `Vec<u16>`.
    /// The terminating 0x0000 is not included in the returned `Vec<u16>`.
    ///
    /// Performs appropriate bounds checks.
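    ///
    /// For example, the wide string "ab" is stored as the `u16` sequence
    /// `[0x0061, 0x0062, 0x0000]` and is returned as `vec![0x0061, 0x0062]`.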
    pub fn read_wide_str(&self, ptr: Scalar<M::PointerTag>) -> InterpResult<'tcx, Vec<u16>> {
        let size_2bytes = Size::from_bytes(2);
        let align_2bytes = Align::from_bytes(2).unwrap();
        // We need to read at least 2 bytes, so we *need* a ptr.
        let mut ptr = self.force_ptr(ptr)?;
        let allocation = self.get_raw(ptr.alloc_id)?;
        let mut u16_seq = Vec::new();

        loop {
            ptr = self
                .check_ptr_access(ptr.into(), size_2bytes, align_2bytes)?
                .expect("cannot be a ZST");
            let single_u16 = allocation.read_scalar(self, ptr, size_2bytes)?.to_u16()?;
            if single_u16 != 0x0000 {
                u16_seq.push(single_u16);
                ptr = ptr.offset(size_2bytes, self)?;
            } else {
                break;
            }
        }
        Ok(u16_seq)
    }

    /// Writes the given stream of bytes into memory.
    ///
    /// Performs appropriate bounds checks.
    pub fn write_bytes(
        &mut self,
        ptr: Scalar<M::PointerTag>,
        src: impl IntoIterator<Item = u8>,
    ) -> InterpResult<'tcx> {
        let mut src = src.into_iter();
        let size = Size::from_bytes(src.size_hint().0);
        // `write_bytes` checks that this lower bound `size` matches the upper bound and reality.
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
            Some(ptr) => ptr,
            None => {
                // zero-sized access
                src.next().expect_none("iterator said it was empty but returned an element");
                return Ok(());
            }
        };
        let tcx = self.tcx.tcx;
        self.get_raw_mut(ptr.alloc_id)?.write_bytes(&tcx, ptr, src)
    }

    /// Writes the given stream of u16s into memory.
    ///
    /// Performs appropriate bounds checks.
    pub fn write_u16s(
        &mut self,
        ptr: Scalar<M::PointerTag>,
        src: impl IntoIterator<Item = u16>,
    ) -> InterpResult<'tcx> {
        let mut src = src.into_iter();
        let (lower, upper) = src.size_hint();
        let len = upper.expect("can only write bounded iterators");
        assert_eq!(lower, len, "can only write iterators with a precise length");

        let size = Size::from_bytes(lower) * 2; // `Size` multiplication: two bytes per `u16`
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(2).unwrap())? {
            Some(ptr) => ptr,
            None => {
                // zero-sized access
                src.next().expect_none("iterator said it was empty but returned an element");
                return Ok(());
            }
        };
        let tcx = self.tcx.tcx;
        let allocation = self.get_raw_mut(ptr.alloc_id)?;

        for idx in 0..len {
            let val = Scalar::from_u16(
                src.next().expect("iterator was shorter than it said it would be"),
            );
            let offset_ptr = ptr.offset(Size::from_bytes(idx) * 2, &tcx)?; // `Size` multiplication
            allocation.write_scalar(&tcx, offset_ptr, val.into(), Size::from_bytes(2))?;
        }
        src.next().expect_none("iterator was longer than it said it would be");
        Ok(())
    }

    /// Expects the caller to have checked bounds and alignment.
    pub fn copy(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        self.copy_repeatedly(src, dest, size, 1, nonoverlapping)
    }

    /// Expects the caller to have checked bounds and alignment.
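    ///
    /// Copies the `size`-byte region at `src` into `length` consecutive `size`-byte
    /// chunks starting at `dest`, so the destination must be `size * length` bytes
    /// large (this is what, e.g., array repeat initializers need).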
    pub fn copy_repeatedly(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        length: u64,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        // First copy the relocations to a temporary buffer, because
        // `get_bytes_mut` will clear the relocations, which is correct,
        // since we don't want to keep any relocations at the target.
        // (`get_bytes_with_undef_and_ptr` below checks that there are no
        // relocations overlapping the edges; those would not be handled correctly).
        let relocations =
            self.get_raw(src.alloc_id)?.prepare_relocation_copy(self, src, size, dest, length);

        let tcx = self.tcx.tcx;

        // This checks relocation edges on the src.
        let src_bytes =
            self.get_raw(src.alloc_id)?.get_bytes_with_undef_and_ptr(&tcx, src, size)?.as_ptr();
        let dest_bytes =
            self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, size * length)?; // `Size` multiplication

        // If `dest_bytes` is empty we just optimize to not run anything for ZSTs.
        if dest_bytes.is_empty() {
            return Ok(());
        }

        let dest_bytes = dest_bytes.as_mut_ptr();

        // Prepare a copy of the undef mask.
        let compressed = self.get_raw(src.alloc_id)?.compress_undef_range(src, size);

        if compressed.all_bytes_undef() {
            // Fast path: If all bytes are `undef` then there is nothing to copy. The target range
            // is marked as undef but we otherwise omit changing the byte representation which may
            // be arbitrary for undef bytes.
            // This also avoids writing to the target bytes so that the backing allocation is never
            // touched if the bytes stay undef for the whole interpreter execution. On contemporary
            // operating systems this can avoid physically allocating the page.
            let dest_alloc = self.get_raw_mut(dest.alloc_id)?;
            dest_alloc.mark_definedness(dest, size * length, false); // `Size` multiplication
            dest_alloc.mark_relocation_range(relocations);
            return Ok(());
        }

        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        // The pointers above remain valid even if the `HashMap` table is moved around because they
        // point into the `Vec` storing the bytes.
        unsafe {
            if src.alloc_id == dest.alloc_id {
                if nonoverlapping {
                    // `Size` additions
                    if (src.offset <= dest.offset && src.offset + size > dest.offset)
                        || (dest.offset <= src.offset && dest.offset + size > src.offset)
                    {
                        throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
                    }
                }

                for i in 0..length {
                    ptr::copy(
                        src_bytes,
                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
                        size.bytes_usize(),
                    );
                }
            } else {
                for i in 0..length {
                    ptr::copy_nonoverlapping(
                        src_bytes,
                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
                        size.bytes_usize(),
                    );
                }
            }
        }

        // now fill in all the data
        self.get_raw_mut(dest.alloc_id)?.mark_compressed_undef_range(
            &compressed,
            dest,
            size,
            length,
        );

        // copy the relocations to the destination
        self.get_raw_mut(dest.alloc_id)?.mark_relocation_range(relocations);

        Ok(())
    }
}

/// Machine pointer introspection.
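///
/// These helpers mediate between the two views of a machine value: `force_ptr`
/// demands a `Pointer` (asking the machine to turn an integer into a pointer if
/// necessary), while `force_bits` demands raw bits (asking the machine to turn a
/// pointer into an integer if necessary).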
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    pub fn force_ptr(
        &self,
        scalar: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        match scalar {
            Scalar::Ptr(ptr) => Ok(ptr),
            _ => M::int_to_ptr(&self, scalar.to_machine_usize(self)?),
        }
    }

    pub fn force_bits(
        &self,
        scalar: Scalar<M::PointerTag>,
        size: Size,
    ) -> InterpResult<'tcx, u128> {
        match scalar.to_bits_or_ptr(size, self) {
            Ok(bits) => Ok(bits),
            Err(ptr) => Ok(M::ptr_to_int(&self, ptr)?.into()),
        }
    }
}