//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-NULL)
//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the empty case!
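//!
//! For example (an illustrative sketch, not a doctest; `memory` and `cx` are assumed
//! to be a `Memory` and something implementing `HasDataLayout`):
//!
//! ```rust,ignore (illustrative sketch, not a doctest)
//! // A zero-sized access may use a plain integer "address", as long as it is
//! // non-NULL and sufficiently aligned; `check_ptr_access` returns `Ok(None)` then:
//! let addr = Scalar::from_uint(8u128, cx.pointer_size());
//! assert!(memory.check_ptr_access(addr, Size::ZERO, Align::from_bytes(4).unwrap())?.is_none());
//! ```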
use std::borrow::Cow;
use std::collections::VecDeque;
use std::fmt::Write;
use std::ptr;

use rustc::ty::layout::{Align, HasDataLayout, Size, TargetDataLayout};
use rustc::ty::{self, query::TyCtxtAt, Instance, ParamEnv};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use syntax::ast::Mutability;

use super::{
    AllocId, AllocMap, Allocation, AllocationExtra, CheckInAllocMsg, ErrorHandled, GlobalAlloc,
    GlobalId, InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Scalar,
};
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum MemoryKind<T> {
    /// Stack memory. Error if deallocated except during a stack pop.
    Stack,
    /// Memory backing vtables. Error if ever deallocated.
    Vtable,
    /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
    CallerLocation,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
    Machine(T),
}

impl<T: MayLeak> MayLeak for MemoryKind<T> {
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::Vtable => true,
            MemoryKind::CallerLocation => true,
            MemoryKind::Machine(k) => k.may_leak(),
        }
    }
}
/// Used by `get_size_and_align` to indicate whether the allocation needs to be live.
#[derive(Debug, Copy, Clone)]
pub enum AllocCheck {
    /// Allocation must be live and not a function pointer.
    Dereferenceable,
    /// Allocation needs to be live, but may be a function pointer.
    Live,
    /// Allocation may be dead.
    MaybeDead,
}
/// The value of a function pointer.
#[derive(Debug, Copy, Clone)]
pub enum FnVal<'tcx, Other> {
    Instance(Instance<'tcx>),
    Other(Other),
}

impl<'tcx, Other> FnVal<'tcx, Other> {
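    /// Illustrative behavior: `FnVal::Instance(i).as_instance()` returns `Ok(i)`, while
    /// `FnVal::Other(..)` is rejected with an "unsupported" error.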
    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
        match self {
            FnVal::Instance(instance) => Ok(instance),
            FnVal::Other(_) => {
                throw_unsup_format!("'foreign' function pointers are not supported in this context")
            }
        }
    }
}

// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    /// Allocations local to this instance of the miri engine. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// static and is looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a static (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a static, doing so will
    /// create a copy of the static allocation here.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) alloc_map: M::MemoryMap,
    /// Map for "extra" function pointers.
    extra_fn_ptr_map: FxHashMap<AllocId, M::ExtraFnVal>,

    /// To be able to compare pointers with NULL, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist any more.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,

    /// Extra data added by the machine.
    pub extra: M::MemoryExtra,

    /// Lets us implement `HasDataLayout`, which is awfully convenient.
    pub tcx: TyCtxtAt<'tcx>,
}
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for Memory<'mir, 'tcx, M> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.tcx.data_layout
    }
}
// FIXME: Really we shouldn't clone memory, ever. Snapshot machinery should instead
// carefully copy only the reachable parts.
impl<'mir, 'tcx, M> Clone for Memory<'mir, 'tcx, M>
where
    M: Machine<'mir, 'tcx, PointerTag = (), AllocExtra = ()>,
    M::MemoryExtra: Copy,
    M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKinds>, Allocation)>,
{
    fn clone(&self) -> Self {
        Memory {
            alloc_map: self.alloc_map.clone(),
            extra_fn_ptr_map: self.extra_fn_ptr_map.clone(),
            dead_alloc_map: self.dead_alloc_map.clone(),
            extra: self.extra,
            tcx: self.tcx,
        }
    }
}
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    pub fn new(tcx: TyCtxtAt<'tcx>, extra: M::MemoryExtra) -> Self {
        Memory {
            alloc_map: M::MemoryMap::default(),
            extra_fn_ptr_map: FxHashMap::default(),
            dead_alloc_map: FxHashMap::default(),
            extra,
            tcx,
        }
    }

    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the *canonical* machine pointer to the allocation. Must never be used
    /// for any other pointers!
    ///
    /// This represents a *direct* access to that memory, as opposed to access
    /// through a pointer that was created by the program.
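    ///
    /// For example, `create_fn_alloc` below reserves a fresh `AllocId` through `tcx`
    /// and then runs `self.tag_static_base_pointer(Pointer::from(id))` to obtain the
    /// canonical machine pointer for it.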
    pub fn tag_static_base_pointer(&self, ptr: Pointer) -> Pointer<M::PointerTag> {
        ptr.with_tag(M::tag_static_base_pointer(&self.extra, ptr.alloc_id))
    }

    pub fn create_fn_alloc(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
    ) -> Pointer<M::PointerTag> {
        let id = match fn_val {
            FnVal::Instance(instance) => self.tcx.alloc_map.lock().create_fn_alloc(instance),
            FnVal::Other(extra) => {
                // FIXME(RalfJung): Should we have a cache here?
                let id = self.tcx.alloc_map.lock().reserve();
                let old = self.extra_fn_ptr_map.insert(id, extra);
                assert!(old.is_none());
                id
            }
        };
        self.tag_static_base_pointer(Pointer::from(id))
    }

    pub fn allocate(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> Pointer<M::PointerTag> {
        let alloc = Allocation::undef(size, align);
        self.allocate_with(alloc, kind)
    }

    pub fn allocate_static_bytes(
        &mut self,
        bytes: &[u8],
        kind: MemoryKind<M::MemoryKinds>,
    ) -> Pointer<M::PointerTag> {
        let alloc = Allocation::from_byte_aligned_bytes(bytes);
        self.allocate_with(alloc, kind)
    }

    pub fn allocate_with(
        &mut self,
        alloc: Allocation,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> Pointer<M::PointerTag> {
        let id = self.tcx.alloc_map.lock().reserve();
        debug_assert_ne!(
            Some(kind),
            M::STATIC_KIND.map(MemoryKind::Machine),
            "dynamically allocating static memory"
        );
        let (alloc, tag) = M::init_allocation_extra(&self.extra, id, Cow::Owned(alloc), Some(kind));
        self.alloc_map.insert(id, (kind, alloc.into_owned()));
        Pointer::from(id).with_tag(tag)
    }

    pub fn reallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        old_size_and_align: Option<(Size, Align)>,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        if ptr.offset.bytes() != 0 {
            throw_unsup!(ReallocateNonBasePtr)
        }

        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
        let new_ptr = self.allocate(new_size, new_align, kind);
        let old_size = match old_size_and_align {
            Some((size, _align)) => size,
            None => self.get_raw(ptr.alloc_id)?.size,
        };
        self.copy(ptr, new_ptr, old_size.min(new_size), /*nonoverlapping*/ true)?;
        self.deallocate(ptr, old_size_and_align, kind)?;

        Ok(new_ptr)
    }

    /// Deallocate a local, or do nothing if that local has been made into a static.
    pub fn deallocate_local(&mut self, ptr: Pointer<M::PointerTag>) -> InterpResult<'tcx> {
        // The allocation might be already removed by static interning.
        // This can only really happen in the CTFE instance, not in miri.
        if self.alloc_map.contains_key(&ptr.alloc_id) {
            self.deallocate(ptr, None, MemoryKind::Stack)
        } else {
            Ok(())
        }
    }

    pub fn deallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        old_size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> InterpResult<'tcx> {
        trace!("deallocating: {}", ptr.alloc_id);

        if ptr.offset.bytes() != 0 {
            throw_unsup!(DeallocateNonBasePtr)
        }

        let (alloc_kind, mut alloc) = match self.alloc_map.remove(&ptr.alloc_id) {
            Some(alloc) => alloc,
            None => {
                // Deallocating static memory -- always an error
                return Err(match self.tcx.alloc_map.lock().get(ptr.alloc_id) {
                    Some(GlobalAlloc::Function(..)) => err_unsup!(DeallocatedWrongMemoryKind(
                        "function".to_string(),
                        format!("{:?}", kind),
                    )),
                    Some(GlobalAlloc::Static(..)) | Some(GlobalAlloc::Memory(..)) => err_unsup!(
                        DeallocatedWrongMemoryKind("static".to_string(), format!("{:?}", kind))
                    ),
                    None => err_unsup!(DoubleFree),
                }
                .into());
            }
        };

        if alloc_kind != kind {
            throw_unsup!(DeallocatedWrongMemoryKind(
                format!("{:?}", alloc_kind),
                format!("{:?}", kind),
            ))
        }
        if let Some((size, align)) = old_size_and_align {
            if size != alloc.size || align != alloc.align {
                let bytes = alloc.size;
                throw_unsup!(IncorrectAllocationInformation(size, bytes, align, alloc.align))
            }
        }

        // Let the machine take some extra action
        let size = alloc.size;
        AllocationExtra::memory_deallocated(&mut alloc, ptr, size)?;

        // Don't forget to remember size and align of this now-dead allocation
        let old = self.dead_alloc_map.insert(ptr.alloc_id, (alloc.size, alloc.align));
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        Ok(())
    }

    /// Check if the given scalar is allowed to do a memory access of given `size`
    /// and `align`. On success, returns `None` for zero-sized accesses (where
    /// nothing else is left to do) and a `Pointer` to use for the actual access otherwise.
    /// Crucially, if the input is a `Pointer`, we will test it for liveness
    /// *even if* the size is 0.
    ///
    /// Everyone accessing memory based on a `Scalar` should use this method to get the
    /// `Pointer` they need. And even if you already have a `Pointer`, call this method
    /// to make sure it is sufficiently aligned and not dangling. Not doing that may
    /// cause ICEs.
    ///
    /// Most of the time you should use `check_mplace_access`, but when you just have a pointer,
    /// this method is still appropriate.
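    ///
    /// A minimal usage sketch (illustrative only; assumes a `Memory` value `memory`
    /// and suitable `sptr`, `size`, `align` in scope):
    ///
    /// ```rust,ignore (illustrative sketch, not a doctest)
    /// match memory.check_ptr_access(sptr, size, align)? {
    ///     Some(ptr) => { /* a real access: go through `ptr` */ }
    ///     None => { /* zero-sized access: nothing left to do */ }
    /// }
    /// ```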
    pub fn check_ptr_access(
        &self,
        sptr: Scalar<M::PointerTag>,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
        let align = M::CHECK_ALIGN.then_some(align);
        self.check_ptr_access_align(sptr, size, align, CheckInAllocMsg::MemoryAccessTest)
    }

    /// Like `check_ptr_access`, but *definitely* checks alignment when `align`
    /// is `Some` (overriding `M::CHECK_ALIGN`). Also lets the caller control
    /// the error message for the out-of-bounds case.
    pub fn check_ptr_access_align(
        &self,
        sptr: Scalar<M::PointerTag>,
        size: Size,
        align: Option<Align>,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
        fn check_offset_align(offset: u64, align: Align) -> InterpResult<'static> {
            if offset % align.bytes() == 0 {
                Ok(())
            } else {
                // The biggest power of two through which `offset` is divisible.
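                // E.g., `offset == 24 == 0b11000` has three trailing zeros, so the
                // largest power of two dividing it is `1 << 3 == 8`.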
                let offset_pow2 = 1 << offset.trailing_zeros();
                throw_unsup!(AlignmentCheckFailed {
                    has: Align::from_bytes(offset_pow2).unwrap(),
                    required: align,
                })
            }
        }

        // Normalize to a `Pointer` if we definitely need one.
        let normalized = if size.bytes() == 0 {
            // Can be an integer, just take what we got. We do NOT `force_bits` here;
            // if this is already a `Pointer` we want to do the bounds checks!
            sptr
        } else {
            // A "real" access, we must get a pointer.
            Scalar::from(self.force_ptr(sptr)?)
        };
        Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) {
            Ok(bits) => {
                let bits = bits as u64; // it's ptr-sized
                assert!(size.bytes() == 0);
                // Must be non-NULL.
                if bits == 0 {
                    throw_unsup!(InvalidNullPointerUsage)
                }
                // Must be aligned.
                if let Some(align) = align {
                    check_offset_align(bits, align)?;
                }
                None
            }
            Err(ptr) => {
                let (allocation_size, alloc_align) =
                    self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?;
                // Test bounds. This also ensures non-NULL.
                // It is sufficient to check this for the end pointer. The addition
                // checks for overflow.
                let end_ptr = ptr.offset(size, self)?;
                end_ptr.check_inbounds_alloc(allocation_size, msg)?;
                // Test align. Check this last; if both bounds and alignment are violated
                // we want the error to be about the bounds.
                if let Some(align) = align {
                    if alloc_align.bytes() < align.bytes() {
                        // The allocation itself is not aligned enough.
                        // FIXME: Alignment check is too strict, depending on the base address that
                        // got picked we might be aligned even if this check fails.
                        // We instead have to fall back to converting to an integer and checking
                        // the "real" alignment.
                        throw_unsup!(AlignmentCheckFailed { has: alloc_align, required: align });
                    }
                    check_offset_align(ptr.offset.bytes(), align)?;
                }

                // We can still be zero-sized in this branch, in which case we have to
                // return `None`.
                if size.bytes() == 0 { None } else { Some(ptr) }
            }
        })
    }

    /// Test if the pointer might be NULL.
    pub fn ptr_may_be_null(&self, ptr: Pointer<M::PointerTag>) -> bool {
        let (size, _align) = self
            .get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)
            .expect("alloc info with MaybeDead cannot fail");
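        // An in-bounds pointer can never be NULL (allocations never start at address 0),
        // so the pointer may only be NULL if the in-bounds check fails.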
        ptr.check_inbounds_alloc(size, CheckInAllocMsg::NullPointerTest).is_err()
    }
}

/// Allocation accessors
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    /// Helper function to obtain the global (tcx) allocation for a static.
    /// This attempts to return a reference to an existing allocation if
    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
    /// this machine use the same pointer tag, so it is indirected through
    /// `M::tag_allocation`.
    ///
    /// Notice that every static has two `AllocId`s that will resolve to the same
    /// thing here: one maps to `GlobalAlloc::Static`, this is the "lazy" ID,
    /// and the other one maps to `GlobalAlloc::Memory`, which is returned by
    /// `const_eval_raw` and is the "resolved" ID.
    /// The resolved ID is never used by the interpreted program; it is hidden.
    /// The `GlobalAlloc::Memory` branch here is still reachable though; when a static
    /// contains a reference to memory that was created during its evaluation (i.e., not to
    /// another static), those inner references only exist in "resolved" form.
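    ///
    /// For example, for `static FOO: i32 = 13;` the "lazy" ID resolves to
    /// `GlobalAlloc::Static(foo_def_id)` (with `foo_def_id` being `FOO`'s `DefId`,
    /// named here only for illustration), while the "resolved" ID returned by
    /// `const_eval_raw` resolves to a `GlobalAlloc::Memory` holding the bytes of `13`.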
    ///
    /// Assumes `id` is already canonical.
    fn get_static_alloc(
        memory_extra: &M::MemoryExtra,
        tcx: TyCtxtAt<'tcx>,
        id: AllocId,
    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
        let alloc = tcx.alloc_map.lock().get(id);
        let alloc = match alloc {
            Some(GlobalAlloc::Memory(mem)) => Cow::Borrowed(mem),
            Some(GlobalAlloc::Function(..)) => throw_unsup!(DerefFunctionPointer),
            None => throw_unsup!(DanglingPointerDeref),
            Some(GlobalAlloc::Static(def_id)) => {
                // We got a "lazy" static that has not been computed yet.
                if tcx.is_foreign_item(def_id) {
                    trace!("get_static_alloc: foreign item {:?}", def_id);
                    throw_unsup!(ReadForeignStatic)
                }
                trace!("get_static_alloc: Need to compute {:?}", def_id);
                let instance = Instance::mono(tcx.tcx, def_id);
                let gid = GlobalId { instance, promoted: None };
                // Use the raw query here to break validation cycles. Later uses of the static
                // will call the full query anyway.
                let raw_const =
                    tcx.const_eval_raw(ty::ParamEnv::reveal_all().and(gid)).map_err(|err| {
                        // no need to report anything, the const_eval call takes care of that
                        assert!(tcx.is_static(def_id));
                        match err {
                            ErrorHandled::Reported => err_inval!(ReferencedConstant),
                            ErrorHandled::TooGeneric => err_inval!(TooGeneric),
                        }
                    })?;
                // Make sure we use the ID of the resolved memory, not the lazy one!
                let id = raw_const.alloc_id;
                let allocation = tcx.alloc_map.lock().unwrap_memory(id);

                M::before_access_static(memory_extra, allocation)?;
                Cow::Borrowed(allocation)
            }
        };
        // We got tcx memory. Let the machine initialize its "extra" stuff.
        let (alloc, tag) = M::init_allocation_extra(
            memory_extra,
            id, // always use the ID we got as input, not the "hidden" one.
            alloc,
            M::STATIC_KIND.map(MemoryKind::Machine),
        );
        debug_assert_eq!(tag, M::tag_static_base_pointer(memory_extra, id));
        Ok(alloc)
    }

    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
    pub fn get_raw(
        &self,
        id: AllocId,
    ) -> InterpResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
        let id = M::canonical_alloc_id(self, id);
        // The error type of the inner closure here is somewhat funny. We have two
        // ways of "erroring": An actual error, or because we got a reference from
        // `get_static_alloc` that we can actually use directly without inserting anything anywhere.
        // So the error type is `InterpResult<'tcx, &Allocation<M::PointerTag>>`.
        let a = self.alloc_map.get_or(id, || {
            let alloc = Self::get_static_alloc(&self.extra, self.tcx, id).map_err(Err)?;
            match alloc {
                Cow::Borrowed(alloc) => {
                    // We got a ref, cheaply return that as an "error" so that the
                    // map does not get mutated.
                    Err(Ok(alloc))
                }
                Cow::Owned(alloc) => {
                    // Need to put it into the map and return a ref to that
                    let kind = M::STATIC_KIND.expect(
                        "I got an owned allocation that I have to copy but the machine does \
                         not expect that to happen",
                    );
                    Ok((MemoryKind::Machine(kind), alloc))
                }
            }
        });
        // Now unpack that funny error type
        match a {
            Ok(a) => Ok(&a.1),
            Err(a) => a,
        }
    }

    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
    pub fn get_raw_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
        let id = M::canonical_alloc_id(self, id);
        let tcx = self.tcx;
        let memory_extra = &self.extra;
        let a = self.alloc_map.get_mut_or(id, || {
            // Need to make a copy, even if `get_static_alloc` is able
            // to give us a cheap reference.
            let alloc = Self::get_static_alloc(memory_extra, tcx, id)?;
            if alloc.mutability == Mutability::Not {
                throw_unsup!(ModifiedConstantMemory)
            }
            match M::STATIC_KIND {
                Some(kind) => Ok((MemoryKind::Machine(kind), alloc.into_owned())),
                None => throw_unsup!(ModifiedStatic),
            }
        });
        // Unpack the error type manually because type inference doesn't
        // work otherwise (and we cannot help it because of `impl Trait`).
        match a {
            Err(e) => Err(e),
            Ok(a) => {
                let a = &mut a.1;
                if a.mutability == Mutability::Not {
                    throw_unsup!(ModifiedConstantMemory)
                }
                Ok(a)
            }
        }
    }

    /// Obtain the size and alignment of an allocation, even if that allocation has
    /// been deallocated.
    ///
    /// If `liveness` is `AllocCheck::MaybeDead`, this function always returns `Ok`.
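    ///
    /// ```rust,ignore (illustrative sketch, not a doctest)
    /// // With `MaybeDead`, even a deallocated allocation reports its old size and align:
    /// let (size, align) = memory.get_size_and_align(id, AllocCheck::MaybeDead)?;
    /// // With `AllocCheck::Dereferenceable`, dead allocations and function pointers error instead.
    /// ```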
    pub fn get_size_and_align(
        &self,
        id: AllocId,
        liveness: AllocCheck,
    ) -> InterpResult<'static, (Size, Align)> {
        let id = M::canonical_alloc_id(self, id);
        // # Regular allocations
        // Don't use `self.get_raw` here as that will
        // a) cause cycles in case `id` refers to a static
        // b) duplicate a static's allocation in miri
        if let Some((_, alloc)) = self.alloc_map.get(id) {
            return Ok((alloc.size, alloc.align));
        }

        // # Function pointers
        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
        if self.get_fn_alloc(id).is_some() {
            return if let AllocCheck::Dereferenceable = liveness {
                // The caller requested no function pointers.
                throw_unsup!(DerefFunctionPointer)
            } else {
                Ok((Size::ZERO, Align::from_bytes(1).unwrap()))
            };
        }

        // # Statics
        // Can't do this in the match argument: we may get cycle errors, since the lock would
        // be held throughout the match.
        let alloc = self.tcx.alloc_map.lock().get(id);
        match alloc {
            Some(GlobalAlloc::Static(did)) => {
                // Use size and align of the type.
                let ty = self.tcx.type_of(did);
                let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
                Ok((layout.size, layout.align.abi))
            }
            Some(GlobalAlloc::Memory(alloc)) => {
                // Need to duplicate the logic here, because the global allocations have
                // different associated types than the interpreter-local ones.
                Ok((alloc.size, alloc.align))
            }
            Some(GlobalAlloc::Function(_)) => bug!("We already checked function pointers above"),
            // The rest must be dead.
            None => {
                if let AllocCheck::MaybeDead = liveness {
                    // Deallocated pointers are allowed, we should be able to find
                    // them in `dead_alloc_map`.
                    Ok(*self.dead_alloc_map.get(&id).expect(
                        "deallocated pointers should all be recorded in `dead_alloc_map`",
                    ))
                } else {
                    throw_unsup!(DanglingPointerDeref)
                }
            }
        }
    }

    /// Assumes `id` is already canonical.
    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
        trace!("reading fn ptr: {}", id);
        if let Some(extra) = self.extra_fn_ptr_map.get(&id) {
            Some(FnVal::Other(*extra))
        } else {
            match self.tcx.alloc_map.lock().get(id) {
                Some(GlobalAlloc::Function(instance)) => Some(FnVal::Instance(instance)),
                _ => None,
            }
        }
    }

    pub fn get_fn(
        &self,
        ptr: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        let ptr = self.force_ptr(ptr)?; // We definitely need a pointer value.
        if ptr.offset.bytes() != 0 {
            throw_unsup!(InvalidFunctionPointer)
        }
        let id = M::canonical_alloc_id(self, ptr.alloc_id);
        self.get_fn_alloc(id).ok_or_else(|| err_unsup!(ExecuteMemory).into())
    }

    pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
        self.get_raw_mut(id)?.mutability = Mutability::Not;
        Ok(())
    }

    /// Print an allocation and all allocations it points to, recursively.
    /// This prints directly to stderr, ignoring RUSTC_LOG! It is up to the caller to
    /// control for this.
    pub fn dump_alloc(&self, id: AllocId) {
        self.dump_allocs(vec![id]);
    }

    fn dump_alloc_helper<Tag, Extra>(
        &self,
        allocs_seen: &mut FxHashSet<AllocId>,
        allocs_to_print: &mut VecDeque<AllocId>,
        mut msg: String,
        alloc: &Allocation<Tag, Extra>,
        extra: String,
    ) {
        let prefix_len = msg.len();
        let mut relocations = vec![];

        for i in 0..alloc.size.bytes() {
            let i = Size::from_bytes(i);
            if let Some(&(_, target_id)) = alloc.relocations().get(&i) {
                if allocs_seen.insert(target_id) {
                    allocs_to_print.push_back(target_id);
                }
                relocations.push((i, target_id));
            }
            if alloc.undef_mask().is_range_defined(i, i + Size::from_bytes(1)).is_ok() {
                // this `as usize` is fine, since `i` came from a `usize`
                let i = i.bytes() as usize;
                // Checked definedness (and thus range) and relocations. This access also doesn't
                // influence interpreter execution but is only for debugging.
                let bytes = alloc.inspect_with_undef_and_ptr_outside_interpreter(i..i + 1);
                write!(msg, "{:02x} ", bytes[0]).unwrap();
            } else {
                msg.push_str("__ ");
            }
        }

        eprintln!(
            "{}({} bytes, alignment {}){}",
            msg,
            alloc.size.bytes(),
            alloc.align.bytes(),
            extra
        );

        if !relocations.is_empty() {
            msg.clear();
            write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
            let mut pos = Size::ZERO;
            let relocation_width = (self.pointer_size().bytes() - 1) * 3;
            for (i, target_id) in relocations {
                // this `as usize` is fine, since we can't print more chars than `usize::MAX`
                write!(msg, "{:1$}", "", ((i - pos) * 3).bytes() as usize).unwrap();
                let target = format!("({})", target_id);
                // this `as usize` is fine, since we can't print more chars than `usize::MAX`
                write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
                pos = i + self.pointer_size();
            }
            eprintln!("{}", msg);
        }
    }

    /// Print a list of allocations and all allocations they point to, recursively.
    /// This prints directly to stderr, ignoring RUSTC_LOG! It is up to the caller to
    /// control for this.
    pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
        allocs.sort();
        allocs.dedup();
        let mut allocs_to_print = VecDeque::from(allocs);
        let mut allocs_seen = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            let msg = format!("Alloc {:<5} ", format!("{}:", id));
            match self.alloc_map.get_or(id, || Err(())) {
                Ok((kind, alloc)) => {
                    let extra = match kind {
                        MemoryKind::Stack => " (stack)".to_owned(),
                        MemoryKind::Vtable => " (vtable)".to_owned(),
                        MemoryKind::CallerLocation => " (caller_location)".to_owned(),
                        MemoryKind::Machine(m) => format!(" ({:?})", m),
                    };
                    self.dump_alloc_helper(
                        &mut allocs_seen,
                        &mut allocs_to_print,
                        msg,
                        alloc,
                        extra,
                    );
                }
                Err(()) => match self.tcx.alloc_map.lock().get(id) {
                    Some(GlobalAlloc::Memory(alloc)) => self.dump_alloc_helper(
                        &mut allocs_seen,
                        &mut allocs_to_print,
                        msg,
                        alloc,
                        " (immutable)".to_owned(),
                    ),
                    Some(GlobalAlloc::Function(func)) => eprintln!("{} {}", msg, func),
                    Some(GlobalAlloc::Static(did)) => eprintln!("{} {:?}", msg, did),
                    None => eprintln!("{} (deallocated)", msg),
                },
            }
        }
    }

    pub fn leak_report(&self) -> usize {
        let leaks: Vec<_> = self
            .alloc_map
            .filter_map_collect(|&id, &(kind, _)| if kind.may_leak() { None } else { Some(id) });
        let n = leaks.len();
        if n > 0 {
            eprintln!("### LEAK REPORT ###");
            self.dump_allocs(leaks);
        }
        n
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda).
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}

/// Reading and writing.
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    /// Reads the given number of bytes from memory. Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
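    ///
    /// ```rust,ignore (illustrative sketch, not a doctest)
    /// // On success, exactly `size` bytes are returned:
    /// let bytes = memory.read_bytes(ptr, Size::from_bytes(4))?;
    /// assert_eq!(bytes.len(), 4);
    /// ```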
    pub fn read_bytes(&self, ptr: Scalar<M::PointerTag>, size: Size) -> InterpResult<'tcx, &[u8]> {
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
            Some(ptr) => ptr,
            None => return Ok(&[]), // zero-sized access
        };
        self.get_raw(ptr.alloc_id)?.get_bytes(self, ptr, size)
    }

    /// Reads a 0-terminated sequence of bytes from memory. Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
    pub fn read_c_str(&self, ptr: Scalar<M::PointerTag>) -> InterpResult<'tcx, &[u8]> {
        let ptr = self.force_ptr(ptr)?; // We need to read at least 1 byte, so we *need* a ptr.
        self.get_raw(ptr.alloc_id)?.read_c_str(self, ptr)
    }

    /// Writes the given stream of bytes into memory.
    ///
    /// Performs appropriate bounds checks.
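    ///
    /// ```rust,ignore (illustrative sketch, not a doctest)
    /// // Any `IntoIterator<Item = u8>` with an exact `size_hint` works as a source:
    /// memory.write_bytes(ptr, b"hello".iter().copied())?;
    /// ```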
    pub fn write_bytes(
        &mut self,
        ptr: Scalar<M::PointerTag>,
        src: impl IntoIterator<Item = u8>,
    ) -> InterpResult<'tcx> {
        let src = src.into_iter();
        let size = Size::from_bytes(src.size_hint().0 as u64);
        // `write_bytes` checks that this lower bound matches the upper bound, and that
        // both match the actual number of bytes in the iterator.
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
            Some(ptr) => ptr,
            None => return Ok(()), // zero-sized access
        };
        let tcx = self.tcx.tcx;
        self.get_raw_mut(ptr.alloc_id)?.write_bytes(&tcx, ptr, src)
    }

    /// Expects the caller to have checked bounds and alignment.
    pub fn copy(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        self.copy_repeatedly(src, dest, size, 1, nonoverlapping)
    }

    /// Expects the caller to have checked bounds and alignment.
    pub fn copy_repeatedly(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        length: u64,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        // first copy the relocations to a temporary buffer, because
        // `get_bytes_mut` will clear the relocations, which is correct,
        // since we don't want to keep any relocations at the target.
        // (`get_bytes_with_undef_and_ptr` below checks that there are no
        // relocations overlapping the edges; those would not be handled correctly).
        let relocations =
            self.get_raw(src.alloc_id)?.prepare_relocation_copy(self, src, size, dest, length);

        let tcx = self.tcx.tcx;

        // The bits have to be saved locally before writing to dest in case src and dest overlap.
        assert_eq!(size.bytes() as usize as u64, size.bytes());

        // This checks relocation edges on the src.
        let src_bytes =
            self.get_raw(src.alloc_id)?.get_bytes_with_undef_and_ptr(&tcx, src, size)?.as_ptr();
        let dest_bytes =
            self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, size * length)?;

        // If `dest_bytes` is empty we just optimize to not run anything for zsts.
        if dest_bytes.is_empty() {
            return Ok(());
        }

        let dest_bytes = dest_bytes.as_mut_ptr();

        // Prepare a copy of the undef mask.
        let compressed = self.get_raw(src.alloc_id)?.compress_undef_range(src, size);

        if compressed.all_bytes_undef() {
            // Fast path: If all bytes are `undef` then there is nothing to copy. The target range
            // is marked as undef but we otherwise omit changing the byte representation which may
            // be arbitrary for undef bytes.
            // This also avoids writing to the target bytes so that the backing allocation is never
            // touched if the bytes stay undef for the whole interpreter execution. On contemporary
            // operating systems this can avoid physically allocating the page.
            let dest_alloc = self.get_raw_mut(dest.alloc_id)?;
            dest_alloc.mark_definedness(dest, size * length, false);
            dest_alloc.mark_relocation_range(relocations);
            return Ok(());
        }
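
        // Note: ranges `[src, src + size)` and `[dest, dest + size)` overlap
        // iff each range starts before the other one ends; that is what the
        // `nonoverlapping` check below tests.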
        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        // The pointers above remain valid even if the `HashMap` table is moved around because they
        // point into the `Vec` storing the bytes.
        unsafe {
            assert_eq!(size.bytes() as usize as u64, size.bytes());
            if src.alloc_id == dest.alloc_id {
                if nonoverlapping {
                    if (src.offset <= dest.offset && src.offset + size > dest.offset)
                        || (dest.offset <= src.offset && dest.offset + size > src.offset)
                    {
                        throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
                    }
                }
                for i in 0..length {
                    let dest_i = dest_bytes.offset((size.bytes() * i) as isize);
                    ptr::copy(src_bytes, dest_i, size.bytes() as usize);
                }
            } else {
                for i in 0..length {
                    let dest_i = dest_bytes.offset((size.bytes() * i) as isize);
                    ptr::copy_nonoverlapping(src_bytes, dest_i, size.bytes() as usize);
                }
            }
        }

        // now fill in all the data
        self.get_raw_mut(dest.alloc_id)?.mark_compressed_undef_range(&compressed, dest, size, length);

        // copy the relocations to the destination
        self.get_raw_mut(dest.alloc_id)?.mark_relocation_range(relocations);

        Ok(())
    }
}

/// Machine pointer introspection.
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
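    /// Turns a `Scalar` into a `Pointer`, letting the machine decide how raw
    /// integer addresses are handled (via `M::int_to_ptr`).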
    pub fn force_ptr(
        &self,
        scalar: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        match scalar {
            Scalar::Ptr(ptr) => Ok(ptr),
            _ => M::int_to_ptr(&self, scalar.to_machine_usize(self)?),
        }
    }

    pub fn force_bits(
        &self,
        scalar: Scalar<M::PointerTag>,
        size: Size,
    ) -> InterpResult<'tcx, u128> {
        match scalar.to_bits_or_ptr(size, self) {
            Ok(bits) => Ok(bits),
            Err(ptr) => Ok(M::ptr_to_int(&self, ptr)? as u128),
        }
    }
}