//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-NULL)
//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the zero-sized case!
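//!
//! For illustration (a hedged sketch; `check_ptr_access` below is the method that
//! implements this convention, and the caller here is hypothetical): a zero-sized
//! access through a non-NULL, well-aligned integer "pointer" succeeds without any
//! allocation existing at all:
//!
//! ```ignore (illustrative)
//! // Address 8 is non-NULL and 8-aligned, so a zero-sized access succeeds
//! // and yields `None` -- there is no memory that actually gets touched.
//! let ptr = memory.check_ptr_access(
//!     Scalar::from_uint(8u64, memory.pointer_size()),
//!     Size::ZERO,
//!     Align::from_bytes(8).unwrap(),
//! )?;
//! assert!(ptr.is_none());
//! ```
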
use std::collections::VecDeque;
use std::ptr;
use std::borrow::Cow;

use rustc::ty::{self, Instance, ParamEnv, query::TyCtxtAt};
use rustc::ty::layout::{Align, TargetDataLayout, Size, HasDataLayout};
use rustc_data_structures::fx::{FxHashSet, FxHashMap};

use syntax::ast::Mutability;

use super::{
    Pointer, AllocId, Allocation, GlobalId, AllocationExtra,
    InterpResult, Scalar, GlobalAlloc, PointerArithmetic,
    Machine, AllocMap, MayLeak, ErrorHandled, CheckInAllocMsg,
};

#[derive(Debug, PartialEq, Copy, Clone)]
pub enum MemoryKind<T> {
    /// Error if deallocated except during a stack pop.
    Stack,
    /// Error if ever deallocated.
    Vtable,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
    Machine(T),
}

impl<T: MayLeak> MayLeak for MemoryKind<T> {
    #[inline]
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::Vtable => true,
            MemoryKind::Machine(k) => k.may_leak(),
        }
    }
}

/// Used by `get_size_and_align` to indicate whether the allocation needs to be live.
#[derive(Debug, Copy, Clone)]
pub enum AllocCheck {
    /// Allocation must be live and not a function pointer.
    Dereferencable,
    /// Allocation needs to be live, but may be a function pointer.
    Live,
    /// Allocation may be dead.
    MaybeDead,
}

/// The value of a function pointer.
#[derive(Debug, Copy, Clone)]
pub enum FnVal<'tcx, Other> {
    Instance(Instance<'tcx>),
    Other(Other),
}

impl<'tcx, Other> FnVal<'tcx, Other> {
    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
        match self {
            FnVal::Instance(instance) =>
                Ok(instance),
            FnVal::Other(_) => throw_unsup_format!(
                "'foreign' function pointers are not supported in this context"
            ),
        }
    }
}

// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    /// Allocations local to this instance of the miri engine. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// static and gets looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a static (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a static, doing so will
    /// create a copy of the static allocation here.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) alloc_map: M::MemoryMap,

    /// Map for "extra" function pointers.
    extra_fn_ptr_map: FxHashMap<AllocId, M::ExtraFnVal>,

    /// To be able to compare pointers with NULL, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist any more.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,

    /// Extra data added by the machine.
    pub extra: M::MemoryExtra,

    /// Lets us implement `HasDataLayout`, which is awfully convenient.
    pub tcx: TyCtxtAt<'tcx>,
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for Memory<'mir, 'tcx, M> {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        &self.tcx.data_layout
    }
}

// FIXME: Really we shouldn't clone memory, ever. Snapshot machinery should instead
// carefully copy only the reachable parts.
impl<'mir, 'tcx, M> Clone for Memory<'mir, 'tcx, M>
where
    M: Machine<'mir, 'tcx, PointerTag = (), AllocExtra = (), MemoryExtra = ()>,
    M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKinds>, Allocation)>,
{
    fn clone(&self) -> Self {
        Memory {
            alloc_map: self.alloc_map.clone(),
            extra_fn_ptr_map: self.extra_fn_ptr_map.clone(),
            dead_alloc_map: self.dead_alloc_map.clone(),
            extra: (),
            tcx: self.tcx,
        }
    }
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    pub fn new(tcx: TyCtxtAt<'tcx>, extra: M::MemoryExtra) -> Self {
        Memory {
            alloc_map: M::MemoryMap::default(),
            extra_fn_ptr_map: FxHashMap::default(),
            dead_alloc_map: FxHashMap::default(),
            extra,
            tcx,
        }
    }

    #[inline]
    pub fn tag_static_base_pointer(&self, ptr: Pointer) -> Pointer<M::PointerTag> {
        ptr.with_tag(M::tag_static_base_pointer(&self.extra, ptr.alloc_id))
    }

    pub fn create_fn_alloc(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
    ) -> Pointer<M::PointerTag>
    {
        let id = match fn_val {
            FnVal::Instance(instance) => self.tcx.alloc_map.lock().create_fn_alloc(instance),
            FnVal::Other(extra) => {
                // FIXME(RalfJung): Should we have a cache here?
                let id = self.tcx.alloc_map.lock().reserve();
                let old = self.extra_fn_ptr_map.insert(id, extra);
                assert!(old.is_none());
                id
            }
        };
        self.tag_static_base_pointer(Pointer::from(id))
    }

    pub fn allocate(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> Pointer<M::PointerTag> {
        let alloc = Allocation::undef(size, align);
        self.allocate_with(alloc, kind)
    }

    pub fn allocate_static_bytes(
        &mut self,
        bytes: &[u8],
        kind: MemoryKind<M::MemoryKinds>,
    ) -> Pointer<M::PointerTag> {
        let alloc = Allocation::from_byte_aligned_bytes(bytes);
        self.allocate_with(alloc, kind)
    }

    pub fn allocate_with(
        &mut self,
        alloc: Allocation,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> Pointer<M::PointerTag> {
        let id = self.tcx.alloc_map.lock().reserve();
        let (alloc, tag) = M::tag_allocation(&self.extra, id, Cow::Owned(alloc), Some(kind));
        self.alloc_map.insert(id, (kind, alloc.into_owned()));
        Pointer::from(id).with_tag(tag)
    }

    pub fn reallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        old_size_and_align: Option<(Size, Align)>,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        if ptr.offset.bytes() != 0 {
            throw_unsup!(ReallocateNonBasePtr)
        }

        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
        let new_ptr = self.allocate(new_size, new_align, kind);
        let old_size = match old_size_and_align {
            Some((size, _align)) => size,
            None => self.get_raw(ptr.alloc_id)?.size,
        };
        self.copy(
            ptr,
            new_ptr,
            old_size.min(new_size),
            /*nonoverlapping*/ true,
        )?;
        self.deallocate(ptr, old_size_and_align, kind)?;

        Ok(new_ptr)
    }

    /// Deallocate a local, or do nothing if that local has been made into a static.
    pub fn deallocate_local(&mut self, ptr: Pointer<M::PointerTag>) -> InterpResult<'tcx> {
        // The allocation might already have been removed by static interning.
        // This can only really happen in the CTFE instance, not in miri.
        if self.alloc_map.contains_key(&ptr.alloc_id) {
            self.deallocate(ptr, None, MemoryKind::Stack)
        } else {
            Ok(())
        }
    }

    pub fn deallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        old_size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> InterpResult<'tcx> {
        trace!("deallocating: {}", ptr.alloc_id);

        if ptr.offset.bytes() != 0 {
            throw_unsup!(DeallocateNonBasePtr)
        }

        let (alloc_kind, mut alloc) = match self.alloc_map.remove(&ptr.alloc_id) {
            Some(alloc) => alloc,
            None => {
                // Deallocating static memory -- always an error.
                return Err(match self.tcx.alloc_map.lock().get(ptr.alloc_id) {
                    Some(GlobalAlloc::Function(..)) => err_unsup!(DeallocatedWrongMemoryKind(
                        "function".to_string(),
                        format!("{:?}", kind),
                    )),
                    Some(GlobalAlloc::Static(..)) | Some(GlobalAlloc::Memory(..)) => err_unsup!(
                        DeallocatedWrongMemoryKind("static".to_string(), format!("{:?}", kind))
                    ),
                    None => err_unsup!(DoubleFree),
                }
                .into());
            }
        };

        if alloc_kind != kind {
            throw_unsup!(DeallocatedWrongMemoryKind(
                format!("{:?}", alloc_kind),
                format!("{:?}", kind),
            ))
        }
        if let Some((size, align)) = old_size_and_align {
            if size != alloc.size || align != alloc.align {
                let bytes = alloc.size;
                throw_unsup!(IncorrectAllocationInformation(size, bytes, align, alloc.align))
            }
        }

        // Let the machine take some extra action.
        let size = alloc.size;
        AllocationExtra::memory_deallocated(&mut alloc, ptr, size)?;

        // Don't forget to remember the size and align of this now-dead allocation.
        let old = self.dead_alloc_map.insert(
            ptr.alloc_id,
            (alloc.size, alloc.align)
        );
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        Ok(())
    }

    /// Check if the given scalar is allowed to do a memory access of given `size`
    /// and `align`. On success, returns `None` for zero-sized accesses (where
    /// nothing else is left to do) and a `Pointer` to use for the actual access otherwise.
    /// Crucially, if the input is a `Pointer`, we will test it for liveness
    /// *even if* the size is 0.
    ///
    /// Everyone accessing memory based on a `Scalar` should use this method to get the
    /// `Pointer` they need. And even if you already have a `Pointer`, call this method
    /// to make sure it is sufficiently aligned and not dangling. Not doing that may
    /// cause ICEs.
    ///
    /// Most of the time you should use `check_mplace_access`, but when you just have a pointer,
    /// this method is still appropriate.
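    ///
    /// A typical call pattern (illustrative; it mirrors `read_bytes` below):
    ///
    /// ```ignore (illustrative)
    /// let ptr = match self.check_ptr_access(sptr, size, align)? {
    ///     Some(ptr) => ptr,       // non-zero-sized access: live, in-bounds, aligned
    ///     None => return Ok(&[]), // zero-sized access: no memory to actually touch
    /// };
    /// ```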
    #[inline(always)]
    pub fn check_ptr_access(
        &self,
        sptr: Scalar<M::PointerTag>,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
        let align = if M::CHECK_ALIGN { Some(align) } else { None };
        self.check_ptr_access_align(sptr, size, align, CheckInAllocMsg::MemoryAccessTest)
    }

    /// Like `check_ptr_access`, but *definitely* checks alignment when `align`
    /// is `Some` (overriding `M::CHECK_ALIGN`). Also lets the caller control
    /// the error message for the out-of-bounds case.
    pub fn check_ptr_access_align(
        &self,
        sptr: Scalar<M::PointerTag>,
        size: Size,
        align: Option<Align>,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
        fn check_offset_align(offset: u64, align: Align) -> InterpResult<'static> {
            if offset % align.bytes() == 0 {
                Ok(())
            } else {
                // The biggest power of two through which `offset` is divisible.
                let offset_pow2 = 1 << offset.trailing_zeros();
                throw_unsup!(AlignmentCheckFailed {
                    has: Align::from_bytes(offset_pow2).unwrap(),
                    required: align,
                })
            }
        }

        // Normalize to a `Pointer` if we definitely need one.
        let normalized = if size.bytes() == 0 {
            // Can be an integer, just take what we got. We do NOT `force_bits` here;
            // if this is already a `Pointer` we want to do the bounds checks!
            sptr
        } else {
            // A "real" access, we must get a pointer.
            Scalar::Ptr(self.force_ptr(sptr)?)
        };
        Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) {
            Ok(bits) => {
                let bits = bits as u64; // it's ptr-sized
                assert!(size.bytes() == 0);
                // Must be non-NULL.
                if bits == 0 {
                    throw_unsup!(InvalidNullPointerUsage)
                }
                // Must be aligned.
                if let Some(align) = align {
                    check_offset_align(bits, align)?;
                }
                None
            }
            Err(ptr) => {
                let (allocation_size, alloc_align) =
                    self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferencable)?;
                // Test bounds. This also ensures non-NULL.
                // It is sufficient to check this for the end pointer. The addition
                // checks for overflow.
                let end_ptr = ptr.offset(size, self)?;
                end_ptr.check_inbounds_alloc(allocation_size, msg)?;
                // Test align. Check this last; if both bounds and alignment are violated,
                // we want the error to be about the bounds.
                if let Some(align) = align {
                    if alloc_align.bytes() < align.bytes() {
                        // The allocation itself is not aligned enough.
                        // FIXME: Alignment check is too strict, depending on the base address that
                        // got picked we might be aligned even if this check fails.
                        // We instead have to fall back to converting to an integer and checking
                        // the "real" alignment.
                        throw_unsup!(AlignmentCheckFailed {
                            has: alloc_align,
                            required: align,
                        });
                    }
                    check_offset_align(ptr.offset.bytes(), align)?;
                }

                // We can still be zero-sized in this branch, in which case we have to
                // return `None`.
                if size.bytes() == 0 { None } else { Some(ptr) }
            }
        })
    }

    /// Test if the pointer might be NULL.
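    /// (This is an over-approximation: an in-bounds pointer into a -- possibly dead --
    /// allocation can never be NULL, since allocations never start at address 0, so we
    /// conservatively answer "maybe" exactly when the bounds check fails.)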
    pub fn ptr_may_be_null(
        &self,
        ptr: Pointer<M::PointerTag>,
    ) -> bool {
        let (size, _align) = self.get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)
            .expect("alloc info with MaybeDead cannot fail");
        ptr.check_inbounds_alloc(size, CheckInAllocMsg::NullPointerTest).is_err()
    }
}

/// Allocation accessors
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    /// Helper function to obtain the global (tcx) allocation for a static.
    /// This attempts to return a reference to an existing allocation if
    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
    /// this machine use the same pointer tag, so it is indirected through
    /// `M::tag_allocation`.
    ///
    /// Notice that every static has two `AllocId`s that will resolve to the same
    /// thing here: one maps to `GlobalAlloc::Static`, this is the "lazy" ID,
    /// and the other one maps to `GlobalAlloc::Memory`, this is the "resolved" ID
    /// returned by `const_eval_raw`.
    /// The resolved ID is never used by the interpreted program; it is hidden.
    /// The `GlobalAlloc::Memory` branch here is still reachable though: when a static
    /// contains a reference to memory that was created during its evaluation (i.e., not to
    /// another static), those inner references only exist in "resolved" form.
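    ///
    /// Schematically, with made-up IDs for illustration: for `static FOO: u32 = 42;`,
    /// the interpreted program only ever sees the lazy `alloc0 -> GlobalAlloc::Static(FOO)`,
    /// while `const_eval_raw` returns the resolved `alloc1 -> GlobalAlloc::Memory(..)`
    /// holding the bytes of `42`; both resolve to the same allocation here.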
    fn get_static_alloc(
        memory_extra: &M::MemoryExtra,
        tcx: TyCtxtAt<'tcx>,
        id: AllocId,
    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
        let alloc = tcx.alloc_map.lock().get(id);
        let alloc = match alloc {
            Some(GlobalAlloc::Memory(mem)) =>
                Cow::Borrowed(mem),
            Some(GlobalAlloc::Function(..)) =>
                throw_unsup!(DerefFunctionPointer),
            None =>
                throw_unsup!(DanglingPointerDeref),
            Some(GlobalAlloc::Static(def_id)) => {
                // We got a "lazy" static that has not been computed yet.
                if tcx.is_foreign_item(def_id) {
                    trace!("static_alloc: foreign item {:?}", def_id);
                    M::find_foreign_static(tcx.tcx, def_id)?
                } else {
                    trace!("static_alloc: Need to compute {:?}", def_id);
                    let instance = Instance::mono(tcx.tcx, def_id);
                    let gid = GlobalId {
                        instance,
                        promoted: None,
                    };
                    // Use the raw query here to break validation cycles. Later uses of the static
                    // will call the full query anyway.
                    let raw_const = tcx.const_eval_raw(ty::ParamEnv::reveal_all().and(gid))
                        .map_err(|err| {
                            // No need to report anything; the `const_eval` call takes care of that
                            // for statics.
                            assert!(tcx.is_static(def_id));
                            match err {
                                ErrorHandled::Reported =>
                                    err_inval!(ReferencedConstant),
                                ErrorHandled::TooGeneric =>
                                    err_inval!(TooGeneric),
                            }
                        })?;
                    // Make sure we use the ID of the resolved memory, not the lazy one!
                    let id = raw_const.alloc_id;
                    let allocation = tcx.alloc_map.lock().unwrap_memory(id);

                    M::before_access_static(allocation)?;
                    Cow::Borrowed(allocation)
                }
            }
        };
        // We got tcx memory. Let the machine figure out whether and how to
        // turn that into memory with the right pointer tag.
        Ok(M::tag_allocation(
            memory_extra,
            id, // always use the ID we got as input, not the "hidden" one.
            alloc,
            M::STATIC_KIND.map(MemoryKind::Machine),
        ).0)
    }

    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCtx` instead!
    pub fn get_raw(
        &self,
        id: AllocId,
    ) -> InterpResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
        // The error type of the inner closure here is somewhat funny. We have two
        // ways of "erroring": an actual error, or because we got a reference from
        // `get_static_alloc` that we can actually use directly without inserting anything anywhere.
        // So the error type is `InterpResult<'tcx, &Allocation<M::PointerTag>>`.
        let a = self.alloc_map.get_or(id, || {
            let alloc = Self::get_static_alloc(&self.extra, self.tcx, id).map_err(Err)?;
            match alloc {
                Cow::Borrowed(alloc) => {
                    // We got a ref, cheaply return that as an "error" so that the
                    // map does not get mutated.
                    Err(Ok(alloc))
                }
                Cow::Owned(alloc) => {
                    // Need to put it into the map and return a ref to that.
                    let kind = M::STATIC_KIND.expect(
                        "I got an owned allocation that I have to copy but the machine does \
                            not expect that to happen"
                    );
                    Ok((MemoryKind::Machine(kind), alloc))
                }
            }
        });
        // Now unpack that funny error type.
        match a {
            Ok(a) => Ok(&a.1),
            Err(a) => a,
        }
    }

    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCtx` instead!
    pub fn get_raw_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
        let tcx = self.tcx;
        let memory_extra = &self.extra;
        let a = self.alloc_map.get_mut_or(id, || {
            // Need to make a copy, even if `get_static_alloc` is able
            // to give us a cheap reference.
            let alloc = Self::get_static_alloc(memory_extra, tcx, id)?;
            if alloc.mutability == Mutability::Immutable {
                throw_unsup!(ModifiedConstantMemory)
            }
            match M::STATIC_KIND {
                Some(kind) => Ok((MemoryKind::Machine(kind), alloc.into_owned())),
                None => throw_unsup!(ModifiedStatic),
            }
        });
        // Unpack the error type manually because type inference doesn't
        // work otherwise (and we cannot help it because of `impl Trait`).
        match a {
            Err(e) => Err(e),
            Ok(a) => {
                let a = &mut a.1;
                if a.mutability == Mutability::Immutable {
                    throw_unsup!(ModifiedConstantMemory)
                }
                Ok(a)
            }
        }
    }

    /// Obtain the size and alignment of an allocation, even if that allocation has
    /// been deallocated.
    ///
    /// If `liveness` is `AllocCheck::MaybeDead`, this function always returns `Ok`.
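    ///
    /// For example (both callers are in this file): `ptr_may_be_null` queries with
    /// `AllocCheck::MaybeDead` and can therefore `expect` success, while
    /// `check_ptr_access_align` queries with `AllocCheck::Dereferencable` and
    /// propagates the error for dangling and function pointers.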
    pub fn get_size_and_align(
        &self,
        id: AllocId,
        liveness: AllocCheck,
    ) -> InterpResult<'static, (Size, Align)> {
        // # Regular allocations
        // Don't use `self.get_raw` here as that will
        // a) cause cycles in case `id` refers to a static, and
        // b) duplicate a static's allocation in miri.
        if let Some((_, alloc)) = self.alloc_map.get(id) {
            return Ok((alloc.size, alloc.align));
        }

        // # Function pointers
        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
        if self.get_fn_alloc(id).is_ok() {
            return if let AllocCheck::Dereferencable = liveness {
                // The caller requested no function pointers.
                throw_unsup!(DerefFunctionPointer)
            } else {
                Ok((Size::ZERO, Align::from_bytes(1).unwrap()))
            };
        }

        // # Statics
        // Can't do this in the match argument, we may get cycle errors since the lock would
        // be held throughout the match.
        let alloc = self.tcx.alloc_map.lock().get(id);
        match alloc {
            Some(GlobalAlloc::Static(did)) => {
                // Use the size and align of the type.
                let ty = self.tcx.type_of(did);
                let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
                Ok((layout.size, layout.align.abi))
            },
            Some(GlobalAlloc::Memory(alloc)) =>
                // Need to duplicate the logic here, because the global allocations have
                // different associated types than the interpreter-local ones.
                Ok((alloc.size, alloc.align)),
            Some(GlobalAlloc::Function(_)) =>
                bug!("We already checked function pointers above"),
            // The rest must be dead.
            None => if let AllocCheck::MaybeDead = liveness {
                // Deallocated pointers are allowed; we should be able to find
                // them in the dead allocation map.
                Ok(*self.dead_alloc_map.get(&id)
                    .expect("deallocated pointers should all be recorded in \
                        `dead_alloc_map`"))
            } else {
                throw_unsup!(DanglingPointerDeref)
            },
        }
    }

    fn get_fn_alloc(&self, id: AllocId) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        trace!("reading fn ptr: {}", id);
        if let Some(extra) = self.extra_fn_ptr_map.get(&id) {
            Ok(FnVal::Other(*extra))
        } else {
            match self.tcx.alloc_map.lock().get(id) {
                Some(GlobalAlloc::Function(instance)) => Ok(FnVal::Instance(instance)),
                _ => throw_unsup!(ExecuteMemory),
            }
        }
    }

    pub fn get_fn(
        &self,
        ptr: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        let ptr = self.force_ptr(ptr)?; // We definitely need a pointer value.
        if ptr.offset.bytes() != 0 {
            throw_unsup!(InvalidFunctionPointer)
        }
        self.get_fn_alloc(ptr.alloc_id)
    }

    pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
        self.get_raw_mut(id)?.mutability = Mutability::Immutable;
        Ok(())
    }

    /// For debugging, print an allocation and all allocations it points to, recursively.
    pub fn dump_alloc(&self, id: AllocId) {
        self.dump_allocs(vec![id]);
    }

    fn dump_alloc_helper<Tag, Extra>(
        &self,
        allocs_seen: &mut FxHashSet<AllocId>,
        allocs_to_print: &mut VecDeque<AllocId>,
        mut msg: String,
        alloc: &Allocation<Tag, Extra>,
        extra: String,
    ) {
        use std::fmt::Write;

        let prefix_len = msg.len();
        let mut relocations = vec![];

        for i in 0..alloc.size.bytes() {
            let i = Size::from_bytes(i);
            if let Some(&(_, target_id)) = alloc.relocations().get(&i) {
                if allocs_seen.insert(target_id) {
                    allocs_to_print.push_back(target_id);
                }
                relocations.push((i, target_id));
            }
            if alloc.undef_mask().is_range_defined(i, i + Size::from_bytes(1)).is_ok() {
                // This `as usize` is fine, since `i` came from a `usize`.
                let i = i.bytes() as usize;

                // We checked definedness (and thus the range) and relocations above. This
                // access does not influence interpreter execution; it is only for debugging.
                let bytes = alloc.inspect_with_undef_and_ptr_outside_interpreter(i..i+1);
                write!(msg, "{:02x} ", bytes[0]).unwrap();
            } else {
                msg.push_str("__ ");
            }
        }

        trace!(
            "{}({} bytes, alignment {}){}",
            msg,
            alloc.size.bytes(),
            alloc.align.bytes(),
            extra
        );
        if !relocations.is_empty() {
            msg.clear();
            write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
            let mut pos = Size::ZERO;
            let relocation_width = (self.pointer_size().bytes() - 1) * 3;
            for (i, target_id) in relocations {
                // This `as usize` is fine, since we can't print more chars than `usize::MAX`.
                write!(msg, "{:1$}", "", ((i - pos) * 3).bytes() as usize).unwrap();
                let target = format!("({})", target_id);
                // This `as usize` is fine, since we can't print more chars than `usize::MAX`.
                write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
                pos = i + self.pointer_size();
            }
            trace!("{}", msg);
        }
    }

    /// For debugging, print a list of allocations and all allocations they point to, recursively.
    pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
        if !log_enabled!(::log::Level::Trace) {
            return;
        }
        allocs.sort();
        allocs.dedup();
        let mut allocs_to_print = VecDeque::from(allocs);
        let mut allocs_seen = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            let msg = format!("Alloc {:<5} ", format!("{}:", id));

            // normal alloc?
            match self.alloc_map.get_or(id, || Err(())) {
                Ok((kind, alloc)) => {
                    let extra = match kind {
                        MemoryKind::Stack => " (stack)".to_owned(),
                        MemoryKind::Vtable => " (vtable)".to_owned(),
                        MemoryKind::Machine(m) => format!(" ({:?})", m),
                    };
                    self.dump_alloc_helper(
                        &mut allocs_seen, &mut allocs_to_print,
                        msg, alloc, extra
                    );
                },
                Err(()) => {
                    // static alloc?
                    match self.tcx.alloc_map.lock().get(id) {
                        Some(GlobalAlloc::Memory(alloc)) => {
                            self.dump_alloc_helper(
                                &mut allocs_seen, &mut allocs_to_print,
                                msg, alloc, " (immutable)".to_owned()
                            );
                        }
                        Some(GlobalAlloc::Function(func)) => {
                            trace!("{} {}", msg, func);
                        }
                        Some(GlobalAlloc::Static(did)) => {
                            trace!("{} {:?}", msg, did);
                        }
                        None => {
                            trace!("{} (deallocated)", msg);
                        }
                    }
                },
            };
        }
    }

    pub fn leak_report(&self) -> usize {
        trace!("### LEAK REPORT ###");
        let leaks: Vec<_> = self.alloc_map.filter_map_collect(|&id, &(kind, _)| {
            if kind.may_leak() { None } else { Some(id) }
        });
        let n = leaks.len();
        self.dump_allocs(leaks);
        n
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda).
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}

/// Reading and writing.
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    /// Reads the given number of bytes from memory. Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
    pub fn read_bytes(
        &self,
        ptr: Scalar<M::PointerTag>,
        size: Size,
    ) -> InterpResult<'tcx, &[u8]> {
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
            Some(ptr) => ptr,
            None => return Ok(&[]), // zero-sized access
        };
        self.get_raw(ptr.alloc_id)?.get_bytes(self, ptr, size)
    }

    /// Reads a 0-terminated sequence of bytes from memory. Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
    pub fn read_c_str(&self, ptr: Scalar<M::PointerTag>) -> InterpResult<'tcx, &[u8]> {
        let ptr = self.force_ptr(ptr)?; // We need to read at least 1 byte, so we *need* a ptr.
        self.get_raw(ptr.alloc_id)?.read_c_str(self, ptr)
    }

    /// Writes the given stream of bytes into memory.
    ///
    /// Performs appropriate bounds checks.
    pub fn write_bytes(
        &mut self,
        ptr: Scalar<M::PointerTag>,
        src: impl IntoIterator<Item=u8>,
    ) -> InterpResult<'tcx>
    {
        let src = src.into_iter();
        let size = Size::from_bytes(src.size_hint().0 as u64);
        // `Allocation::write_bytes` below checks that this lower bound from `size_hint`
        // actually matches the number of bytes the iterator yields.
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
            Some(ptr) => ptr,
            None => return Ok(()), // zero-sized access
        };
        let tcx = self.tcx.tcx;
        self.get_raw_mut(ptr.alloc_id)?.write_bytes(&tcx, ptr, src)
    }

    /// Expects the caller to have checked bounds and alignment.
    pub fn copy(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        self.copy_repeatedly(src, dest, size, 1, nonoverlapping)
    }

    /// Expects the caller to have checked bounds and alignment.
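    ///
    /// Copies `size` bytes from `src` to `dest`, `length` times, so `size * length`
    /// bytes end up at `dest`; this is used, e.g., when evaluating array repeat
    /// expressions like `[x; N]`. `copy` above is simply the `length == 1` case.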
    pub fn copy_repeatedly(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        length: u64,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        // First copy the relocations to a temporary buffer, because
        // `get_bytes_mut` will clear the relocations, which is correct,
        // since we don't want to keep any relocations at the target.
        // (`get_bytes_with_undef_and_ptr` below checks that there are no
        // relocations overlapping the edges; those would not be handled correctly.)
        let relocations = self.get_raw(src.alloc_id)?
            .prepare_relocation_copy(self, src, size, dest, length);

        let tcx = self.tcx.tcx;

        // This checks relocation edges on the src.
        let src_bytes = self.get_raw(src.alloc_id)?
            .get_bytes_with_undef_and_ptr(&tcx, src, size)?
            .as_ptr();
        let dest_bytes = self.get_raw_mut(dest.alloc_id)?
            .get_bytes_mut(&tcx, dest, size * length)?
            .as_mut_ptr();

        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        // The pointers above remain valid even if the `HashMap` table is moved around because they
        // point into the `Vec` storing the bytes.
        unsafe {
            assert_eq!(size.bytes() as usize as u64, size.bytes());
            if src.alloc_id == dest.alloc_id {
                if nonoverlapping {
                    if (src.offset <= dest.offset && src.offset + size > dest.offset) ||
                       (dest.offset <= src.offset && dest.offset + size > src.offset)
                    {
                        throw_ub_format!(
                            "copy_nonoverlapping called on overlapping ranges"
                        )
                    }
                }

                for i in 0..length {
                    ptr::copy(src_bytes,
                              dest_bytes.offset((size.bytes() * i) as isize),
                              size.bytes() as usize);
                }
            } else {
                for i in 0..length {
                    ptr::copy_nonoverlapping(src_bytes,
                                             dest_bytes.offset((size.bytes() * i) as isize),
                                             size.bytes() as usize);
                }
            }
        }

        // Copy the definedness to the destination.
        self.copy_undef_mask(src, dest, size, length)?;
        // Copy the relocations to the destination.
        self.get_raw_mut(dest.alloc_id)?.mark_relocation_range(relocations);

        Ok(())
    }
}

/// Undefined bytes
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    // FIXME: Add a fast version for the common, nonoverlapping case.
    fn copy_undef_mask(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        repeat: u64,
    ) -> InterpResult<'tcx> {
        // The bits have to be saved locally before writing to dest in case src and dest overlap.
        assert_eq!(size.bytes() as usize as u64, size.bytes());

        let src_alloc = self.get_raw(src.alloc_id)?;
        let compressed = src_alloc.compress_undef_range(src, size);

        // Now fill in all the data.
        let dest_allocation = self.get_raw_mut(dest.alloc_id)?;
        dest_allocation.mark_compressed_undef_range(&compressed, dest, size, repeat);

        Ok(())
    }

    pub fn force_ptr(
        &self,
        scalar: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        match scalar {
            Scalar::Ptr(ptr) => Ok(ptr),
            _ => M::int_to_ptr(&self, scalar.to_machine_usize(self)?),
        }
    }

    pub fn force_bits(
        &self,
        scalar: Scalar<M::PointerTag>,
        size: Size,
    ) -> InterpResult<'tcx, u128> {
        match scalar.to_bits_or_ptr(size, self) {
            Ok(bits) => Ok(bits),
            Err(ptr) => Ok(M::ptr_to_int(&self, ptr)? as u128),
        }
    }
}