//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-NULL)
//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the empty case!
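//!
//! A sketch of the intended calling convention (`memory`, `sptr`, and `align` are
//! hypothetical locals; this block is not compiled here):
//!
//! ```rust,ignore
//! // Even a zero-sized access must be aligned (and, if `sptr` is a real
//! // pointer, the allocation must be live), so the checks come first.
//! match memory.check_ptr_access(sptr, Size::ZERO, align)? {
//!     Some(ptr) => { /* a checked `Pointer`; do the actual access */ }
//!     None => { /* zero-sized access; nothing left to do */ }
//! }
//! ```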
use std::collections::VecDeque;
use std::ptr;
use std::borrow::Cow;

use rustc::ty::{self, Instance, ParamEnv, query::TyCtxtAt};
use rustc::ty::layout::{Align, TargetDataLayout, Size, HasDataLayout};
use rustc_data_structures::fx::{FxHashSet, FxHashMap};

use syntax::ast::Mutability;

use super::{
    Pointer, AllocId, Allocation, GlobalId, AllocationExtra,
    InterpResult, Scalar, InterpError, GlobalAlloc, PointerArithmetic,
    Machine, AllocMap, MayLeak, ErrorHandled, CheckInAllocMsg, InvalidProgramInfo,
};
#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
pub enum MemoryKind<T> {
    /// Error if deallocated except during a stack pop
    Stack,
    /// Error if ever deallocated
    Vtable,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones
    Machine(T),
}
impl<T: MayLeak> MayLeak for MemoryKind<T> {
    #[inline]
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::Vtable => true,
            MemoryKind::Machine(k) => k.may_leak(),
        }
    }
}
/// Used by `get_size_and_align` to indicate whether the allocation needs to be live.
#[derive(Debug, Copy, Clone)]
pub enum AllocCheck {
    /// Allocation must be live and not a function pointer.
    Dereferencable,
    /// Allocation needs to be live, but may be a function pointer.
    Live,
    /// Allocation may be dead.
    MaybeDead,
}
/// The value of a function pointer.
#[derive(Debug, Copy, Clone)]
pub enum FnVal<'tcx, Other> {
    Instance(Instance<'tcx>),
    Other(Other),
}

impl<'tcx, Other> FnVal<'tcx, Other> {
    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
        match self {
            FnVal::Instance(instance) =>
                Ok(instance),
            FnVal::Other(_) => err!(MachineError(format!(
                "Expected instance function pointer, got 'other' pointer"
            ))),
        }
    }
}
// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    /// Allocations local to this instance of the miri engine. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// static and looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a static (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a static, doing so will
    /// create a copy of the static allocation here.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) alloc_map: M::MemoryMap,
    /// Map for "extra" function pointers.
    extra_fn_ptr_map: FxHashMap<AllocId, M::ExtraFnVal>,

    /// To be able to compare pointers with NULL, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist any more.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,

    /// Extra data added by the machine.
    pub extra: M::MemoryExtra,

    /// Lets us implement `HasDataLayout`, which is awfully convenient.
    pub tcx: TyCtxtAt<'tcx>,
}
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for Memory<'mir, 'tcx, M> {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        &self.tcx.data_layout
    }
}
// FIXME: Really we shouldn't clone memory, ever. Snapshot machinery should instead
// carefully copy only the reachable parts.
impl<'mir, 'tcx, M> Clone for Memory<'mir, 'tcx, M>
where
    M: Machine<'mir, 'tcx, PointerTag = (), AllocExtra = (), MemoryExtra = ()>,
    M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKinds>, Allocation)>,
{
    fn clone(&self) -> Self {
        Memory {
            alloc_map: self.alloc_map.clone(),
            extra_fn_ptr_map: self.extra_fn_ptr_map.clone(),
            dead_alloc_map: self.dead_alloc_map.clone(),
            extra: (),
            tcx: self.tcx,
        }
    }
}
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    pub fn new(tcx: TyCtxtAt<'tcx>, extra: M::MemoryExtra) -> Self {
        Memory {
            alloc_map: M::MemoryMap::default(),
            extra_fn_ptr_map: FxHashMap::default(),
            dead_alloc_map: FxHashMap::default(),
            extra,
            tcx,
        }
    }

    #[inline]
    pub fn tag_static_base_pointer(&self, ptr: Pointer) -> Pointer<M::PointerTag> {
        ptr.with_tag(M::tag_static_base_pointer(&self.extra, ptr.alloc_id))
    }
    pub fn create_fn_alloc(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
    ) -> Pointer<M::PointerTag>
    {
        let id = match fn_val {
            FnVal::Instance(instance) => self.tcx.alloc_map.lock().create_fn_alloc(instance),
            FnVal::Other(extra) => {
                // FIXME(RalfJung): Should we have a cache here?
                let id = self.tcx.alloc_map.lock().reserve();
                let old = self.extra_fn_ptr_map.insert(id, extra);
                assert!(old.is_none());
                id
            }
        };
        self.tag_static_base_pointer(Pointer::from(id))
    }
    pub fn allocate(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> Pointer<M::PointerTag> {
        let alloc = Allocation::undef(size, align);
        self.allocate_with(alloc, kind)
    }
    pub fn allocate_static_bytes(
        &mut self,
        bytes: &[u8],
        kind: MemoryKind<M::MemoryKinds>,
    ) -> Pointer<M::PointerTag> {
        let alloc = Allocation::from_byte_aligned_bytes(bytes);
        self.allocate_with(alloc, kind)
    }
    pub fn allocate_with(
        &mut self,
        alloc: Allocation,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> Pointer<M::PointerTag> {
        let id = self.tcx.alloc_map.lock().reserve();
        let (alloc, tag) = M::tag_allocation(&self.extra, id, Cow::Owned(alloc), Some(kind));
        self.alloc_map.insert(id, (kind, alloc.into_owned()));
        Pointer::from(id).with_tag(tag)
    }
    pub fn reallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        old_size_and_align: Option<(Size, Align)>,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        if ptr.offset.bytes() != 0 {
            return err!(ReallocateNonBasePtr);
        }

        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
        let new_ptr = self.allocate(new_size, new_align, kind);
        let old_size = match old_size_and_align {
            Some((size, _align)) => size,
            None => Size::from_bytes(self.get(ptr.alloc_id)?.bytes.len() as u64),
        };
        self.copy(
            ptr,
            new_ptr,
            old_size.min(new_size),
            /*nonoverlapping*/ true,
        )?;
        self.deallocate(ptr, old_size_and_align, kind)?;

        Ok(new_ptr)
    }
    /// Deallocate a local, or do nothing if that local has been made into a static.
    pub fn deallocate_local(&mut self, ptr: Pointer<M::PointerTag>) -> InterpResult<'tcx> {
        // The allocation might be already removed by static interning.
        // This can only really happen in the CTFE instance, not in miri.
        if self.alloc_map.contains_key(&ptr.alloc_id) {
            self.deallocate(ptr, None, MemoryKind::Stack)
        } else {
            Ok(())
        }
    }
    pub fn deallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        old_size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> InterpResult<'tcx> {
        trace!("deallocating: {}", ptr.alloc_id);

        if ptr.offset.bytes() != 0 {
            return err!(DeallocateNonBasePtr);
        }

        let (alloc_kind, mut alloc) = match self.alloc_map.remove(&ptr.alloc_id) {
            Some(alloc) => alloc,
            None => {
                // Deallocating static memory -- always an error
                return match self.tcx.alloc_map.lock().get(ptr.alloc_id) {
                    Some(GlobalAlloc::Function(..)) => err!(DeallocatedWrongMemoryKind(
                        "function".to_string(),
                        format!("{:?}", kind),
                    )),
                    Some(GlobalAlloc::Static(..)) |
                    Some(GlobalAlloc::Memory(..)) => err!(DeallocatedWrongMemoryKind(
                        "static".to_string(),
                        format!("{:?}", kind),
                    )),
                    None => err!(DoubleFree),
                };
            }
        };

        if alloc_kind != kind {
            return err!(DeallocatedWrongMemoryKind(
                format!("{:?}", alloc_kind),
                format!("{:?}", kind),
            ));
        }
        if let Some((size, align)) = old_size_and_align {
            if size.bytes() != alloc.bytes.len() as u64 || align != alloc.align {
                let bytes = Size::from_bytes(alloc.bytes.len() as u64);
                return err!(IncorrectAllocationInformation(size, bytes, align, alloc.align));
            }
        }

        // Let the machine take some extra action
        let size = Size::from_bytes(alloc.bytes.len() as u64);
        AllocationExtra::memory_deallocated(&mut alloc, ptr, size)?;

        // Don't forget to remember size and align of this now-dead allocation
        let old = self.dead_alloc_map.insert(
            ptr.alloc_id,
            (Size::from_bytes(alloc.bytes.len() as u64), alloc.align),
        );
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        Ok(())
    }
    /// Check if the given scalar is allowed to do a memory access of given `size`
    /// and `align`. On success, returns `None` for zero-sized accesses (where
    /// nothing else is left to do) and a `Pointer` to use for the actual access otherwise.
    /// Crucially, if the input is a `Pointer`, we will test it for liveness
    /// *even if* the size is 0.
    ///
    /// Everyone accessing memory based on a `Scalar` should use this method to get the
    /// `Pointer` they need. And even if you already have a `Pointer`, call this method
    /// to make sure it is sufficiently aligned and not dangling. Not doing that may
    /// cause ICEs.
    ///
    /// Most of the time you should use `check_mplace_access`, but when you just have a pointer,
    /// this method is still appropriate.
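    ///
    /// A minimal usage sketch (hypothetical caller, not compiled here; compare
    /// `read_bytes` below, which follows exactly this pattern):
    ///
    /// ```rust,ignore
    /// let ptr = match self.check_ptr_access(sptr, size, align)? {
    ///     Some(ptr) => ptr,      // non-empty access: a live, sufficiently aligned pointer
    ///     None => return Ok(()), // zero-sized access: already fully checked
    /// };
    /// ```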
    pub fn check_ptr_access(
        &self,
        sptr: Scalar<M::PointerTag>,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
        fn check_offset_align(offset: u64, align: Align) -> InterpResult<'static> {
            if offset % align.bytes() == 0 {
                Ok(())
            } else {
                // The biggest power of two through which `offset` is divisible.
                let offset_pow2 = 1 << offset.trailing_zeros();
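                // For example, `offset = 24` (0b11000) has three trailing zeros,
                // so `offset_pow2 = 8`, the largest power of two dividing 24.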
                err!(AlignmentCheckFailed {
                    has: Align::from_bytes(offset_pow2).unwrap(),
                    required: align,
                })
            }
        }

        // Normalize to a `Pointer` if we definitely need one.
        let normalized = if size.bytes() == 0 {
            // Can be an integer, just take what we got. We do NOT `force_bits` here;
            // if this is already a `Pointer` we want to do the bounds checks!
            sptr
        } else {
            // A "real" access, we must get a pointer.
            Scalar::Ptr(self.force_ptr(sptr)?)
        };
        Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) {
            Ok(bits) => {
                let bits = bits as u64; // it's ptr-sized
                assert!(size.bytes() == 0);
                // Must be non-NULL and aligned.
                if bits == 0 {
                    return err!(InvalidNullPointerUsage);
                }
                check_offset_align(bits, align)?;
                None
            }
            Err(ptr) => {
                let (allocation_size, alloc_align) =
                    self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferencable)?;
                // Test bounds. This also ensures non-NULL.
                // It is sufficient to check this for the end pointer. The addition
                // checks for overflow.
                let end_ptr = ptr.offset(size, self)?;
                end_ptr.check_in_alloc(allocation_size, CheckInAllocMsg::MemoryAccessTest)?;
                // Test align. Check this last; if both bounds and alignment are violated
                // we want the error to be about the bounds.
                if alloc_align.bytes() < align.bytes() {
                    // The allocation itself is not aligned enough.
                    // FIXME: Alignment check is too strict, depending on the base address that
                    // got picked we might be aligned even if this check fails.
                    // We instead have to fall back to converting to an integer and checking
                    // the "real" alignment.
                    return err!(AlignmentCheckFailed {
                        has: alloc_align,
                        required: align,
                    });
                }
                check_offset_align(ptr.offset.bytes(), align)?;

                // We can still be zero-sized in this branch, in which case we have to
                // return `None`.
                if size.bytes() == 0 { None } else { Some(ptr) }
            }
        })
    }
    /// Test if the pointer might be NULL.
    pub fn ptr_may_be_null(
        &self,
        ptr: Pointer<M::PointerTag>,
    ) -> bool {
        let (size, _align) = self.get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)
            .expect("alloc info with MaybeDead cannot fail");
        ptr.check_in_alloc(size, CheckInAllocMsg::NullPointerTest).is_err()
    }
}
/// Allocation accessors
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    /// Helper function to obtain the global (tcx) allocation for a static.
    /// This attempts to return a reference to an existing allocation if
    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
    /// this machine use the same pointer tag, so it is indirected through
    /// `M::tag_allocation`.
    ///
    /// Notice that every static has two `AllocId`s that will resolve to the same
    /// thing here: one maps to `GlobalAlloc::Static`, this is the "lazy" ID,
    /// and the other one maps to `GlobalAlloc::Memory`, this is returned by
    /// `const_eval_raw` and it is the "resolved" ID.
    /// The resolved ID is never used by the interpreted program, it is hidden.
    /// The `GlobalAlloc::Memory` branch here is still reachable though; when a static
    /// contains a reference to memory that was created during its evaluation (i.e., not to
    /// another static), those inner references only exist in "resolved" form.
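    ///
    /// For example (hypothetical code): in `static FOO: &i32 = &42;`, `FOO` itself gets a
    /// lazy ID mapping to `GlobalAlloc::Static`, while the anonymous allocation holding
    /// `42` is created during evaluation and is only reachable through a resolved ID
    /// mapping to `GlobalAlloc::Memory`.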
    fn get_static_alloc(
        memory_extra: &M::MemoryExtra,
        tcx: TyCtxtAt<'tcx>,
        id: AllocId,
    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
        let alloc = tcx.alloc_map.lock().get(id);
        let alloc = match alloc {
            Some(GlobalAlloc::Memory(mem)) =>
                Cow::Borrowed(mem),
            Some(GlobalAlloc::Function(..)) =>
                return err!(DerefFunctionPointer),
            None =>
                return err!(DanglingPointerDeref),
            Some(GlobalAlloc::Static(def_id)) => {
                // We got a "lazy" static that has not been computed yet.
                if tcx.is_foreign_item(def_id) {
                    trace!("static_alloc: foreign item {:?}", def_id);
                    M::find_foreign_static(tcx.tcx, def_id)?
                } else {
                    trace!("static_alloc: Need to compute {:?}", def_id);
                    let instance = Instance::mono(tcx.tcx, def_id);
                    let gid = GlobalId {
                        instance,
                        promoted: None,
                    };
                    // use the raw query here to break validation cycles. Later uses of the static
                    // will call the full query anyway
                    let raw_const = tcx.const_eval_raw(ty::ParamEnv::reveal_all().and(gid))
                        .map_err(|err| {
                            // no need to report anything, the const_eval call takes care of that
                            // for statics
                            assert!(tcx.is_static(def_id));
                            match err {
                                ErrorHandled::Reported =>
                                    InterpError::InvalidProgram(
                                        InvalidProgramInfo::ReferencedConstant
                                    ),
                                ErrorHandled::TooGeneric =>
                                    InterpError::InvalidProgram(InvalidProgramInfo::TooGeneric),
                            }
                        })?;
                    // Make sure we use the ID of the resolved memory, not the lazy one!
                    let id = raw_const.alloc_id;
                    let allocation = tcx.alloc_map.lock().unwrap_memory(id);
                    Cow::Borrowed(allocation)
                }
            }
        };
        // We got tcx memory. Let the machine figure out whether and how to
        // turn that into memory with the right pointer tag.
        Ok(M::tag_allocation(
            memory_extra,
            id, // always use the ID we got as input, not the "hidden" one.
            alloc,
            M::STATIC_KIND.map(MemoryKind::Machine),
        ).0)
    }
    pub fn get(
        &self,
        id: AllocId,
    ) -> InterpResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
        // The error type of the inner closure here is somewhat funny. We have two
        // ways of "erroring": An actual error, or because we got a reference from
        // `get_static_alloc` that we can actually use directly without inserting anything anywhere.
        // So the error type is `InterpResult<'tcx, &Allocation<M::PointerTag>>`.
        let a = self.alloc_map.get_or(id, || {
            let alloc = Self::get_static_alloc(&self.extra, self.tcx, id).map_err(Err)?;
            match alloc {
                Cow::Borrowed(alloc) => {
                    // We got a ref, cheaply return that as an "error" so that the
                    // map does not get mutated.
                    Err(Ok(alloc))
                }
                Cow::Owned(alloc) => {
                    // Need to put it into the map and return a ref to that
                    let kind = M::STATIC_KIND.expect(
                        "I got an owned allocation that I have to copy but the machine does \
                            not expect that to happen"
                    );
                    Ok((MemoryKind::Machine(kind), alloc))
                }
            }
        });
        // Now unpack that funny error type
        match a {
            Ok(a) => Ok(&a.1),
            Err(a) => a,
        }
    }
    pub fn get_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
        let tcx = self.tcx;
        let memory_extra = &self.extra;
        let a = self.alloc_map.get_mut_or(id, || {
            // Need to make a copy, even if `get_static_alloc` is able
            // to give us a cheap reference.
            let alloc = Self::get_static_alloc(memory_extra, tcx, id)?;
            if alloc.mutability == Mutability::Immutable {
                return err!(ModifiedConstantMemory);
            }
            match M::STATIC_KIND {
                Some(kind) => Ok((MemoryKind::Machine(kind), alloc.into_owned())),
                None => err!(ModifiedStatic),
            }
        });
        // Unpack the error type manually because type inference doesn't
        // work otherwise (and we cannot help it because `impl Trait`)
        match a {
            Err(e) => Err(e),
            Ok(a) => {
                let a = &mut a.1;
                if a.mutability == Mutability::Immutable {
                    return err!(ModifiedConstantMemory);
                }
                Ok(a)
            }
        }
    }
    /// Obtain the size and alignment of an allocation, even if that allocation has
    /// been deallocated.
    ///
    /// If `liveness` is `AllocCheck::MaybeDead`, this function always returns `Ok`.
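    ///
    /// A sketch of the `MaybeDead` contract (mirroring `ptr_may_be_null` above;
    /// not compiled here):
    ///
    /// ```rust,ignore
    /// let (size, _align) = mem.get_size_and_align(id, AllocCheck::MaybeDead)
    ///     .expect("alloc info with MaybeDead cannot fail");
    /// ```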
    pub fn get_size_and_align(
        &self,
        id: AllocId,
        liveness: AllocCheck,
    ) -> InterpResult<'static, (Size, Align)> {
        // # Regular allocations
        // Don't use `self.get` here as that will
        // a) cause cycles in case `id` refers to a static
        // b) duplicate a static's allocation in miri
        if let Some((_, alloc)) = self.alloc_map.get(id) {
            return Ok((Size::from_bytes(alloc.bytes.len() as u64), alloc.align));
        }

        // # Function pointers
        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
        if self.get_fn_alloc(id).is_ok() {
            return if let AllocCheck::Dereferencable = liveness {
                // The caller requested no function pointers.
                err!(DerefFunctionPointer)
            } else {
                Ok((Size::ZERO, Align::from_bytes(1).unwrap()))
            };
        }
        // # Statics
        // Can't do this in the match argument, we may get cycle errors since the lock would
        // be held throughout the match.
        let alloc = self.tcx.alloc_map.lock().get(id);
        match alloc {
            Some(GlobalAlloc::Static(did)) => {
                // Use size and align of the type.
                let ty = self.tcx.type_of(did);
                let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
                Ok((layout.size, layout.align.abi))
            }
            Some(GlobalAlloc::Memory(alloc)) =>
                // Need to duplicate the logic here, because the global allocations have
                // different associated types than the interpreter-local ones.
                Ok((Size::from_bytes(alloc.bytes.len() as u64), alloc.align)),
            Some(GlobalAlloc::Function(_)) =>
                bug!("We already checked function pointers above"),
            // The rest must be dead.
            None => if let AllocCheck::MaybeDead = liveness {
                // Deallocated pointers are allowed, we should be able to find
                // them in the dead allocation map.
                Ok(*self.dead_alloc_map.get(&id)
                    .expect("deallocated pointers should all be recorded in \
                             `dead_alloc_map`"))
            } else {
                err!(DanglingPointerDeref)
            },
        }
    }
    fn get_fn_alloc(&self, id: AllocId) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        trace!("reading fn ptr: {}", id);
        if let Some(extra) = self.extra_fn_ptr_map.get(&id) {
            Ok(FnVal::Other(*extra))
        } else {
            match self.tcx.alloc_map.lock().get(id) {
                Some(GlobalAlloc::Function(instance)) => Ok(FnVal::Instance(instance)),
                _ => err!(ExecuteMemory),
            }
        }
    }
    pub fn get_fn(
        &self,
        ptr: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        let ptr = self.force_ptr(ptr)?; // We definitely need a pointer value.
        if ptr.offset.bytes() != 0 {
            return err!(InvalidFunctionPointer);
        }
        self.get_fn_alloc(ptr.alloc_id)
    }
    pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
        self.get_mut(id)?.mutability = Mutability::Immutable;
        Ok(())
    }
    /// For debugging, print an allocation and all allocations it points to, recursively.
    pub fn dump_alloc(&self, id: AllocId) {
        self.dump_allocs(vec![id]);
    }

    fn dump_alloc_helper<Tag, Extra>(
        &self,
        allocs_seen: &mut FxHashSet<AllocId>,
        allocs_to_print: &mut VecDeque<AllocId>,
        mut msg: String,
        alloc: &Allocation<Tag, Extra>,
        extra: String,
    ) {
        use std::fmt::Write;
        let prefix_len = msg.len();
        let mut relocations = vec![];
        for i in 0..(alloc.bytes.len() as u64) {
            let i = Size::from_bytes(i);
            if let Some(&(_, target_id)) = alloc.relocations.get(&i) {
                if allocs_seen.insert(target_id) {
                    allocs_to_print.push_back(target_id);
                }
                relocations.push((i, target_id));
            }
            if alloc.undef_mask.is_range_defined(i, i + Size::from_bytes(1)).is_ok() {
                // this `as usize` is fine, since `i` came from a `usize`
                write!(msg, "{:02x} ", alloc.bytes[i.bytes() as usize]).unwrap();
            } else {
                msg.push_str("__ ");
            }
        }
650 "{}({} bytes, alignment {}){}",
        if !relocations.is_empty() {
            msg.clear();
            write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
            let mut pos = Size::ZERO;
            let relocation_width = (self.pointer_size().bytes() - 1) * 3;
            for (i, target_id) in relocations {
                // this `as usize` is fine, since we can't print more chars than `usize::MAX`
                write!(msg, "{:1$}", "", ((i - pos) * 3).bytes() as usize).unwrap();
                let target = format!("({})", target_id);
                // this `as usize` is fine, since we can't print more chars than `usize::MAX`
                write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
                pos = i + self.pointer_size();
            }
            trace!("{}", msg);
        }
    }
    /// For debugging, print a list of allocations and all allocations they point to, recursively.
    pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
        if !log_enabled!(::log::Level::Trace) {
            return;
        }
        allocs.sort();
        allocs.dedup();
        let mut allocs_to_print = VecDeque::from(allocs);
        let mut allocs_seen = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            let msg = format!("Alloc {:<5} ", format!("{}:", id));
            // normal alloc?
            match self.alloc_map.get_or(id, || Err(())) {
                Ok((kind, alloc)) => {
                    let extra = match kind {
                        MemoryKind::Stack => " (stack)".to_owned(),
                        MemoryKind::Vtable => " (vtable)".to_owned(),
                        MemoryKind::Machine(m) => format!(" ({:?})", m),
                    };
                    self.dump_alloc_helper(
                        &mut allocs_seen, &mut allocs_to_print,
                        msg, alloc, extra,
                    );
                }
                Err(()) => {
                    // static alloc?
                    match self.tcx.alloc_map.lock().get(id) {
                        Some(GlobalAlloc::Memory(alloc)) => {
                            self.dump_alloc_helper(
                                &mut allocs_seen, &mut allocs_to_print,
                                msg, alloc, " (immutable)".to_owned(),
                            );
                        }
                        Some(GlobalAlloc::Function(func)) => {
                            trace!("{} {}", msg, func);
                        }
                        Some(GlobalAlloc::Static(did)) => {
                            trace!("{} {:?}", msg, did);
                        }
                        None => {
                            trace!("{} (deallocated)", msg);
                        }
                    }
                }
            }
        }
    }
    pub fn leak_report(&self) -> usize {
        trace!("### LEAK REPORT ###");
        let leaks: Vec<_> = self.alloc_map.filter_map_collect(|&id, &(kind, _)| {
            if kind.may_leak() { None } else { Some(id) }
        });
        let n = leaks.len();
        self.dump_allocs(leaks);
        n
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda)
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}
/// Reading and writing.
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    /// Reads the given number of bytes from memory. Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
    pub fn read_bytes(
        &self,
        ptr: Scalar<M::PointerTag>,
        size: Size,
    ) -> InterpResult<'tcx, &[u8]> {
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
            Some(ptr) => ptr,
            None => return Ok(&[]), // zero-sized access
        };
        self.get(ptr.alloc_id)?.get_bytes(self, ptr, size)
    }
    /// Reads a 0-terminated sequence of bytes from memory. Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
    pub fn read_c_str(&self, ptr: Scalar<M::PointerTag>) -> InterpResult<'tcx, &[u8]> {
        let ptr = self.force_ptr(ptr)?; // We need to read at least 1 byte, so we *need* a ptr.
        self.get(ptr.alloc_id)?.read_c_str(self, ptr)
    }
    /// Expects the caller to have checked bounds and alignment.
    pub fn copy(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        self.copy_repeatedly(src, dest, size, 1, nonoverlapping)
    }
    /// Expects the caller to have checked bounds and alignment.
    pub fn copy_repeatedly(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        length: u64,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        // first copy the relocations to a temporary buffer, because
        // `get_bytes_mut` will clear the relocations, which is correct,
        // since we don't want to keep any relocations at the target.
        // (`get_bytes_with_undef_and_ptr` below checks that there are no
        // relocations overlapping the edges; those would not be handled correctly).
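        // (For example, if the copied range ended in the middle of a multi-byte pointer
        // relocation, half a pointer could not be copied meaningfully.)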
        let relocations = {
            let relocations = self.get(src.alloc_id)?.relocations(self, src, size);
            if relocations.is_empty() {
                // nothing to copy, ignore even the `length` loop
                Vec::new()
            } else {
                let mut new_relocations = Vec::with_capacity(relocations.len() * (length as usize));
                for i in 0..length {
                    new_relocations.extend(
                        relocations
                        .iter()
                        .map(|&(offset, reloc)| {
                            // compute offset for current repetition
                            let dest_offset = dest.offset + (i * size);
                            (
                                // shift offsets from source allocation to destination allocation
                                offset + dest_offset - src.offset,
                                reloc,
                            )
                        })
                    );
                }
                new_relocations
            }
        };
        let tcx = self.tcx.tcx;

        // This checks relocation edges on the src.
        let src_bytes = self.get(src.alloc_id)?
            .get_bytes_with_undef_and_ptr(&tcx, src, size)?
            .as_ptr();
        let dest_bytes = self.get_mut(dest.alloc_id)?
            .get_bytes_mut(&tcx, dest, size * length)?
            .as_mut_ptr();
        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        // The pointers above remain valid even if the `HashMap` table is moved around because they
        // point into the `Vec` storing the bytes.
        unsafe {
            assert_eq!(size.bytes() as usize as u64, size.bytes());
            if src.alloc_id == dest.alloc_id {
                if nonoverlapping {
                    if (src.offset <= dest.offset && src.offset + size > dest.offset) ||
                       (dest.offset <= src.offset && dest.offset + size > src.offset)
                    {
                        return err!(Intrinsic(
                            "copy_nonoverlapping called on overlapping ranges".to_string(),
                        ));
                    }
                }

                for i in 0..length {
                    ptr::copy(src_bytes,
                              dest_bytes.offset((size.bytes() * i) as isize),
                              size.bytes() as usize);
                }
            } else {
                for i in 0..length {
                    ptr::copy_nonoverlapping(src_bytes,
                                             dest_bytes.offset((size.bytes() * i) as isize),
                                             size.bytes() as usize);
                }
            }
        }
        // copy definedness to the destination
        self.copy_undef_mask(src, dest, size, length)?;
        // copy the relocations to the destination
        self.get_mut(dest.alloc_id)?.relocations.insert_presorted(relocations);

        Ok(())
    }
}
/// Undefined bytes
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    // FIXME: Add a fast version for the common, nonoverlapping case
    fn copy_undef_mask(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        repeat: u64,
    ) -> InterpResult<'tcx> {
        // The bits have to be saved locally before writing to dest in case src and dest overlap.
        assert_eq!(size.bytes() as usize as u64, size.bytes());

        let undef_mask = &self.get(src.alloc_id)?.undef_mask;
        // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
        // a naive undef mask copying algorithm would repeatedly have to read the undef mask from
        // the source and write it to the destination. Even if we optimized the memory accesses,
        // we'd be doing all of this `repeat` times.
        // Therefore we precompute a compressed version of the undef mask of the source value and
        // then write it back `repeat` times without computing any more information from the source.

        // a precomputed cache for ranges of defined/undefined bits
        // 0000010010001110 will become
        // [5, 1, 2, 1, 3, 3, 1]
        // where each element toggles the state
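        // Decoding starts from `first` (the state of bit 0) and toggles after each run:
        // with `first = false`, [5, 1, 2, 1, 3, 3, 1] expands back to 0000010010001110.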
        let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
        let first = undef_mask.get(src.offset);
        let mut cur_len = 1;
        let mut cur = first;
        for i in 1..size.bytes() {
            // FIXME: optimize to bitshift the current undef block's bits and read the top bit
            if undef_mask.get(src.offset + Size::from_bytes(i)) == cur {
                cur_len += 1;
            } else {
                ranges.push(cur_len);
                cur_len = 1;
                cur = !cur;
            }
        }
        // now fill in all the data
        let dest_allocation = self.get_mut(dest.alloc_id)?;
        // an optimization where we can just overwrite an entire range of definedness bits if
        // they are going to be uniformly `1` or `0`.
        if ranges.is_empty() {
            dest_allocation.undef_mask.set_range_inbounds(
                dest.offset,
                dest.offset + size * repeat,
                first,
            );
        } else {
            // remember to fill in the trailing bits
            ranges.push(cur_len);
            for mut j in 0..repeat {
                j *= size.bytes();
                j += dest.offset.bytes();
                let mut cur = first;
                for range in &ranges {
                    let old_j = j;
                    j += range;
                    dest_allocation.undef_mask.set_range_inbounds(
                        Size::from_bytes(old_j),
                        Size::from_bytes(j),
                        cur,
                    );
                    cur = !cur;
                }
            }
        }

        Ok(())
    }
    pub fn force_ptr(
        &self,
        scalar: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        match scalar {
            Scalar::Ptr(ptr) => Ok(ptr),
            _ => M::int_to_ptr(&self, scalar.to_usize(self)?),
        }
    }

    pub fn force_bits(
        &self,
        scalar: Scalar<M::PointerTag>,
        size: Size,
    ) -> InterpResult<'tcx, u128> {
        match scalar.to_bits_or_ptr(size, self) {
            Ok(bits) => Ok(bits),
            Err(ptr) => Ok(M::ptr_to_int(&self, ptr)? as u128),
        }
    }
}