//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-NULL)
//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the empty case!
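//!
//! A minimal sketch of that rule for zero-sized accesses (plain Rust, not this
//! module's API; `zst_access_ok` is a hypothetical helper):
//!
//! ```rust
//! fn zst_access_ok(addr: u64, required_align: u64) -> bool {
//!     // Even a zero-sized access must be non-NULL and properly aligned.
//!     addr != 0 && addr % required_align == 0
//! }
//! assert!(zst_access_ok(8, 8));
//! assert!(!zst_access_ok(0, 8)); // NULL is never valid, not even for size 0
//! ```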
use std::collections::VecDeque;
use std::ptr;
use std::borrow::Cow;
use std::fmt::Write;

use rustc::ty::{self, Instance, ParamEnv, query::TyCtxtAt};
use rustc::ty::layout::{Align, TargetDataLayout, Size, HasDataLayout};
use rustc_data_structures::fx::{FxHashSet, FxHashMap};

use syntax::ast::Mutability;

use super::{
    Pointer, AllocId, Allocation, GlobalId, AllocationExtra,
    EvalResult, Scalar, InterpError, GlobalAlloc, PointerArithmetic,
    Machine, AllocMap, MayLeak, ErrorHandled, CheckInAllocMsg, InboundsCheck,
};
#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
pub enum MemoryKind<T> {
    /// Error if deallocated except during a stack pop
    Stack,
    /// Error if ever deallocated
    Vtable,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones
    Machine(T),
}

impl<T: MayLeak> MayLeak for MemoryKind<T> {
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::Vtable => true,
            MemoryKind::Machine(k) => k.may_leak(),
        }
    }
}
// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> {
    /// Allocations local to this instance of the miri engine. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// static and looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a static (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a static, doing so will
    /// create a copy of the static allocation here.
    alloc_map: M::MemoryMap,

    /// To be able to compare pointers with NULL, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist any more.
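    /// For example, `check_align` on a dangling pointer used for a ZST access
    /// still needs the dead allocation's alignment, which it looks up here.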
    dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,

    /// Extra data added by the machine.
    pub extra: M::MemoryExtra,

    /// Lets us implement `HasDataLayout`, which is awfully convenient.
    pub(super) tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
}
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout
    for Memory<'a, 'mir, 'tcx, M>
{
    fn data_layout(&self) -> &TargetDataLayout {
        &self.tcx.data_layout
    }
}
// FIXME: Really we shouldn't clone memory, ever. Snapshot machinery should instead
// carefully copy only the reachable parts.
impl<'a, 'mir, 'tcx, M> Clone for Memory<'a, 'mir, 'tcx, M>
where
    M: Machine<'a, 'mir, 'tcx, PointerTag=(), AllocExtra=(), MemoryExtra=()>,
    M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKinds>, Allocation)>,
{
    fn clone(&self) -> Self {
        Memory {
            alloc_map: self.alloc_map.clone(),
            dead_alloc_map: self.dead_alloc_map.clone(),
            extra: (),
            tcx: self.tcx,
        }
    }
}
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    pub fn new(tcx: TyCtxtAt<'a, 'tcx, 'tcx>) -> Self {
        Memory {
            alloc_map: M::MemoryMap::default(),
            dead_alloc_map: FxHashMap::default(),
            extra: M::MemoryExtra::default(),
            tcx,
        }
    }
    pub fn tag_static_base_pointer(&self, ptr: Pointer) -> Pointer<M::PointerTag> {
        ptr.with_tag(M::tag_static_base_pointer(ptr.alloc_id, &self.extra))
    }

    pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> Pointer<M::PointerTag> {
        let id = self.tcx.alloc_map.lock().create_fn_alloc(instance);
        self.tag_static_base_pointer(Pointer::from(id))
    }
    pub fn allocate(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> Pointer<M::PointerTag> {
        let alloc = Allocation::undef(size, align);
        self.allocate_with(alloc, kind)
    }

    pub fn allocate_static_bytes(
        &mut self,
        bytes: &'tcx [u8],
        kind: MemoryKind<M::MemoryKinds>,
    ) -> Pointer<M::PointerTag> {
        let alloc = Allocation::from_byte_aligned_bytes(bytes);
        self.allocate_with(alloc, kind)
    }

    pub fn allocate_with(
        &mut self,
        alloc: Allocation,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> Pointer<M::PointerTag> {
        let id = self.tcx.alloc_map.lock().reserve();
        let (alloc, tag) = M::tag_allocation(id, Cow::Owned(alloc), Some(kind), &self.extra);
        self.alloc_map.insert(id, (kind, alloc.into_owned()));
        Pointer::from(id).with_tag(tag)
    }
    pub fn reallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        old_size: Size, old_align: Align,
        new_size: Size, new_align: Align,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> EvalResult<'tcx, Pointer<M::PointerTag>> {
        if ptr.offset.bytes() != 0 {
            return err!(ReallocateNonBasePtr);
        }

        // For simplicity's sake, we implement `reallocate` as "alloc, copy, dealloc".
        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
        let new_ptr = self.allocate(new_size, new_align, kind);
        self.copy(
            ptr.into(), old_align,
            new_ptr.into(), new_align,
            old_size.min(new_size),
            /*nonoverlapping*/ true,
        )?;
        self.deallocate(ptr, Some((old_size, old_align)), kind)?;
        Ok(new_ptr)
    }
    /// Deallocate a local, or do nothing if that local has been made into a static.
    pub fn deallocate_local(&mut self, ptr: Pointer<M::PointerTag>) -> EvalResult<'tcx> {
        // The allocation might be already removed by static interning.
        // This can only really happen in the CTFE instance, not in miri.
        if self.alloc_map.contains_key(&ptr.alloc_id) {
            self.deallocate(ptr, None, MemoryKind::Stack)
        } else {
            Ok(())
        }
    }
    pub fn deallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> EvalResult<'tcx> {
        trace!("deallocating: {}", ptr.alloc_id);

        if ptr.offset.bytes() != 0 {
            return err!(DeallocateNonBasePtr);
        }

        let (alloc_kind, mut alloc) = match self.alloc_map.remove(&ptr.alloc_id) {
            Some(alloc) => alloc,
            None => {
                // Deallocating static memory -- always an error
                return match self.tcx.alloc_map.lock().get(ptr.alloc_id) {
                    Some(GlobalAlloc::Function(..)) => err!(DeallocatedWrongMemoryKind(
                        "function".to_string(),
                        format!("{:?}", kind),
                    )),
                    Some(GlobalAlloc::Static(..)) |
                    Some(GlobalAlloc::Memory(..)) => err!(DeallocatedWrongMemoryKind(
                        "static".to_string(),
                        format!("{:?}", kind),
                    )),
                    None => err!(DoubleFree),
                }
            }
        };

        if alloc_kind != kind {
            return err!(DeallocatedWrongMemoryKind(
                format!("{:?}", alloc_kind),
                format!("{:?}", kind),
            ));
        }
        if let Some((size, align)) = size_and_align {
            if size.bytes() != alloc.bytes.len() as u64 || align != alloc.align {
                let bytes = Size::from_bytes(alloc.bytes.len() as u64);
                return err!(IncorrectAllocationInformation(size, bytes, align, alloc.align));
            }
        }

        // Let the machine take some extra action
        let size = Size::from_bytes(alloc.bytes.len() as u64);
        AllocationExtra::memory_deallocated(&mut alloc, ptr, size)?;

        // Don't forget to remember size and align of this now-dead allocation
        let old = self.dead_alloc_map.insert(
            ptr.alloc_id,
            (Size::from_bytes(alloc.bytes.len() as u64), alloc.align),
        );
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        Ok(())
    }
    /// Checks that the pointer is aligned AND non-NULL. This supports ZSTs in two ways:
    /// you can pass a `Scalar`, and a `Pointer` does not have to actually still be allocated.
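    /// (That is: an integer address is acceptable when the size is 0, and a dangling
    /// `Pointer` is acceptable as long as its allocation, live or dead, is known.)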
    pub fn check_align(
        &self,
        ptr: Scalar<M::PointerTag>,
        required_align: Align,
    ) -> EvalResult<'tcx> {
        // Check non-NULL/Undef, extract offset
        let (offset, alloc_align) = match ptr.to_bits_or_ptr(self.pointer_size(), self) {
            Err(ptr) => {
                // check this is not NULL -- which we can ensure only if this is in-bounds
                // of some (potentially dead) allocation.
                let align = self.check_bounds_ptr(ptr, InboundsCheck::MaybeDead,
                                                  CheckInAllocMsg::NullPointerTest)?;
                (ptr.offset.bytes(), align)
            }
            Ok(data) => {
                // check this is not NULL
                if data == 0 {
                    return err!(InvalidNullPointerUsage);
                }
                // the "base address" is 0 and hence always aligned
                (data as u64, required_align)
            }
        };
        // Check allocation-level alignment
        if alloc_align.bytes() < required_align.bytes() {
            return err!(AlignmentCheckFailed {
                has: alloc_align,
                required: required_align,
            });
        }
        // Check offset alignment
        if offset % required_align.bytes() == 0 {
            Ok(())
        } else {
            // Report the largest power of two dividing `offset`, so that
            // `Align::from_bytes` below cannot fail.
            let has = 1 << offset.trailing_zeros();
            err!(AlignmentCheckFailed {
                has: Align::from_bytes(has).unwrap(),
                required: required_align,
            })
        }
    }
    /// Checks if the pointer is "in-bounds". Notice that a pointer pointing at the end
    /// of an allocation (i.e., at the first *inaccessible* location) *is* considered
    /// in-bounds! This follows C's/LLVM's rules.
    /// If you want to check bounds before doing a memory access, it is better to first
    /// obtain an `Allocation` and then call `check_bounds`.
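    ///
    /// A sketch of the in-bounds rule for a hypothetical 8-byte allocation (plain
    /// Rust, not this module's API):
    ///
    /// ```rust
    /// fn ptr_in_bounds(offset: u64, alloc_size: u64) -> bool {
    ///     offset <= alloc_size // one-past-the-end is still in-bounds
    /// }
    /// assert!(ptr_in_bounds(8, 8));  // one past the end: in-bounds, but not accessible
    /// assert!(!ptr_in_bounds(9, 8)); // beyond that: out-of-bounds
    /// ```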
    pub fn check_bounds_ptr(
        &self,
        ptr: Pointer<M::PointerTag>,
        liveness: InboundsCheck,
        msg: CheckInAllocMsg,
    ) -> EvalResult<'tcx, Align> {
        let (allocation_size, align) = self.get_size_and_align(ptr.alloc_id, liveness)?;
        ptr.check_in_alloc(allocation_size, msg)?;
        Ok(align)
    }
}
/// Allocation accessors
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    /// Helper function to obtain the global (tcx) allocation for a static.
    /// This attempts to return a reference to an existing allocation if
    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
    /// this machine use the same pointer tag, so it is indirected through
    /// `M::tag_allocation`.
    ///
    /// Notice that every static has two `AllocId`s that will resolve to the same
    /// thing here: one maps to `GlobalAlloc::Static` (this is the "lazy" ID),
    /// and the other maps to `GlobalAlloc::Memory` (the "resolved" ID, which is
    /// what `const_eval_raw` returns). The resolved ID is never used by the
    /// interpreted program; it is hidden.
    /// The `GlobalAlloc::Memory` branch here is still reachable though; when a static
    /// contains a reference to memory that was created during its evaluation (i.e., not to
    /// another static), those inner references only exist in "resolved" form.
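    ///
    /// Schematically (hypothetical IDs):
    ///
    /// ```text
    /// AllocId(1) -> GlobalAlloc::Static(def_id)  // "lazy": what the program refers to
    /// AllocId(2) -> GlobalAlloc::Memory(alloc)   // "resolved": what const_eval_raw yields
    /// ```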
    fn get_static_alloc(
        id: AllocId,
        tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
        memory_extra: &M::MemoryExtra,
    ) -> EvalResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
        let alloc = tcx.alloc_map.lock().get(id);
        let alloc = match alloc {
            Some(GlobalAlloc::Memory(mem)) =>
                Cow::Borrowed(mem),
            Some(GlobalAlloc::Function(..)) =>
                return err!(DerefFunctionPointer),
            None =>
                return err!(DanglingPointerDeref),
            Some(GlobalAlloc::Static(def_id)) => {
                // We got a "lazy" static that has not been computed yet.
                if tcx.is_foreign_item(def_id) {
                    trace!("static_alloc: foreign item {:?}", def_id);
                    M::find_foreign_static(def_id, tcx)?
                } else {
                    trace!("static_alloc: Need to compute {:?}", def_id);
                    let instance = Instance::mono(tcx.tcx, def_id);
                    let gid = GlobalId {
                        instance,
                        promoted: None,
                    };
                    // use the raw query here to break validation cycles. Later uses of the static
                    // will call the full query anyway
                    let raw_const = tcx.const_eval_raw(ty::ParamEnv::reveal_all().and(gid))
                        .map_err(|err| {
                            // no need to report anything, the const_eval call takes care of that
                            assert!(tcx.is_static(def_id));
                            match err {
                                ErrorHandled::Reported => InterpError::ReferencedConstant,
                                ErrorHandled::TooGeneric => InterpError::TooGeneric,
                            }
                        })?;
                    // Make sure we use the ID of the resolved memory, not the lazy one!
                    let id = raw_const.alloc_id;
                    let allocation = tcx.alloc_map.lock().unwrap_memory(id);
                    Cow::Borrowed(allocation)
                }
            }
        };
        // We got tcx memory. Let the machine figure out whether and how to
        // turn that into memory with the right pointer tag.
        Ok(M::tag_allocation(
            id, // always use the ID we got as input, not the "hidden" one.
            alloc,
            M::STATIC_KIND.map(MemoryKind::Machine),
            memory_extra,
        ).0)
    }
    pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
        // The error type of the inner closure here is somewhat funny. We have two
        // ways of "erroring": An actual error, or because we got a reference from
        // `get_static_alloc` that we can actually use directly without inserting anything anywhere.
        // So the error type is `EvalResult<'tcx, &Allocation<M::PointerTag>>`.
        let a = self.alloc_map.get_or(id, || {
            let alloc = Self::get_static_alloc(id, self.tcx, &self.extra).map_err(Err)?;
            match alloc {
                Cow::Borrowed(alloc) => {
                    // We got a ref, cheaply return that as an "error" so that the
                    // map does not get mutated.
                    Err(Ok(alloc))
                }
                Cow::Owned(alloc) => {
                    // Need to put it into the map and return a ref to that
                    let kind = M::STATIC_KIND.expect(
                        "I got an owned allocation that I have to copy but the machine does \
                            not expect that to happen"
                    );
                    Ok((MemoryKind::Machine(kind), alloc))
                }
            }
        });
        // Now unpack that funny error type
        match a {
            Ok(a) => Ok(&a.1),
            Err(a) => a,
        }
    }
    pub fn get_mut(
        &mut self,
        id: AllocId,
    ) -> EvalResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
        let tcx = self.tcx;
        let memory_extra = &self.extra;
        let a = self.alloc_map.get_mut_or(id, || {
            // Need to make a copy, even if `get_static_alloc` is able
            // to give us a cheap reference.
            let alloc = Self::get_static_alloc(id, tcx, memory_extra)?;
            if alloc.mutability == Mutability::Immutable {
                return err!(ModifiedConstantMemory);
            }
            match M::STATIC_KIND {
                Some(kind) => Ok((MemoryKind::Machine(kind), alloc.into_owned())),
                None => err!(ModifiedStatic),
            }
        });
        // Unpack the error type manually because type inference doesn't
        // work otherwise (and we cannot help it, because of `impl Trait`)
        match a {
            Err(e) => Err(e),
            Ok(a) => {
                let a = &mut a.1;
                if a.mutability == Mutability::Immutable {
                    return err!(ModifiedConstantMemory);
                }
                Ok(a)
            }
        }
    }
    /// Obtain the size and alignment of an allocation, even if that allocation has been
    /// deallocated.
    ///
    /// If `liveness` is `InboundsCheck::MaybeDead`, this function always returns `Ok`.
    pub fn get_size_and_align(
        &self,
        id: AllocId,
        liveness: InboundsCheck,
    ) -> EvalResult<'static, (Size, Align)> {
        if let Ok(alloc) = self.get(id) {
            return Ok((Size::from_bytes(alloc.bytes.len() as u64), alloc.align));
        }
        // Could also be a fn ptr or extern static
        match self.tcx.alloc_map.lock().get(id) {
            Some(GlobalAlloc::Function(..)) => Ok((Size::ZERO, Align::from_bytes(1).unwrap())),
            Some(GlobalAlloc::Static(did)) => {
                // The only way `get` couldn't have worked here is if this is an extern static
                assert!(self.tcx.is_foreign_item(did));
                // Use size and align of the type
                let ty = self.tcx.type_of(did);
                let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
                Ok((layout.size, layout.align.abi))
            }
            _ => match liveness {
                InboundsCheck::MaybeDead => {
                    // Must be a deallocated pointer
                    Ok(*self.dead_alloc_map.get(&id).expect(
                        "allocation missing in dead_alloc_map"
                    ))
                }
                InboundsCheck::Live => err!(DanglingPointerDeref),
            },
        }
    }
    pub fn get_fn(&self, ptr: Pointer<M::PointerTag>) -> EvalResult<'tcx, Instance<'tcx>> {
        if ptr.offset.bytes() != 0 {
            return err!(InvalidFunctionPointer);
        }
        trace!("reading fn ptr: {}", ptr.alloc_id);
        match self.tcx.alloc_map.lock().get(ptr.alloc_id) {
            Some(GlobalAlloc::Function(instance)) => Ok(instance),
            _ => Err(InterpError::ExecuteMemory.into()),
        }
    }

    pub fn mark_immutable(&mut self, id: AllocId) -> EvalResult<'tcx> {
        self.get_mut(id)?.mutability = Mutability::Immutable;
        Ok(())
    }
    /// For debugging, print an allocation and all allocations it points to, recursively.
    pub fn dump_alloc(&self, id: AllocId) {
        self.dump_allocs(vec![id]);
    }

    fn dump_alloc_helper<Tag, Extra>(
        &self,
        allocs_seen: &mut FxHashSet<AllocId>,
        allocs_to_print: &mut VecDeque<AllocId>,
        mut msg: String,
        alloc: &Allocation<Tag, Extra>,
        extra: String,
    ) {
        let prefix_len = msg.len();
        let mut relocations = vec![];

        for i in 0..(alloc.bytes.len() as u64) {
            let i = Size::from_bytes(i);
            if let Some(&(_, target_id)) = alloc.relocations.get(&i) {
                if allocs_seen.insert(target_id) {
                    allocs_to_print.push_back(target_id);
                }
                relocations.push((i, target_id));
            }
            if alloc.undef_mask.is_range_defined(i, i + Size::from_bytes(1)).is_ok() {
                // this `as usize` is fine, since `i` came from a `usize`
                write!(msg, "{:02x} ", alloc.bytes[i.bytes() as usize]).unwrap();
            } else {
                msg.push_str("__ ");
            }
        }

        trace!(
            "{}({} bytes, alignment {}){}",
            msg,
            alloc.bytes.len(),
            alloc.align.bytes(),
            extra
        );

        if !relocations.is_empty() {
            msg.clear();
            write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
            let mut pos = Size::ZERO;
            let relocation_width = (self.pointer_size().bytes() - 1) * 3;
            for (i, target_id) in relocations {
                // this `as usize` is fine, since we can't print more chars than `usize::MAX`
                write!(msg, "{:1$}", "", ((i - pos) * 3).bytes() as usize).unwrap();
                let target = format!("({})", target_id);
                // this `as usize` is fine, since we can't print more chars than `usize::MAX`
                write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
                pos = i + self.pointer_size();
            }
            trace!("{}", msg);
        }
    }
    /// For debugging, print a list of allocations and all allocations they point to, recursively.
    pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
        if !log_enabled!(::log::Level::Trace) {
            return;
        }
        allocs.sort();
        allocs.dedup();
        let mut allocs_to_print = VecDeque::from(allocs);
        let mut allocs_seen = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            let msg = format!("Alloc {:<5} ", format!("{}:", id));

            // normal alloc?
            match self.alloc_map.get_or(id, || Err(())) {
                Ok((kind, alloc)) => {
                    let extra = match kind {
                        MemoryKind::Stack => " (stack)".to_owned(),
                        MemoryKind::Vtable => " (vtable)".to_owned(),
                        MemoryKind::Machine(m) => format!(" ({:?})", m),
                    };
                    self.dump_alloc_helper(
                        &mut allocs_seen, &mut allocs_to_print,
                        msg, alloc, extra,
                    );
                },
                Err(()) => {
                    // static alloc?
                    match self.tcx.alloc_map.lock().get(id) {
                        Some(GlobalAlloc::Memory(alloc)) => {
                            self.dump_alloc_helper(
                                &mut allocs_seen, &mut allocs_to_print,
                                msg, alloc, " (immutable)".to_owned(),
                            );
                        }
                        Some(GlobalAlloc::Function(func)) => {
                            trace!("{} {}", msg, func);
                        }
                        Some(GlobalAlloc::Static(did)) => {
                            trace!("{} {:?}", msg, did);
                        }
                        None => {
                            trace!("{} (deallocated)", msg);
                        }
                    }
                },
            };
        }
    }
    pub fn leak_report(&self) -> usize {
        trace!("### LEAK REPORT ###");
        let leaks: Vec<_> = self.alloc_map.filter_map_collect(|&id, &(kind, _)| {
            if kind.may_leak() { None } else { Some(id) }
        });
        let n = leaks.len();
        self.dump_allocs(leaks);
        n
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda)
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}
/// Byte accessors
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    pub fn read_bytes(
        &self,
        ptr: Scalar<M::PointerTag>,
        size: Size,
    ) -> EvalResult<'tcx, &[u8]> {
        if size.bytes() == 0 {
            return Ok(&[]);
        }
        let ptr = ptr.to_ptr()?;
        self.get(ptr.alloc_id)?.get_bytes(self, ptr, size)
    }
}
/// Interning (for CTFE)
impl<'a, 'mir, 'tcx, M> Memory<'a, 'mir, 'tcx, M>
where
    M: Machine<'a, 'mir, 'tcx, PointerTag=(), AllocExtra=(), MemoryExtra=()>,
    // FIXME: Working around https://github.com/rust-lang/rust/issues/24159
    M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKinds>, Allocation)>,
{
    /// Mark an allocation as static and initialized, either mutable or not.
    pub fn intern_static(
        &mut self,
        alloc_id: AllocId,
        mutability: Mutability,
    ) -> EvalResult<'tcx> {
        trace!(
            "mark_static_initialized {:?}, mutability: {:?}",
            alloc_id,
            mutability
        );
        // remove allocation
        let (kind, mut alloc) = self.alloc_map.remove(&alloc_id).unwrap();
        match kind {
            MemoryKind::Machine(_) => bug!("Static cannot refer to machine memory"),
            MemoryKind::Stack | MemoryKind::Vtable => {},
        }
        // ensure llvm knows not to put this into immutable memory
        alloc.mutability = mutability;
        let alloc = self.tcx.intern_const_alloc(alloc);
        self.tcx.alloc_map.lock().set_alloc_id_memory(alloc_id, alloc);
        // recurse into inner allocations
        for &(_, alloc) in alloc.relocations.values() {
            // FIXME: Reusing the mutability here is likely incorrect. It is originally
            // determined via `is_freeze`, and data is considered frozen if there is no
            // `UnsafeCell` *immediately* in that data -- however, this search stops
            // at references. So whenever we follow a reference, we should likely
            // assume immutability -- and we should make sure that the compiler
            // does not permit code that would break this!
            if self.alloc_map.contains_key(&alloc) {
                // Not yet interned, so proceed recursively
                self.intern_static(alloc, mutability)?;
            } else if self.dead_alloc_map.contains_key(&alloc) {
                // dangling pointer
                return err!(ValidationFailure(
                    "encountered dangling pointer in final constant".into(),
                ));
            }
        }
        Ok(())
    }
}
/// Reading and writing.
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    pub fn copy(
        &mut self,
        src: Scalar<M::PointerTag>,
        src_align: Align,
        dest: Scalar<M::PointerTag>,
        dest_align: Align,
        size: Size,
        nonoverlapping: bool,
    ) -> EvalResult<'tcx> {
        self.copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
    }

    pub fn copy_repeatedly(
        &mut self,
        src: Scalar<M::PointerTag>,
        src_align: Align,
        dest: Scalar<M::PointerTag>,
        dest_align: Align,
        size: Size,
        length: u64,
        nonoverlapping: bool,
    ) -> EvalResult<'tcx> {
        self.check_align(src, src_align)?;
        self.check_align(dest, dest_align)?;
        if size.bytes() == 0 {
            // Nothing to do for ZST, other than checking alignment and
            // non-NULLness which already happened.
            return Ok(());
        }
        let src = src.to_ptr()?;
        let dest = dest.to_ptr()?;

        // first copy the relocations to a temporary buffer, because
        // `get_bytes_mut` will clear the relocations, which is correct,
        // since we don't want to keep any relocations at the target.
        // (`get_bytes_with_undef_and_ptr` below checks that there are no
        // relocations overlapping the edges; those would not be handled correctly).
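        //
        // For example (illustrative numbers): with `length == 2` and a relocation at
        // `src.offset + 3`, the shifted copies below land at `dest.offset + 3` and at
        // `dest.offset + size + 3`.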
        let relocations = {
            let relocations = self.get(src.alloc_id)?.relocations(self, src, size);
            if relocations.is_empty() {
                // nothing to copy, ignore even the `length` loop
                Vec::new()
            } else {
                let mut new_relocations = Vec::with_capacity(relocations.len() * (length as usize));
                for i in 0..length {
                    new_relocations.extend(
                        relocations
                        .iter()
                        .map(|&(offset, reloc)| {
                            // compute offset for current repetition
                            let dest_offset = dest.offset + (i * size);
                            (
                                // shift offsets from source allocation to destination allocation
                                offset + dest_offset - src.offset,
                                reloc,
                            )
                        })
                    );
                }
                new_relocations
            }
        };

        let tcx = self.tcx.tcx;

        // This checks relocation edges on the src.
        let src_bytes = self.get(src.alloc_id)?
            .get_bytes_with_undef_and_ptr(&tcx, src, size)?
            .as_ptr();
        let dest_bytes = self.get_mut(dest.alloc_id)?
            .get_bytes_mut(&tcx, dest, size * length)?
            .as_mut_ptr();

        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        // The pointers above remain valid even if the `HashMap` table is moved around because they
        // point into the `Vec` storing the bytes.
        unsafe {
            assert_eq!(size.bytes() as usize as u64, size.bytes());
            if src.alloc_id == dest.alloc_id {
                if nonoverlapping {
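                    // Ranges `[src, src+size)` and `[dest, dest+size)` overlap
                    // iff each one starts before the other one ends.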
                    if (src.offset <= dest.offset && src.offset + size > dest.offset) ||
                       (dest.offset <= src.offset && dest.offset + size > src.offset)
                    {
                        return err!(Intrinsic(
                            "copy_nonoverlapping called on overlapping ranges".to_string(),
                        ));
                    }
                }

                for i in 0..length {
                    ptr::copy(src_bytes,
                              dest_bytes.offset((size.bytes() * i) as isize),
                              size.bytes() as usize);
                }
            } else {
                for i in 0..length {
                    ptr::copy_nonoverlapping(src_bytes,
                                             dest_bytes.offset((size.bytes() * i) as isize),
                                             size.bytes() as usize);
                }
            }
        }

        // copy definedness to the destination
        self.copy_undef_mask(src, dest, size, length)?;
        // copy the relocations to the destination
        self.get_mut(dest.alloc_id)?.relocations.insert_presorted(relocations);

        Ok(())
    }
}
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    // FIXME: Add a fast version for the common, nonoverlapping case
    fn copy_undef_mask(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        repeat: u64,
    ) -> EvalResult<'tcx> {
        // The bits have to be saved locally before writing to dest in case src and dest overlap.
        assert_eq!(size.bytes() as usize as u64, size.bytes());

        let undef_mask = &self.get(src.alloc_id)?.undef_mask;

        // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
        // a naive undef mask copying algorithm would repeatedly have to read the undef mask from
        // the source and write it to the destination. Even if we optimized the memory accesses,
        // we'd be doing all of this `repeat` times.
        // Therefore, we precompute a compressed version of the undef mask of the source value and
        // then write it back `repeat` times without computing any more information from the source.

        // a precomputed cache for ranges of defined/undefined bits
        // 0000010010001110 will become
        // [5, 1, 2, 1, 3, 3, 1]
        // where each element toggles the state
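        //
        // Writing the mask back then just alternates the state while walking the
        // run lengths: runs [5, 1, 2, 1, 3, 3, 1] starting from a first bit of 0
        // decode to 0000010010001110 again.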
        let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
        let first = undef_mask.get(src.offset);
        let mut cur_len = 1;
        let mut cur = first;
        for i in 1..size.bytes() {
            // FIXME: optimize to bitshift the current undef block's bits and read the top bit
            if undef_mask.get(src.offset + Size::from_bytes(i)) == cur {
                cur_len += 1;
            } else {
                ranges.push(cur_len);
                cur_len = 1;
                cur = !cur;
            }
        }

        // now fill in all the data
        let dest_allocation = self.get_mut(dest.alloc_id)?;
        // an optimization where we can just overwrite an entire range of definedness bits if
        // they are going to be uniformly `1` or `0`.
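        // (`ranges` is empty iff the loop above never saw a toggle, i.e., iff all
        // `size` bits of the source mask equal `first`.)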
        if ranges.is_empty() {
            dest_allocation.undef_mask.set_range_inbounds(
                dest.offset,
                dest.offset + size * repeat,
                first,
            );
            return Ok(())
        }

        // remember to fill in the trailing bits
        ranges.push(cur_len);

        for mut j in 0..repeat {
            j *= size.bytes();
            j += dest.offset.bytes();
            let mut cur = first;
            for range in &ranges {
                let old_j = j;
                j += range;
                dest_allocation.undef_mask.set_range_inbounds(
                    Size::from_bytes(old_j),
                    Size::from_bytes(j),
                    cur,
                );
                cur = !cur;
            }
        }

        Ok(())
    }
}