// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-NULL)
//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the empty case!
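//!
//! For example (a sketch of the rule, not code from this module): a zero-sized
//! access at integer address `0x8` with required alignment 8 must succeed even
//! though no allocation exists there, while the same access at `0x5` must fail
//! the alignment check, and at `0x0` the NULL check -- so neither check may be
//! skipped just because the size is 0.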

use std::collections::VecDeque;
use std::ptr;
use std::borrow::Cow;

use rustc::ty::{self, Instance, ParamEnv, query::TyCtxtAt};
use rustc::ty::layout::{Align, TargetDataLayout, Size, HasDataLayout};
pub use rustc::mir::interpret::{truncate, write_target_uint, read_target_uint};
use rustc_data_structures::fx::{FxHashSet, FxHashMap};

use syntax::ast::Mutability;

use super::{
    Pointer, AllocId, Allocation, GlobalId, AllocationExtra,
    EvalResult, Scalar, EvalErrorKind, AllocType, PointerArithmetic,
    Machine, AllocMap, MayLeak, ErrorHandled, InboundsCheck,
};

#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
pub enum MemoryKind<T> {
    /// Error if deallocated except during a stack pop
    Stack,
    /// Error if ever deallocated
    Vtable,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones
    Machine(T),
}

impl<T: MayLeak> MayLeak for MemoryKind<T> {
    #[inline]
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::Vtable => true,
            MemoryKind::Machine(k) => k.may_leak()
        }
    }
}
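
// `may_leak` is consulted by `leak_report` below: vtable allocations are never
// deallocated by design, so they are not reported as leaks, while a `Stack`
// allocation that survives to the end of execution is.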

// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> {
    /// Allocations local to this instance of the miri engine. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// static and looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a static (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a static, doing so will
    /// create a copy of the static allocation here.
    alloc_map: M::MemoryMap,

    /// To be able to compare pointers with NULL, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist any more.
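    /// `deallocate` moves entries here, and `get_size_and_align` falls back to
    /// this map, which is how `check_align` can still judge pointers into dead
    /// allocations.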
    dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,

    /// Extra data added by the machine.
    pub extra: M::MemoryExtra,

    /// Lets us implement `HasDataLayout`, which is awfully convenient.
    pub(super) tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
}

impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout
    for Memory<'a, 'mir, 'tcx, M>
{
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        &self.tcx.data_layout
    }
}

// FIXME: Really we shouldn't clone memory, ever. Snapshot machinery should instead
// carefully copy only the reachable parts.
impl<'a, 'mir, 'tcx, M> Clone for Memory<'a, 'mir, 'tcx, M>
where
    M: Machine<'a, 'mir, 'tcx, PointerTag=(), AllocExtra=(), MemoryExtra=()>,
    M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKinds>, Allocation)>,
{
    fn clone(&self) -> Self {
        Memory {
            alloc_map: self.alloc_map.clone(),
            dead_alloc_map: self.dead_alloc_map.clone(),
            extra: (),
            tcx: self.tcx,
        }
    }
}

impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    pub fn new(tcx: TyCtxtAt<'a, 'tcx, 'tcx>) -> Self {
        Memory {
            alloc_map: M::MemoryMap::default(),
            dead_alloc_map: FxHashMap::default(),
            extra: M::MemoryExtra::default(),
            tcx,
        }
    }

    pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> Pointer {
        Pointer::from(self.tcx.alloc_map.lock().create_fn_alloc(instance))
    }

    pub fn allocate_static_bytes(&mut self, bytes: &[u8]) -> Pointer {
        Pointer::from(self.tcx.allocate_bytes(bytes))
    }

    pub fn allocate_with(
        &mut self,
        alloc: Allocation<M::PointerTag, M::AllocExtra>,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> EvalResult<'tcx, AllocId> {
        // `reserve` only hands out a fresh global `AllocId`; the bytes live in
        // the machine-local `alloc_map` until (if ever) interned.
        let id = self.tcx.alloc_map.lock().reserve();
        self.alloc_map.insert(id, (kind, alloc));
        Ok(id)
    }

    pub fn allocate(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> EvalResult<'tcx, Pointer> {
        let extra = AllocationExtra::memory_allocated(size, &self.extra);
        Ok(Pointer::from(self.allocate_with(Allocation::undef(size, align, extra), kind)?))
    }

    pub fn reallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        old_size: Size,
        old_align: Align,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> EvalResult<'tcx, Pointer> {
        if ptr.offset.bytes() != 0 {
            return err!(ReallocateNonBasePtr);
        }

        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
        // Note that the new allocation starts entirely undef, so any bytes beyond
        // `old_size.min(new_size)` remain undefined, matching `realloc` semantics.
        let new_ptr = self.allocate(new_size, new_align, kind)?;
        self.copy(
            ptr.into(),
            old_align,
            new_ptr.with_default_tag().into(),
            new_align,
            old_size.min(new_size),
            /*nonoverlapping*/ true,
        )?;
        self.deallocate(ptr, Some((old_size, old_align)), kind)?;

        Ok(new_ptr)
    }

    /// Deallocate a local, or do nothing if that local has been made into a static
    pub fn deallocate_local(&mut self, ptr: Pointer<M::PointerTag>) -> EvalResult<'tcx> {
        // The allocation might be already removed by static interning.
        // This can only really happen in the CTFE instance, not in miri.
        if self.alloc_map.contains_key(&ptr.alloc_id) {
            self.deallocate(ptr, None, MemoryKind::Stack)
        } else {
            Ok(())
        }
    }

    pub fn deallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> EvalResult<'tcx> {
        trace!("deallocating: {}", ptr.alloc_id);

        if ptr.offset.bytes() != 0 {
            return err!(DeallocateNonBasePtr);
        }

        let (alloc_kind, mut alloc) = match self.alloc_map.remove(&ptr.alloc_id) {
            Some(alloc) => alloc,
            None => {
                // Deallocating static memory -- always an error
                return match self.tcx.alloc_map.lock().get(ptr.alloc_id) {
                    Some(AllocType::Function(..)) => err!(DeallocatedWrongMemoryKind(
                        "function".to_string(),
                        format!("{:?}", kind),
                    )),
                    Some(AllocType::Static(..)) |
                    Some(AllocType::Memory(..)) => err!(DeallocatedWrongMemoryKind(
                        "static".to_string(),
                        format!("{:?}", kind),
                    )),
                    None => err!(DoubleFree)
                }
            }
        };

        if alloc_kind != kind {
            return err!(DeallocatedWrongMemoryKind(
                format!("{:?}", alloc_kind),
                format!("{:?}", kind),
            ));
        }
        if let Some((size, align)) = size_and_align {
            if size.bytes() != alloc.bytes.len() as u64 || align != alloc.align {
                let bytes = Size::from_bytes(alloc.bytes.len() as u64);
                return err!(IncorrectAllocationInformation(size, bytes, align, alloc.align));
            }
        }

        // Let the machine take some extra action
        let size = Size::from_bytes(alloc.bytes.len() as u64);
        AllocationExtra::memory_deallocated(&mut alloc, ptr, size)?;

        // Don't forget to remember size and align of this now-dead allocation
        let old = self.dead_alloc_map.insert(
            ptr.alloc_id,
            (Size::from_bytes(alloc.bytes.len() as u64), alloc.align)
        );
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        Ok(())
    }

    /// Check that the pointer is aligned AND non-NULL. This supports ZSTs in two ways:
    /// You can pass a scalar, and a `Pointer` does not have to actually still be allocated.
    pub fn check_align(
        &self,
        ptr: Scalar<M::PointerTag>,
        required_align: Align
    ) -> EvalResult<'tcx> {
        // Check non-NULL/Undef, extract offset
        let (offset, alloc_align) = match ptr {
            Scalar::Ptr(ptr) => {
                // check this is not NULL -- which we can ensure only if this is in-bounds
                // of some (potentially dead) allocation.
                let align = self.check_bounds_ptr_maybe_dead(ptr)?;
                (ptr.offset.bytes(), align)
            }
            Scalar::Bits { bits, size } => {
                assert_eq!(size as u64, self.pointer_size().bytes());
                assert!(bits < (1u128 << self.pointer_size().bits()));
                // check this is not NULL
                if bits == 0 {
                    return err!(InvalidNullPointerUsage);
                }
                // the "base address" is 0 and hence always aligned
                (bits as u64, required_align)
            }
        };
        // Check the allocation's own alignment first
        if alloc_align.bytes() < required_align.bytes() {
            return err!(AlignmentCheckFailed {
                has: alloc_align,
                required: required_align,
            });
        }
        // Then check the offset within the allocation
        if offset % required_align.bytes() == 0 {
            Ok(())
        } else {
            let has = offset % required_align.bytes();
            // `has` is not necessarily a power of two (e.g., offset 6 against an
            // 8-aligned access), so report the largest power of two dividing it
            // to keep `Align::from_bytes` from panicking.
            err!(AlignmentCheckFailed {
                has: Align::from_bytes(1 << has.trailing_zeros()).unwrap(),
                required: required_align,
            })
        }
    }
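
    // For example, offset 13 with `required_align` 4 fails with `has = 1`
    // (the largest power of two dividing 13), while offset 12 passes. For an
    // integer address, `alloc_align` equals `required_align`, so only the
    // offset check above can fail.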

    /// Check if the pointer is "in-bounds". Notice that a pointer pointing at the end
    /// of an allocation (i.e., at the first *inaccessible* location) *is* considered
    /// in-bounds! This follows C's/LLVM's rules.
    /// This function also works for deallocated allocations.
    /// Use `.get(ptr.alloc_id)?.check_bounds_ptr(ptr)` if you want to force the allocation
    /// to still be live.
    /// If you want to check bounds before doing a memory access, better first obtain
    /// an `Allocation` and call `check_bounds`.
    pub fn check_bounds_ptr_maybe_dead(
        &self,
        ptr: Pointer<M::PointerTag>,
    ) -> EvalResult<'tcx, Align> {
        let (allocation_size, align) = self.get_size_and_align(ptr.alloc_id);
        ptr.check_in_alloc(allocation_size, InboundsCheck::MaybeDead)?;
        Ok(align)
    }
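
    // For example, in a 4-byte allocation, offsets 0 through 4 are in-bounds
    // (4 being the one-past-the-end pointer allowed by C/LLVM), while offset 5
    // is out-of-bounds even if it is never dereferenced.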
}

/// Allocation accessors
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    /// Helper function to obtain the global (tcx) allocation for a static.
    /// This attempts to return a reference to an existing allocation if
    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
    /// this machine use the same pointer tag, so it is indirected through
    /// `M::adjust_static_allocation`.
    fn get_static_alloc(
        id: AllocId,
        tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
        memory_extra: &M::MemoryExtra,
    ) -> EvalResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
        let alloc = tcx.alloc_map.lock().get(id);
        let def_id = match alloc {
            Some(AllocType::Memory(mem)) => {
                // We got tcx memory. Let the machine figure out whether and how to
                // turn that into memory with the right pointer tag.
                return Ok(M::adjust_static_allocation(mem, memory_extra))
            }
            Some(AllocType::Function(..)) => {
                return err!(DerefFunctionPointer)
            }
            Some(AllocType::Static(did)) => {
                did
            }
            None =>
                return err!(DanglingPointerDeref),
        };
343 // We got a "lazy" static that has not been computed yet, do some work
344 trace!("static_alloc: Need to compute {:?}", def_id);
345 if tcx.is_foreign_item(def_id) {
346 return M::find_foreign_static(def_id, tcx, memory_extra);
348 let instance = Instance::mono(tcx.tcx, def_id);
353 // use the raw query here to break validation cycles. Later uses of the static will call the
        let raw_const = tcx.const_eval_raw(ty::ParamEnv::reveal_all().and(gid)).map_err(|err| {
            // no need to report anything, the const_eval call takes care of that for statics
            assert!(tcx.is_static(def_id).is_some());
            match err {
                ErrorHandled::Reported => EvalErrorKind::ReferencedConstant.into(),
                ErrorHandled::TooGeneric => EvalErrorKind::TooGeneric.into(),
            }
        })?;
        let allocation = tcx.alloc_map.lock().unwrap_memory(raw_const.alloc_id);
        // We got tcx memory. Let the machine figure out whether and how to
        // turn that into memory with the right pointer tag.
        M::adjust_static_allocation(allocation, memory_extra)
    }
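
    // Whether `get_static_alloc` returns `Cow::Borrowed` or `Cow::Owned` is what
    // lets `get` below either hand out a reference straight into `tcx` or first
    // copy the allocation into the machine-local `alloc_map`.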

    pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
        // The error type of the inner closure here is somewhat funny. We have two
        // ways of "erroring": An actual error, or because we got a reference from
        // `get_static_alloc` that we can actually use directly without inserting anything anywhere.
        // So the error type is `EvalResult<'tcx, &Allocation<M::PointerTag>>`.
        let a = self.alloc_map.get_or(id, || {
            let alloc = Self::get_static_alloc(id, self.tcx, &self.extra).map_err(Err)?;
            match alloc {
                Cow::Borrowed(alloc) => {
                    // We got a ref, cheaply return that as an "error" so that the
                    // map does not get mutated.
                    Err(Ok(alloc))
                }
                Cow::Owned(alloc) => {
                    // Need to put it into the map and return a ref to that
                    let kind = M::STATIC_KIND.expect(
                        "I got an owned allocation that I have to copy but the machine does \
                         not expect that to happen"
                    );
                    Ok((MemoryKind::Machine(kind), alloc))
                }
            }
        });
        // Now unpack that funny error type
        match a {
            Ok(a) => Ok(&a.1),
            Err(a) => a
        }
    }

    pub fn get_mut(
        &mut self,
        id: AllocId,
    ) -> EvalResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
        let tcx = self.tcx;
        let memory_extra = &self.extra;
        let a = self.alloc_map.get_mut_or(id, || {
            // Need to make a copy, even if `get_static_alloc` is able
            // to give us a cheap reference.
            let alloc = Self::get_static_alloc(id, tcx, memory_extra)?;
            if alloc.mutability == Mutability::Immutable {
                return err!(ModifiedConstantMemory);
            }
            match M::STATIC_KIND {
                Some(kind) => Ok((MemoryKind::Machine(kind), alloc.into_owned())),
                None => err!(ModifiedStatic),
            }
        });
        // Unpack the error type manually because type inference doesn't
        // work otherwise (and we cannot help it because `impl Trait`)
        match a {
            Err(e) => Err(e),
            Ok(a) => {
                let a = &mut a.1;
                if a.mutability == Mutability::Immutable {
                    return err!(ModifiedConstantMemory);
                }
                Ok(a)
            }
        }
    }

    pub fn get_size_and_align(&self, id: AllocId) -> (Size, Align) {
        if let Ok(alloc) = self.get(id) {
            return (Size::from_bytes(alloc.bytes.len() as u64), alloc.align);
        }
        // Could also be a fn ptr or extern static
        match self.tcx.alloc_map.lock().get(id) {
            Some(AllocType::Function(..)) => (Size::ZERO, Align::from_bytes(1).unwrap()),
            Some(AllocType::Static(did)) => {
                // The only way `get` couldn't have worked here is if this is an extern static
                assert!(self.tcx.is_foreign_item(did));
                // Use size and align of the type
                let ty = self.tcx.type_of(did);
                let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
                (layout.size, layout.align.abi)
            }
            _ => {
                // Must be a deallocated pointer
                *self.dead_alloc_map.get(&id).expect(
                    "allocation missing in dead_alloc_map"
                )
            }
        }
    }

    pub fn get_fn(&self, ptr: Pointer<M::PointerTag>) -> EvalResult<'tcx, Instance<'tcx>> {
        if ptr.offset.bytes() != 0 {
            return err!(InvalidFunctionPointer);
        }
        trace!("reading fn ptr: {}", ptr.alloc_id);
        match self.tcx.alloc_map.lock().get(ptr.alloc_id) {
            Some(AllocType::Function(instance)) => Ok(instance),
            _ => Err(EvalErrorKind::ExecuteMemory.into()),
        }
    }

    pub fn mark_immutable(&mut self, id: AllocId) -> EvalResult<'tcx> {
        self.get_mut(id)?.mutability = Mutability::Immutable;
        Ok(())
    }

    /// For debugging, print an allocation and all allocations it points to, recursively.
    pub fn dump_alloc(&self, id: AllocId) {
        self.dump_allocs(vec![id]);
    }

    fn dump_alloc_helper<Tag, Extra>(
        &self,
        allocs_seen: &mut FxHashSet<AllocId>,
        allocs_to_print: &mut VecDeque<AllocId>,
        mut msg: String,
        alloc: &Allocation<Tag, Extra>,
        extra: String,
    ) {
        use std::fmt::Write;

        let prefix_len = msg.len();
        let mut relocations = vec![];

        for i in 0..(alloc.bytes.len() as u64) {
            let i = Size::from_bytes(i);
            if let Some(&(_, target_id)) = alloc.relocations.get(&i) {
                if allocs_seen.insert(target_id) {
                    allocs_to_print.push_back(target_id);
                }
                relocations.push((i, target_id));
            }
            if alloc.undef_mask.is_range_defined(i, i + Size::from_bytes(1)).is_ok() {
                // this `as usize` is fine, since `i` came from a `usize`
                write!(msg, "{:02x} ", alloc.bytes[i.bytes() as usize]).unwrap();
            } else {
                msg.push_str("__ ");
            }
        }

        trace!(
            "{}({} bytes, alignment {}){}",
            msg,
            alloc.bytes.len(),
            alloc.align.bytes(),
            extra
        );

        if !relocations.is_empty() {
            msg.clear();
            write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
            let mut pos = Size::ZERO;
            let relocation_width = (self.pointer_size().bytes() - 1) * 3;
            for (i, target_id) in relocations {
                // this `as usize` is fine, since we can't print more chars than `usize::MAX`
                write!(msg, "{:1$}", "", ((i - pos) * 3).bytes() as usize).unwrap();
                let target = format!("({})", target_id);
                // this `as usize` is fine, since we can't print more chars than `usize::MAX`
                write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
                pos = i + self.pointer_size();
            }
            trace!("{}", msg);
        }
    }
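
    // The relocation line is drawn underneath the bytes it covers, roughly
    // like this (a sketch, not actual output):
    //     Alloc 7:  00 00 00 00 2a
    //               └───(8)───┘
    // where `(8)` is the id of the allocation the stored pointer points to.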

    /// For debugging, print a list of allocations and all allocations they point to, recursively.
    pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
        if !log_enabled!(::log::Level::Trace) {
            return;
        }
        allocs.sort();
        allocs.dedup();
        let mut allocs_to_print = VecDeque::from(allocs);
        let mut allocs_seen = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            let msg = format!("Alloc {:<5} ", format!("{}:", id));

            // normal alloc?
            match self.alloc_map.get_or(id, || Err(())) {
                Ok((kind, alloc)) => {
                    let extra = match kind {
                        MemoryKind::Stack => " (stack)".to_owned(),
                        MemoryKind::Vtable => " (vtable)".to_owned(),
                        MemoryKind::Machine(m) => format!(" ({:?})", m),
                    };
                    self.dump_alloc_helper(
                        &mut allocs_seen, &mut allocs_to_print,
                        msg, alloc, extra
                    );
                },
                Err(()) => {
                    // static alloc?
                    match self.tcx.alloc_map.lock().get(id) {
                        Some(AllocType::Memory(alloc)) => {
                            self.dump_alloc_helper(
                                &mut allocs_seen, &mut allocs_to_print,
                                msg, alloc, " (immutable)".to_owned()
                            );
                        }
                        Some(AllocType::Function(func)) => {
                            trace!("{} {}", msg, func);
                        }
                        Some(AllocType::Static(did)) => {
                            trace!("{} {:?}", msg, did);
                        }
                        None => {
                            trace!("{} (deallocated)", msg);
                        }
                    }
                },
            };
        }
    }

    pub fn leak_report(&self) -> usize {
        trace!("### LEAK REPORT ###");
        let leaks: Vec<_> = self.alloc_map.filter_map_collect(|&id, &(kind, _)| {
            if kind.may_leak() { None } else { Some(id) }
        });
        let n = leaks.len();
        self.dump_allocs(leaks);
        n
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda)
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}

/// Byte accessors
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    pub fn read_bytes(
        &self,
        ptr: Scalar<M::PointerTag>,
        size: Size,
    ) -> EvalResult<'tcx, &[u8]> {
        if size.bytes() == 0 {
            Ok(&[])
        } else {
            let ptr = ptr.to_ptr()?;
            self.get(ptr.alloc_id)?.get_bytes(self, ptr, size)
        }
    }
}

/// Interning (for CTFE)
impl<'a, 'mir, 'tcx, M> Memory<'a, 'mir, 'tcx, M>
where
    M: Machine<'a, 'mir, 'tcx, PointerTag=(), AllocExtra=(), MemoryExtra=()>,
    // FIXME: Working around https://github.com/rust-lang/rust/issues/24159
    M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKinds>, Allocation)>,
{
    /// Mark an allocation as static and initialized, either mutable or not
    pub fn intern_static(
        &mut self,
        alloc_id: AllocId,
        mutability: Mutability,
    ) -> EvalResult<'tcx> {
628 "mark_static_initialized {:?}, mutability: {:?}",
633 let (kind, mut alloc) = self.alloc_map.remove(&alloc_id).unwrap();
635 MemoryKind::Machine(_) => bug!("Static cannot refer to machine memory"),
636 MemoryKind::Stack | MemoryKind::Vtable => {},
        // ensure llvm knows not to put this into immutable memory
        alloc.mutability = mutability;
        let alloc = self.tcx.intern_const_alloc(alloc);
        self.tcx.alloc_map.lock().set_id_memory(alloc_id, alloc);
        // recurse into inner allocations
        for &(_, alloc) in alloc.relocations.values() {
            // FIXME: Reusing the mutability here is likely incorrect. It is originally
            // determined via `is_freeze`, and data is considered frozen if there is no
            // `UnsafeCell` *immediately* in that data -- however, this search stops
            // at references. So whenever we follow a reference, we should likely
            // assume immutability -- and we should make sure that the compiler
            // does not permit code that would break this!
            if self.alloc_map.contains_key(&alloc) {
                // Not yet interned, so proceed recursively
                self.intern_static(alloc, mutability)?;
            } else if self.dead_alloc_map.contains_key(&alloc) {
                // dangling pointer
                return err!(ValidationFailure(
                    "encountered dangling pointer in final constant".into(),
                ))
            }
        }
        Ok(())
    }
}

/// Reading and writing
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    pub fn copy(
        &mut self,
        src: Scalar<M::PointerTag>,
        src_align: Align,
        dest: Scalar<M::PointerTag>,
        dest_align: Align,
        size: Size,
        nonoverlapping: bool,
    ) -> EvalResult<'tcx> {
        self.copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
    }

    pub fn copy_repeatedly(
        &mut self,
        src: Scalar<M::PointerTag>,
        src_align: Align,
        dest: Scalar<M::PointerTag>,
        dest_align: Align,
        size: Size,
        length: u64,
        nonoverlapping: bool,
    ) -> EvalResult<'tcx> {
        self.check_align(src, src_align)?;
        self.check_align(dest, dest_align)?;
        if size.bytes() == 0 {
            // Nothing to do for ZST, other than checking alignment and
            // non-NULLness which already happened.
            return Ok(());
        }
        let src = src.to_ptr()?;
        let dest = dest.to_ptr()?;

        // first copy the relocations to a temporary buffer, because
        // `get_bytes_mut` will clear the relocations, which is correct,
        // since we don't want to keep any relocations at the target.
        // (`get_bytes_with_undef_and_ptr` below checks that there are no
        // relocations overlapping the edges; those would not be handled correctly).
        let relocations = {
            let relocations = self.get(src.alloc_id)?.relocations(self, src, size);
            let mut new_relocations = Vec::with_capacity(relocations.len() * (length as usize));
            for i in 0..length {
                new_relocations.extend(
                    relocations
                    .iter()
                    .map(|&(offset, reloc)| {
                        // shift offsets from the source allocation into the
                        // destination allocation; repetition `i` starts at
                        // `dest.offset + i * size`
                        (offset + dest.offset - src.offset + (i * size), reloc)
                    })
                );
            }
            new_relocations
        };

        let tcx = self.tcx.tcx;

        // This checks relocation edges on the src.
        let src_bytes = self.get(src.alloc_id)?
            .get_bytes_with_undef_and_ptr(&tcx, src, size)?
            .as_ptr();
        let dest_bytes = self.get_mut(dest.alloc_id)?
            .get_bytes_mut(&tcx, dest, size * length)?
            .as_mut_ptr();

        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        // The pointers above remain valid even if the `HashMap` table is moved around because they
        // point into the `Vec` storing the bytes.
        unsafe {
            assert_eq!(size.bytes() as usize as u64, size.bytes());
            if src.alloc_id == dest.alloc_id {
                if nonoverlapping {
                    if (src.offset <= dest.offset && src.offset + size > dest.offset) ||
                        (dest.offset <= src.offset && dest.offset + size > src.offset)
                    {
                        return err!(Intrinsic(
                            "copy_nonoverlapping called on overlapping ranges".to_string(),
                        ));
                    }
                }

                for i in 0..length {
                    ptr::copy(src_bytes,
                              dest_bytes.offset((size.bytes() * i) as isize),
                              size.bytes() as usize);
                }
            } else {
                for i in 0..length {
                    ptr::copy_nonoverlapping(src_bytes,
                                             dest_bytes.offset((size.bytes() * i) as isize),
                                             size.bytes() as usize);
                }
            }
        }

        // copy definedness to the destination
        self.copy_undef_mask(src, dest, size, length)?;
        // copy the relocations to the destination
        self.get_mut(dest.alloc_id)?.relocations.insert_presorted(relocations);

        Ok(())
    }
}

/// Undefined bytes
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    // FIXME: Add a fast version for the common, nonoverlapping case
    fn copy_undef_mask(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        repeat: u64,
    ) -> EvalResult<'tcx> {
        // The bits have to be saved locally before writing to dest in case src and dest overlap.
        assert_eq!(size.bytes() as usize as u64, size.bytes());

        let undef_mask = self.get(src.alloc_id)?.undef_mask.clone();
        let dest_allocation = self.get_mut(dest.alloc_id)?;

        for i in 0..size.bytes() {
            let defined = undef_mask.get(src.offset + Size::from_bytes(i));

            for j in 0..repeat {
                dest_allocation.undef_mask.set(
                    dest.offset + Size::from_bytes(i + (size.bytes() * j)),
                    defined
                );
            }
        }

        Ok(())
    }
}