use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian, BigEndian};
use std::collections::{btree_map, BTreeMap, VecDeque};
use std::{io, ptr};
use std::fmt::Write;

use rustc::ty::Instance;
use rustc::ty::maps::TyCtxtAt;
use rustc::ty::layout::{self, Align, TargetDataLayout};
use syntax::ast::Mutability;

use rustc_data_structures::fx::{FxHashSet, FxHashMap};
use rustc::mir::interpret::{MemoryPointer, AllocId, Allocation, AccessKind, UndefMask, Value, Pointer,
                            EvalResult, PrimVal, EvalErrorKind};

use super::{EvalContext, Machine};

////////////////////////////////////////////////////////////////////////////////
// Allocations and pointers
////////////////////////////////////////////////////////////////////////////////

#[derive(Debug, PartialEq, Copy, Clone)]
pub enum MemoryKind<T> {
    /// Error if deallocated except during a stack pop
    Stack,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones
    Machine(T),
}

////////////////////////////////////////////////////////////////////////////////
// Top-level interpreter memory
////////////////////////////////////////////////////////////////////////////////

pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
    /// Additional data required by the Machine
    pub data: M::MemoryData,

    /// Helps guarantee that stack allocations aren't deallocated via `rust_deallocate`
    alloc_kind: FxHashMap<AllocId, MemoryKind<M::MemoryKinds>>,

    /// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations).
    alloc_map: FxHashMap<AllocId, Allocation>,

    /// Stores statics while they are being processed, before they are interned and thus frozen
    uninitialized_statics: FxHashMap<AllocId, Allocation>,

    /// The current stack frame. Used to check accesses against locks.
    pub cur_frame: usize,

    pub tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
}

impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    pub fn new(tcx: TyCtxtAt<'a, 'tcx, 'tcx>, data: M::MemoryData) -> Self {
        Memory {
            data,
            alloc_kind: FxHashMap::default(),
            alloc_map: FxHashMap::default(),
            uninitialized_statics: FxHashMap::default(),
            tcx,
            cur_frame: usize::max_value(),
        }
    }

    pub fn allocations<'x>(
        &'x self,
    ) -> impl Iterator<Item = (AllocId, &'x Allocation)> {
        self.alloc_map.iter().map(|(&id, alloc)| (id, alloc))
    }

    pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> MemoryPointer {
        let id = self.tcx.interpret_interner.create_fn_alloc(instance);
        MemoryPointer::new(id, 0)
    }

    pub fn allocate_cached(&mut self, bytes: &[u8]) -> MemoryPointer {
        let id = self.tcx.allocate_cached(bytes);
        MemoryPointer::new(id, 0)
    }

    /// `kind` is `None` for statics
    pub fn allocate(
        &mut self,
        size: u64,
        align: Align,
        kind: Option<MemoryKind<M::MemoryKinds>>,
    ) -> EvalResult<'tcx, MemoryPointer> {
        assert_eq!(size as usize as u64, size);
        let alloc = Allocation {
            bytes: vec![0; size as usize],
            relocations: BTreeMap::new(),
            undef_mask: UndefMask::new(size),
            align,
            runtime_mutability: Mutability::Immutable,
        };
        let id = self.tcx.interpret_interner.reserve();
        M::add_lock(self, id);
        match kind {
            Some(kind @ MemoryKind::Stack) |
            Some(kind @ MemoryKind::Machine(_)) => {
                self.alloc_map.insert(id, alloc);
                self.alloc_kind.insert(id, kind);
            },
            None => {
                self.uninitialized_statics.insert(id, alloc);
            },
        }
        Ok(MemoryPointer::new(id, 0))
    }

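    // Editor's sketch (not from the original source): typical use of `allocate`
    // for a 16-byte stack slot, assuming `mem` is a `Memory` for some `Machine`:
    //
    //     let align = Align::from_bytes(8, 8).unwrap();
    //     let ptr = mem.allocate(16, align, Some(MemoryKind::Stack))?;
    //     // `ptr` points at offset 0 of a fresh `AllocId`; all 16 bytes stay
    //     // marked undefined until something is written to them.
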
    pub fn reallocate(
        &mut self,
        ptr: MemoryPointer,
        old_size: u64,
        old_align: Align,
        new_size: u64,
        new_align: Align,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> EvalResult<'tcx, MemoryPointer> {
        if ptr.offset != 0 {
            return err!(ReallocateNonBasePtr);
        }
        if self.alloc_map.contains_key(&ptr.alloc_id) {
            let alloc_kind = self.alloc_kind[&ptr.alloc_id];
            if alloc_kind != kind {
                return err!(ReallocatedWrongMemoryKind(
                    format!("{:?}", alloc_kind),
                    format!("{:?}", kind),
                ));
            }
        }

        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc"
        let new_ptr = self.allocate(new_size, new_align, Some(kind))?;
        self.copy(
            ptr.into(),
            old_align,
            new_ptr.into(),
            new_align,
            old_size.min(new_size),
            /*nonoverlapping*/ true,
        )?;
        self.deallocate(ptr, Some((old_size, old_align)), kind)?;

        Ok(new_ptr)
    }

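    // Editor's sketch (hypothetical values): growing the 16-byte allocation
    // above to 32 bytes. Because reallocate is "alloc, copy, dealloc", the
    // result lives in a fresh allocation, so the returned pointer has a new
    // `AllocId`; the copy also carries over undef bits and relocations.
    //
    //     let big = mem.reallocate(ptr, 16, align, 32, align, MemoryKind::Stack)?;
    //     assert_ne!(big.alloc_id, ptr.alloc_id);
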
    pub fn deallocate_local(&mut self, ptr: MemoryPointer) -> EvalResult<'tcx> {
        match self.alloc_kind.get(&ptr.alloc_id).cloned() {
            Some(MemoryKind::Stack) => self.deallocate(ptr, None, MemoryKind::Stack),
            // Happens if the memory was interned into immutable memory
            None => Ok(()),
            other => bug!("local contained non-stack memory: {:?}", other),
        }
    }

    pub fn deallocate(
        &mut self,
        ptr: MemoryPointer,
        size_and_align: Option<(u64, Align)>,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> EvalResult<'tcx> {
        if ptr.offset != 0 {
            return err!(DeallocateNonBasePtr);
        }

        let alloc = match self.alloc_map.remove(&ptr.alloc_id) {
            Some(alloc) => alloc,
            None => if self.uninitialized_statics.contains_key(&ptr.alloc_id) {
                return err!(DeallocatedWrongMemoryKind(
                    "uninitialized static".to_string(),
                    format!("{:?}", kind),
                ));
            } else if self.tcx.interpret_interner.get_fn(ptr.alloc_id).is_some() {
                return err!(DeallocatedWrongMemoryKind(
                    "function".to_string(),
                    format!("{:?}", kind),
                ));
            } else if self.tcx.interpret_interner.get_alloc(ptr.alloc_id).is_some() {
                return err!(DeallocatedWrongMemoryKind(
                    "static".to_string(),
                    format!("{:?}", kind),
                ));
            } else {
                return err!(DoubleFree)
            },
        };

        let alloc_kind = self.alloc_kind.remove(&ptr.alloc_id).expect("alloc_map out of sync with alloc_kind");

        // It is okay for us to still hold locks on deallocation -- for example, we could store data we own
        // in a local, and the local could be deallocated (from StorageDead) before the function returns.
        // However, we should check *something*. For now, we make sure that there is no conflicting write
        // lock by another frame. We *have* to permit deallocation if we hold a read lock.
        // TODO: Figure out the exact rules here.
        M::free_lock(self, ptr.alloc_id, alloc.bytes.len() as u64)?;

        if alloc_kind != kind {
            return err!(DeallocatedWrongMemoryKind(
                format!("{:?}", alloc_kind),
                format!("{:?}", kind),
            ));
        }
        if let Some((size, align)) = size_and_align {
            if size != alloc.bytes.len() as u64 || align != alloc.align {
                return err!(IncorrectAllocationInformation(size, alloc.bytes.len(), align.abi(), alloc.align.abi()));
            }
        }

        debug!("deallocated: {}", ptr.alloc_id);
        Ok(())
    }

    pub fn pointer_size(&self) -> u64 {
        self.tcx.data_layout.pointer_size.bytes()
    }

    pub fn endianness(&self) -> layout::Endian {
        self.tcx.data_layout.endian
    }

    /// Check that the pointer is aligned AND non-NULL.
    pub fn check_align(&self, ptr: Pointer, required_align: Align) -> EvalResult<'tcx> {
        // Check non-NULL/Undef, extract offset
        let (offset, alloc_align) = match ptr.into_inner_primval() {
            PrimVal::Ptr(ptr) => {
                let alloc = self.get(ptr.alloc_id)?;
                (ptr.offset, alloc.align)
            }
            PrimVal::Bytes(bytes) => {
                // Truncate to the target's pointer width (`pointer_size()` is in bytes).
                let v = ((bytes as u128) % (1u128 << (self.pointer_size() * 8))) as u64;
                if v == 0 {
                    return err!(InvalidNullPointerUsage);
                }
                // the base address of the "integer allocation" is 0 and hence always aligned
                (v, required_align)
            }
            PrimVal::Undef => return err!(ReadUndefBytes),
        };
        // Check alignment
        if alloc_align.abi() < required_align.abi() {
            return err!(AlignmentCheckFailed {
                has: alloc_align.abi(),
                required: required_align.abi(),
            });
        }
        if offset % required_align.abi() == 0 {
            Ok(())
        } else {
            err!(AlignmentCheckFailed {
                has: offset % required_align.abi(),
                required: required_align.abi(),
            })
        }
    }

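    // Worked example (editor's note): asking for `required_align` = 4 against a
    // pointer into an 8-aligned allocation passes the `alloc_align` check, and
    // then only offsets 0, 4, 8, ... pass `offset % 4 == 0`. An integer
    // "pointer" with value 0x1003 fails with `has: 3, required: 4`, while the
    // integer 0 is rejected earlier as a NULL pointer.
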
    pub fn check_bounds(&self, ptr: MemoryPointer, access: bool) -> EvalResult<'tcx> {
        let alloc = self.get(ptr.alloc_id)?;
        let allocation_size = alloc.bytes.len() as u64;
        if ptr.offset > allocation_size {
            return err!(PointerOutOfBounds {
                ptr,
                access,
                allocation_size,
            });
        }
        Ok(())
    }
}

/// Allocation accessors
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation> {
        // normal alloc?
        match self.alloc_map.get(&id) {
            Some(alloc) => Ok(alloc),
            // uninitialized static alloc?
            None => match self.uninitialized_statics.get(&id) {
                Some(alloc) => Ok(alloc),
                // interned static alloc?
                None => self.tcx.interpret_interner.get_alloc(id)
                    // no alloc? produce an error
                    .ok_or_else(|| if self.tcx.interpret_interner.get_fn(id).is_some() {
                        EvalErrorKind::DerefFunctionPointer.into()
                    } else {
                        EvalErrorKind::DanglingPointerDeref.into()
                    }),
            },
        }
    }

    pub fn get_mut(
        &mut self,
        id: AllocId,
    ) -> EvalResult<'tcx, &mut Allocation> {
        // normal alloc?
        match self.alloc_map.get_mut(&id) {
            Some(alloc) => Ok(alloc),
            // uninitialized static alloc?
            None => match self.uninitialized_statics.get_mut(&id) {
                Some(alloc) => Ok(alloc),
                None => {
                    // no alloc or immutable alloc? produce an error
                    if self.tcx.interpret_interner.get_alloc(id).is_some() {
                        err!(ModifiedConstantMemory)
                    } else if self.tcx.interpret_interner.get_fn(id).is_some() {
                        err!(DerefFunctionPointer)
                    } else {
                        err!(DanglingPointerDeref)
                    }
                },
            },
        }
    }

    pub fn get_fn(&self, ptr: MemoryPointer) -> EvalResult<'tcx, Instance<'tcx>> {
        if ptr.offset != 0 {
            return err!(InvalidFunctionPointer);
        }
        debug!("reading fn ptr: {}", ptr.alloc_id);
        self.tcx.interpret_interner
            .get_fn(ptr.alloc_id)
            .ok_or(EvalErrorKind::ExecuteMemory.into())
    }

    pub fn get_alloc_kind(&self, id: AllocId) -> Option<MemoryKind<M::MemoryKinds>> {
        self.alloc_kind.get(&id).cloned()
    }

    /// For debugging, print an allocation and all allocations it points to, recursively.
    pub fn dump_alloc(&self, id: AllocId) {
        if !log_enabled!(::log::Level::Trace) {
            return;
        }
        self.dump_allocs(vec![id]);
    }

    /// For debugging, print a list of allocations and all allocations they point to, recursively.
    pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
        if !log_enabled!(::log::Level::Trace) {
            return;
        }
        allocs.sort();
        allocs.dedup();
        let mut allocs_to_print = VecDeque::from(allocs);
        let mut allocs_seen = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            let mut msg = format!("Alloc {:<5} ", format!("{}:", id));
            let prefix_len = msg.len();
            let mut relocations = vec![];

            let (alloc, immutable) =
                // normal alloc?
                match self.alloc_map.get(&id) {
                    Some(a) => (a, match self.alloc_kind[&id] {
                        MemoryKind::Stack => " (stack)".to_owned(),
                        MemoryKind::Machine(m) => format!(" ({:?})", m),
                    }),
                    // uninitialized static alloc?
                    None => match self.uninitialized_statics.get(&id) {
                        Some(a) => (a, " (static in the process of initialization)".to_owned()),
                        // interned static alloc, or a function?
                        None => match self.tcx.interpret_interner.get_alloc(id) {
                            Some(a) => (a, " (immutable)".to_owned()),
                            None => if let Some(func) = self.tcx.interpret_interner.get_fn(id) {
                                trace!("{} {}", msg, func);
                                continue;
                            } else {
                                trace!("{} (deallocated)", msg);
                                continue;
                            },
                        },
                    },
                };

            for i in 0..(alloc.bytes.len() as u64) {
                if let Some(&target_id) = alloc.relocations.get(&i) {
                    if allocs_seen.insert(target_id) {
                        allocs_to_print.push_back(target_id);
                    }
                    relocations.push((i, target_id));
                }
                if alloc.undef_mask.is_range_defined(i, i + 1) {
                    // this `as usize` is fine, since `i` came from a `usize`
                    write!(msg, "{:02x} ", alloc.bytes[i as usize]).unwrap();
                } else {
                    write!(msg, "__ ").unwrap();
                }
            }

            trace!(
                "{}({} bytes, alignment {}){}",
                msg,
                alloc.bytes.len(),
                alloc.align.abi(),
                immutable
            );

            if !relocations.is_empty() {
                msg.clear();
                write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
                let mut pos = 0;
                let relocation_width = (self.pointer_size() - 1) * 3;
                for (i, target_id) in relocations {
                    // this `as usize` is fine, since we can't print more chars than `usize::MAX`
                    write!(msg, "{:1$}", "", ((i - pos) * 3) as usize).unwrap();
                    let target = format!("({})", target_id);
                    // this `as usize` is fine, since we can't print more chars than `usize::MAX`
                    write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
                    pos = i + self.pointer_size();
                }
                trace!("{}", msg);
            }
        }
    }

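    // Editor's illustration of the trace format assembled above, for a
    // hypothetical 16-byte stack allocation 7 whose first 8 bytes hold a
    // pointer into allocation 9 (64-bit target; "__" marks undefined bytes):
    //
    //     Alloc 7:    00 00 00 00 00 00 00 00 __ __ __ __ __ __ __ __ (16 bytes, alignment 8) (stack)
    //                 └─────────(9)─────────┘
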
    pub fn leak_report(&self) -> usize {
        trace!("### LEAK REPORT ###");
        let leaks: Vec<_> = self.alloc_map
            .keys()
            .cloned()
            .collect();
        let n = leaks.len();
        self.dump_allocs(leaks);
        n
    }
}

impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    fn get_bytes_unchecked(
        &self,
        ptr: MemoryPointer,
        size: u64,
        align: Align,
    ) -> EvalResult<'tcx, &[u8]> {
        // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
        self.check_align(ptr.into(), align)?;
        if size == 0 {
            return Ok(&[]);
        }
        M::check_locks(self, ptr, size, AccessKind::Read)?;
        self.check_bounds(ptr.offset(size, self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
        let alloc = self.get(ptr.alloc_id)?;
        assert_eq!(ptr.offset as usize as u64, ptr.offset);
        assert_eq!(size as usize as u64, size);
        let offset = ptr.offset as usize;
        Ok(&alloc.bytes[offset..offset + size as usize])
    }

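    // Editor's note on the bounds trick above: checking the one-past-the-end
    // pointer covers both ends at once. For an 8-byte allocation, a 4-byte read
    // at offset 6 computes `ptr.offset(4)` = 10, and `check_bounds` rejects it
    // because 10 > 8; since `offset` itself errors on arithmetic overflow,
    // `ptr` being in bounds follows from `ptr + size` being in bounds.
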
    fn get_bytes_unchecked_mut(
        &mut self,
        ptr: MemoryPointer,
        size: u64,
        align: Align,
    ) -> EvalResult<'tcx, &mut [u8]> {
        // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
        self.check_align(ptr.into(), align)?;
        if size == 0 {
            return Ok(&mut []);
        }
        M::check_locks(self, ptr, size, AccessKind::Write)?;
        self.check_bounds(ptr.offset(size, &*self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
        let alloc = self.get_mut(ptr.alloc_id)?;
        assert_eq!(ptr.offset as usize as u64, ptr.offset);
        assert_eq!(size as usize as u64, size);
        let offset = ptr.offset as usize;
        Ok(&mut alloc.bytes[offset..offset + size as usize])
    }

    fn get_bytes(&self, ptr: MemoryPointer, size: u64, align: Align) -> EvalResult<'tcx, &[u8]> {
        assert_ne!(size, 0);
        if self.relocations(ptr, size)?.count() != 0 {
            return err!(ReadPointerAsBytes);
        }
        self.check_defined(ptr, size)?;
        self.get_bytes_unchecked(ptr, size, align)
    }

    fn get_bytes_mut(
        &mut self,
        ptr: MemoryPointer,
        size: u64,
        align: Align,
    ) -> EvalResult<'tcx, &mut [u8]> {
        assert_ne!(size, 0);
        self.clear_relocations(ptr, size)?;
        self.mark_definedness(ptr.into(), size, true)?;
        self.get_bytes_unchecked_mut(ptr, size, align)
    }
}

/// Reading and writing
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    /// mark an allocation pointed to by a static as static and initialized
    fn mark_inner_allocation_initialized(
        &mut self,
        alloc: AllocId,
        mutability: Mutability,
    ) -> EvalResult<'tcx> {
        match self.alloc_kind.get(&alloc) {
            // do not go into statics
            None => Ok(()),
            // just locals and machine allocs
            Some(_) => self.mark_static_initialized(alloc, mutability),
        }
    }

    /// mark an allocation as static and initialized, either mutable or not
    pub fn mark_static_initialized(
        &mut self,
        alloc_id: AllocId,
        mutability: Mutability,
    ) -> EvalResult<'tcx> {
        trace!(
            "mark_static_initialized {:?}, mutability: {:?}",
            alloc_id,
            mutability
        );
        // The machine handled it
        if M::mark_static_initialized(self, alloc_id, mutability)? {
            return Ok(());
        }
        let alloc = self.alloc_map.remove(&alloc_id);
        match self.alloc_kind.remove(&alloc_id) {
            None => {},
            Some(MemoryKind::Machine(_)) => bug!("machine didn't handle machine alloc"),
            Some(MemoryKind::Stack) => {},
        }
        let uninit = self.uninitialized_statics.remove(&alloc_id);
        if let Some(mut alloc) = alloc.or(uninit) {
            // ensure LLVM knows not to put this into immutable memory
            alloc.runtime_mutability = mutability;
            let alloc = self.tcx.intern_const_alloc(alloc);
            self.tcx.interpret_interner.intern_at_reserved(alloc_id, alloc);
            // recurse into inner allocations
            for &alloc in alloc.relocations.values() {
                self.mark_inner_allocation_initialized(alloc, mutability)?;
            }
        } else {
            bug!("no allocation found for {:?}", alloc_id);
        }
        Ok(())
    }

    pub fn copy(
        &mut self,
        src: Pointer,
        src_align: Align,
        dest: Pointer,
        dest_align: Align,
        size: u64,
        nonoverlapping: bool,
    ) -> EvalResult<'tcx> {
        // Empty accesses don't need to be valid pointers, but they should still be aligned
        self.check_align(src, src_align)?;
        self.check_align(dest, dest_align)?;
        if size == 0 {
            return Ok(());
        }
        let src = src.to_ptr()?;
        let dest = dest.to_ptr()?;
        self.check_relocation_edges(src, size)?;

        // first copy the relocations to a temporary buffer, because
        // `get_bytes_mut` will clear the relocations, which is correct,
        // since we don't want to keep any relocations at the target.
        let relocations: Vec<_> = self.relocations(src, size)?
            .map(|(&offset, &alloc_id)| {
                // Update relocation offsets for the new positions in the destination allocation.
                (offset + dest.offset - src.offset, alloc_id)
            })
            .collect();

        let src_bytes = self.get_bytes_unchecked(src, size, src_align)?.as_ptr();
        let dest_bytes = self.get_bytes_mut(dest, size, dest_align)?.as_mut_ptr();

        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        unsafe {
            assert_eq!(size as usize as u64, size);
            if src.alloc_id == dest.alloc_id {
                if nonoverlapping {
                    if (src.offset <= dest.offset && src.offset + size > dest.offset) ||
                        (dest.offset <= src.offset && dest.offset + size > src.offset)
                    {
                        return err!(Intrinsic(
                            "copy_nonoverlapping called on overlapping ranges".to_string(),
                        ));
                    }
                }
                ptr::copy(src_bytes, dest_bytes, size as usize);
            } else {
                ptr::copy_nonoverlapping(src_bytes, dest_bytes, size as usize);
            }
        }

        self.copy_undef_mask(src, dest, size)?;
        // copy back the relocations
        self.get_mut(dest.alloc_id)?.relocations.extend(relocations);

        Ok(())
    }

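    // Hypothetical usage (editor's sketch): copying 8 bytes between two
    // distinct allocations takes the `ptr::copy_nonoverlapping` branch; any
    // relocation inside the source range reappears at the destination shifted
    // by `dest.offset - src.offset`, and undef bits are copied bit-for-bit:
    //
    //     mem.copy(src.into(), align, dest.into(), align, 8, /*nonoverlapping*/ true)?;
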
    pub fn read_c_str(&self, ptr: MemoryPointer) -> EvalResult<'tcx, &[u8]> {
        let alloc = self.get(ptr.alloc_id)?;
        assert_eq!(ptr.offset as usize as u64, ptr.offset);
        let offset = ptr.offset as usize;
        match alloc.bytes[offset..].iter().position(|&c| c == 0) {
            Some(size) => {
                if self.relocations(ptr, (size + 1) as u64)?.count() != 0 {
                    return err!(ReadPointerAsBytes);
                }
                self.check_defined(ptr, (size + 1) as u64)?;
                M::check_locks(self, ptr, (size + 1) as u64, AccessKind::Read)?;
                Ok(&alloc.bytes[offset..offset + size])
            }
            None => err!(UnterminatedCString(ptr)),
        }
    }

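    // Example (editor's note): for an allocation holding b"hi\0", `read_c_str`
    // at offset 0 returns b"hi"; the NUL terminator is excluded from the result,
    // but all three bytes must be defined, lock-free and relocation-free.
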
    pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> {
        // Empty accesses don't need to be valid pointers, but they should still be non-NULL
        let align = Align::from_bytes(1, 1).unwrap();
        self.check_align(ptr, align)?;
        if size == 0 {
            return Ok(&[]);
        }
        self.get_bytes(ptr.to_ptr()?, size, align)
    }

    pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<'tcx> {
        // Empty accesses don't need to be valid pointers, but they should still be non-NULL
        let align = Align::from_bytes(1, 1).unwrap();
        self.check_align(ptr, align)?;
        if src.is_empty() {
            return Ok(());
        }
        let bytes = self.get_bytes_mut(ptr.to_ptr()?, src.len() as u64, align)?;
        bytes.clone_from_slice(src);
        Ok(())
    }

    pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx> {
        // Empty accesses don't need to be valid pointers, but they should still be non-NULL
        let align = Align::from_bytes(1, 1).unwrap();
        self.check_align(ptr, align)?;
        if count == 0 {
            return Ok(());
        }
        let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, align)?;
        for b in bytes {
            *b = val;
        }
        Ok(())
    }

    pub fn read_primval(&self, ptr: MemoryPointer, ptr_align: Align, size: u64) -> EvalResult<'tcx, PrimVal> {
        self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer
        let endianness = self.endianness();
        let bytes = self.get_bytes_unchecked(ptr, size, ptr_align.min(self.int_align(size)))?;
        // Undef check happens *after* we established that the alignment is correct.
        // We must not return Ok() for unaligned pointers!
        if self.check_defined(ptr, size).is_err() {
            return Ok(PrimVal::Undef.into());
        }
        // Now we do the actual reading
        let bytes = read_target_uint(endianness, bytes).unwrap();
        // See if we got a pointer
        if size != self.pointer_size() {
            if self.relocations(ptr, size)?.count() != 0 {
                return err!(ReadPointerAsBytes);
            }
        } else {
            let alloc = self.get(ptr.alloc_id)?;
            match alloc.relocations.get(&ptr.offset) {
                Some(&alloc_id) => return Ok(PrimVal::Ptr(MemoryPointer::new(alloc_id, bytes as u64))),
                None => {},
            }
        }
        // We don't. Just return the bytes.
        Ok(PrimVal::Bytes(bytes))
    }

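    // Round-trip sketch (editor's example, hypothetical `mem`/`ptr`): a 4-byte
    // `write_primval` (defined below) followed by a 4-byte read yields plain
    // bytes, never a pointer, because a 4-byte value on a 64-bit target cannot
    // carry a relocation:
    //
    //     mem.write_primval(ptr.into(), align, PrimVal::Bytes(42), 4, false)?;
    //     assert_eq!(mem.read_primval(ptr, align, 4)?, PrimVal::Bytes(42));
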
    pub fn read_ptr_sized(&self, ptr: MemoryPointer, ptr_align: Align) -> EvalResult<'tcx, PrimVal> {
        self.read_primval(ptr, ptr_align, self.pointer_size())
    }

    pub fn write_primval(&mut self, ptr: Pointer, ptr_align: Align, val: PrimVal, size: u64, signed: bool) -> EvalResult<'tcx> {
        let endianness = self.endianness();

        let bytes = match val {
            PrimVal::Ptr(val) => {
                assert_eq!(size, self.pointer_size());
                val.offset as u128
            }

            PrimVal::Bytes(bytes) => bytes,

            PrimVal::Undef => {
                self.check_align(ptr.into(), ptr_align)?;
                self.mark_definedness(ptr, size, false)?;
                return Ok(());
            }
        };

        let ptr = ptr.to_ptr()?;

        {
            let align = self.int_align(size);
            let dst = self.get_bytes_mut(ptr, size, ptr_align.min(align))?;
            if signed {
                write_target_int(endianness, dst, bytes as i128).unwrap();
            } else {
                write_target_uint(endianness, dst, bytes).unwrap();
            }
        }

        // See if we have to also write a relocation
        match val {
            PrimVal::Ptr(val) => {
                self.get_mut(ptr.alloc_id)?.relocations.insert(
                    ptr.offset,
                    val.alloc_id,
                );
            }
            _ => {}
        }

        Ok(())
    }

    pub fn write_ptr_sized_unsigned(&mut self, ptr: MemoryPointer, ptr_align: Align, val: PrimVal) -> EvalResult<'tcx> {
        let ptr_size = self.pointer_size();
        self.write_primval(ptr.into(), ptr_align, val, ptr_size, false)
    }

    fn int_align(&self, size: u64) -> Align {
        // We assume pointer-sized integers have the same alignment as pointers.
        // We also assume signed and unsigned integers of the same size have the same alignment.
        let ity = match size {
            1 => layout::I8,
            2 => layout::I16,
            4 => layout::I32,
            8 => layout::I64,
            16 => layout::I128,
            _ => bug!("bad integer size: {}", size),
        };
        ity.align(self)
    }
}

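// Editor's note: `int_align(4)` is the ABI alignment of a 4-byte integer
// (4 on x86_64). `read_primval` and `write_primval` clamp the caller's
// alignment via `ptr_align.min(self.int_align(size))`, so an integer access
// never demands more alignment than the integer type itself would.
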
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    fn relocations(
        &self,
        ptr: MemoryPointer,
        size: u64,
    ) -> EvalResult<'tcx, btree_map::Range<u64, AllocId>> {
        let start = ptr.offset.saturating_sub(self.pointer_size() - 1);
        let end = ptr.offset + size;
        Ok(self.get(ptr.alloc_id)?.relocations.range(start..end))
    }

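    // Worked example (editor's note): with 8-byte pointers a relocation is
    // keyed by the offset of its *first* byte, so an entry at offset 4 covers
    // bytes 4..12. A query at `ptr.offset = 10` with `size = 2` scans keys in
    // `3..12` (i.e. `10 - 7 .. 10 + 2`) and correctly finds the entry at 4.
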
    fn clear_relocations(&mut self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
        // Find all relocations overlapping the given range.
        let keys: Vec<_> = self.relocations(ptr, size)?.map(|(&k, _)| k).collect();
        if keys.is_empty() {
            return Ok(());
        }

        // Find the start and end of the given range and its outermost relocations.
        let start = ptr.offset;
        let end = start + size;
        let first = *keys.first().unwrap();
        let last = *keys.last().unwrap() + self.pointer_size();

        let alloc = self.get_mut(ptr.alloc_id)?;

        // Mark parts of the outermost relocations as undefined if they partially fall outside the
        // given range.
        if first < start {
            alloc.undef_mask.set_range(first, start, false);
        }
        if end < last {
            alloc.undef_mask.set_range(end, last, false);
        }

        // Forget all the relocations.
        for k in keys {
            alloc.relocations.remove(&k);
        }

        Ok(())
    }

    fn check_relocation_edges(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
        let overlapping_start = self.relocations(ptr, 0)?.count();
        let overlapping_end = self.relocations(ptr.offset(size, self)?, 0)?.count();
        if overlapping_start + overlapping_end != 0 {
            return err!(ReadPointerAsBytes);
        }
        Ok(())
    }
}

impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    // FIXME(solson): This is a very naive, slow version.
    fn copy_undef_mask(
        &mut self,
        src: MemoryPointer,
        dest: MemoryPointer,
        size: u64,
    ) -> EvalResult<'tcx> {
        // The bits have to be saved locally before writing to dest in case src and dest overlap.
        assert_eq!(size as usize as u64, size);
        let mut v = Vec::with_capacity(size as usize);
        for i in 0..size {
            let defined = self.get(src.alloc_id)?.undef_mask.get(src.offset + i);
            v.push(defined);
        }
        for (i, defined) in v.into_iter().enumerate() {
            self.get_mut(dest.alloc_id)?.undef_mask.set(
                dest.offset + i as u64,
                defined,
            );
        }
        Ok(())
    }

    fn check_defined(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
        let alloc = self.get(ptr.alloc_id)?;
        if !alloc.undef_mask.is_range_defined(
            ptr.offset,
            ptr.offset + size,
        ) {
            return err!(ReadUndefBytes);
        }
        Ok(())
    }

    pub fn mark_definedness(
        &mut self,
        ptr: Pointer,
        size: u64,
        new_state: bool,
    ) -> EvalResult<'tcx> {
        if size == 0 {
            return Ok(());
        }
        let ptr = ptr.to_ptr()?;
        let alloc = self.get_mut(ptr.alloc_id)?;
        alloc.undef_mask.set_range(
            ptr.offset,
            ptr.offset + size,
            new_state,
        );
        Ok(())
    }
}

////////////////////////////////////////////////////////////////////////////////
// Methods to access integers in the target endianness
////////////////////////////////////////////////////////////////////////////////

pub fn write_target_uint(
    endianness: layout::Endian,
    mut target: &mut [u8],
    data: u128,
) -> Result<(), io::Error> {
    let len = target.len();
    match endianness {
        layout::Endian::Little => target.write_uint128::<LittleEndian>(data, len),
        layout::Endian::Big => target.write_uint128::<BigEndian>(data, len),
    }
}

pub fn write_target_int(
    endianness: layout::Endian,
    mut target: &mut [u8],
    data: i128,
) -> Result<(), io::Error> {
    let len = target.len();
    match endianness {
        layout::Endian::Little => target.write_int128::<LittleEndian>(data, len),
        layout::Endian::Big => target.write_int128::<BigEndian>(data, len),
    }
}

pub fn read_target_uint(endianness: layout::Endian, mut source: &[u8]) -> Result<u128, io::Error> {
    match endianness {
        layout::Endian::Little => source.read_uint128::<LittleEndian>(source.len()),
        layout::Endian::Big => source.read_uint128::<BigEndian>(source.len()),
    }
}

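// Round-trip sketch (editor's example): on a little-endian target, writing
// 0x0102 into a 2-byte buffer stores the low byte first, and reading it back
// recovers the original value:
//
//     let mut buf = [0u8; 2];
//     write_target_uint(layout::Endian::Little, &mut buf, 0x0102).unwrap();
//     assert_eq!(buf, [0x02, 0x01]);
//     assert_eq!(read_target_uint(layout::Endian::Little, &buf).unwrap(), 0x0102);
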
////////////////////////////////////////////////////////////////////////////////
// Unaligned accesses
////////////////////////////////////////////////////////////////////////////////

pub trait HasMemory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
    fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M>;
    fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M>;

    /// Convert the value into a pointer (or a pointer-sized integer). If the value is a ByRef,
    /// this may have to perform a load.
    fn into_ptr(
        &mut self,
        value: Value,
    ) -> EvalResult<'tcx, Pointer> {
        Ok(match value {
            Value::ByRef(ptr, align) => {
                self.memory().read_ptr_sized(ptr.to_ptr()?, align)?
            }
            Value::ByVal(ptr) |
            Value::ByValPair(ptr, _) => ptr,
        }.into())
    }

    fn into_ptr_vtable_pair(
        &mut self,
        value: Value,
    ) -> EvalResult<'tcx, (Pointer, MemoryPointer)> {
        match value {
            Value::ByRef(ref_ptr, align) => {
                let mem = self.memory();
                let ptr = mem.read_ptr_sized(ref_ptr.to_ptr()?, align)?.into();
                let vtable = mem.read_ptr_sized(
                    ref_ptr.offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?,
                    align,
                )?.to_ptr()?;
                Ok((ptr, vtable))
            }

            Value::ByValPair(ptr, vtable) => Ok((ptr.into(), vtable.to_ptr()?)),

            Value::ByVal(PrimVal::Undef) => err!(ReadUndefBytes),
            _ => bug!("expected ptr and vtable, got {:?}", value),
        }
    }

    fn into_slice(
        &mut self,
        value: Value,
    ) -> EvalResult<'tcx, (Pointer, u64)> {
        match value {
            Value::ByRef(ref_ptr, align) => {
                let mem = self.memory();
                let ptr = mem.read_ptr_sized(ref_ptr.to_ptr()?, align)?.into();
                let len = mem.read_ptr_sized(
                    ref_ptr.offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?,
                    align,
                )?.to_bytes()? as u64;
                Ok((ptr, len))
            }
            Value::ByValPair(ptr, val) => {
                let len = val.to_u128()?;
                assert_eq!(len as u64 as u128, len);
                Ok((ptr.into(), len as u64))
            }
            Value::ByVal(PrimVal::Undef) => err!(ReadUndefBytes),
            Value::ByVal(_) => bug!("expected ptr and length, got {:?}", value),
        }
    }
}

impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for Memory<'a, 'mir, 'tcx, M> {
    #[inline]
    fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> {
        self
    }

    #[inline]
    fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> {
        self
    }
}

impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for EvalContext<'a, 'mir, 'tcx, M> {
    #[inline]
    fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> {
        &mut self.memory
    }

    #[inline]
    fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> {
        &self.memory
    }
}

impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> layout::HasDataLayout for &'a Memory<'a, 'mir, 'tcx, M> {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        &self.tcx.data_layout
    }
}