1 use std::collections::VecDeque;
2 use std::hash::{Hash, Hasher};
5 use rustc::hir::def_id::DefId;
6 use rustc::ty::Instance;
7 use rustc::ty::ParamEnv;
8 use rustc::ty::query::TyCtxtAt;
9 use rustc::ty::layout::{self, Align, TargetDataLayout, Size};
10 use rustc::mir::interpret::{Pointer, AllocId, Allocation, AccessKind, Value, ScalarMaybeUndef,
11 EvalResult, Scalar, EvalErrorKind, GlobalId, AllocType};
12 pub use rustc::mir::interpret::{write_target_uint, write_target_int, read_target_uint};
13 use rustc_data_structures::fx::{FxHashSet, FxHashMap, FxHasher};
15 use syntax::ast::Mutability;
17 use super::{EvalContext, Machine};
19 ////////////////////////////////////////////////////////////////////////////////
20 // Allocations and pointers
21 ////////////////////////////////////////////////////////////////////////////////
23 #[derive(Debug, PartialEq, Eq, Copy, Clone)]
24 pub enum MemoryKind<T> {
25 /// Error if deallocated except during a stack pop
27 /// Additional memory kinds a machine wishes to distinguish from the builtin ones
31 ////////////////////////////////////////////////////////////////////////////////
32 // Top-level interpreter memory
33 ////////////////////////////////////////////////////////////////////////////////
36 pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
37 /// Additional data required by the Machine
38 pub data: M::MemoryData,
40 /// Helps guarantee that stack allocations aren't deallocated via `rust_deallocate`
41 alloc_kind: FxHashMap<AllocId, MemoryKind<M::MemoryKinds>>,
43 /// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations).
44 alloc_map: FxHashMap<AllocId, Allocation>,
46 /// The current stack frame. Used to check accesses against locks.
49 pub tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
52 impl<'a, 'mir, 'tcx, M> Eq for Memory<'a, 'mir, 'tcx, M>
53 where M: Machine<'mir, 'tcx>,
57 impl<'a, 'mir, 'tcx, M> PartialEq for Memory<'a, 'mir, 'tcx, M>
58 where M: Machine<'mir, 'tcx>,
61 fn eq(&self, other: &Self) -> bool {
71 && *alloc_kind == other.alloc_kind
72 && *alloc_map == other.alloc_map
73 && *cur_frame == other.cur_frame
77 impl<'a, 'mir, 'tcx, M> Hash for Memory<'a, 'mir, 'tcx, M>
78 where M: Machine<'mir, 'tcx>,
81 fn hash<H: Hasher>(&self, state: &mut H) {
91 cur_frame.hash(state);
93 // We ignore some fields which don't change between evaluation steps.
95 // Since HashMaps that contain the same items may have different
96 // iteration orders, we use a commutative operation (in this case
97 // addition, but XOR would also work) to combine the hash of each
101 let mut h = FxHasher::default();
105 .fold(0u64, |hash, x| hash.wrapping_add(x))
110 impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
111 pub fn new(tcx: TyCtxtAt<'a, 'tcx, 'tcx>, data: M::MemoryData) -> Self {
114 alloc_kind: FxHashMap::default(),
115 alloc_map: FxHashMap::default(),
117 cur_frame: usize::max_value(),
121 pub fn allocations<'x>(
123 ) -> impl Iterator<Item = (AllocId, &'x Allocation)> {
124 self.alloc_map.iter().map(|(&id, alloc)| (id, alloc))
127 pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> Pointer {
128 self.tcx.alloc_map.lock().create_fn_alloc(instance).into()
131 pub fn allocate_bytes(&mut self, bytes: &[u8]) -> Pointer {
132 self.tcx.allocate_bytes(bytes).into()
135 /// kind is `None` for statics
136 pub fn allocate_value(
139 kind: MemoryKind<M::MemoryKinds>,
140 ) -> EvalResult<'tcx, AllocId> {
141 let id = self.tcx.alloc_map.lock().reserve();
142 M::add_lock(self, id);
143 self.alloc_map.insert(id, alloc);
144 self.alloc_kind.insert(id, kind);
148 /// kind is `None` for statics
153 kind: MemoryKind<M::MemoryKinds>,
154 ) -> EvalResult<'tcx, Pointer> {
155 self.allocate_value(Allocation::undef(size, align), kind).map(Pointer::from)
165 kind: MemoryKind<M::MemoryKinds>,
166 ) -> EvalResult<'tcx, Pointer> {
167 if ptr.offset.bytes() != 0 {
168 return err!(ReallocateNonBasePtr);
170 if self.alloc_map.contains_key(&ptr.alloc_id) {
171 let alloc_kind = self.alloc_kind[&ptr.alloc_id];
172 if alloc_kind != kind {
173 return err!(ReallocatedWrongMemoryKind(
174 format!("{:?}", alloc_kind),
175 format!("{:?}", kind),
180 // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc"
181 let new_ptr = self.allocate(new_size, new_align, kind)?;
187 old_size.min(new_size),
191 self.deallocate(ptr, Some((old_size, old_align)), kind)?;
196 pub fn deallocate_local(&mut self, ptr: Pointer) -> EvalResult<'tcx> {
197 match self.alloc_kind.get(&ptr.alloc_id).cloned() {
198 Some(MemoryKind::Stack) => self.deallocate(ptr, None, MemoryKind::Stack),
199 // Happens if the memory was interned into immutable memory
201 other => bug!("local contained non-stack memory: {:?}", other),
208 size_and_align: Option<(Size, Align)>,
209 kind: MemoryKind<M::MemoryKinds>,
210 ) -> EvalResult<'tcx> {
211 if ptr.offset.bytes() != 0 {
212 return err!(DeallocateNonBasePtr);
215 let alloc = match self.alloc_map.remove(&ptr.alloc_id) {
216 Some(alloc) => alloc,
218 return match self.tcx.alloc_map.lock().get(ptr.alloc_id) {
219 Some(AllocType::Function(..)) => err!(DeallocatedWrongMemoryKind(
220 "function".to_string(),
221 format!("{:?}", kind),
223 Some(AllocType::Static(..)) |
224 Some(AllocType::Memory(..)) => err!(DeallocatedWrongMemoryKind(
225 "static".to_string(),
226 format!("{:?}", kind),
228 None => err!(DoubleFree)
233 let alloc_kind = self.alloc_kind.remove(&ptr.alloc_id).expect("alloc_map out of sync with alloc_kind");
235 // It is okay for us to still hold locks on deallocation -- for example, we could store data we own
236 // in a local, and the local could be deallocated (from StorageDead) before the function returns.
237 // However, we should check *something*. For now, we make sure that there is no conflicting write
238 // lock by another frame. We *have* to permit deallocation if we hold a read lock.
239 // TODO: Figure out the exact rules here.
240 M::free_lock(self, ptr.alloc_id, alloc.bytes.len() as u64)?;
242 if alloc_kind != kind {
243 return err!(DeallocatedWrongMemoryKind(
244 format!("{:?}", alloc_kind),
245 format!("{:?}", kind),
248 if let Some((size, align)) = size_and_align {
249 if size.bytes() != alloc.bytes.len() as u64 || align != alloc.align {
250 return err!(IncorrectAllocationInformation(size, Size::from_bytes(alloc.bytes.len() as u64), align, alloc.align));
254 debug!("deallocated: {}", ptr.alloc_id);
259 pub fn pointer_size(&self) -> Size {
260 self.tcx.data_layout.pointer_size
263 pub fn endianness(&self) -> layout::Endian {
264 self.tcx.data_layout.endian
267 /// Check that the pointer is aligned AND non-NULL.
268 pub fn check_align(&self, ptr: Scalar, required_align: Align) -> EvalResult<'tcx> {
269 // Check non-NULL/Undef, extract offset
270 let (offset, alloc_align) = match ptr {
271 Scalar::Ptr(ptr) => {
272 let alloc = self.get(ptr.alloc_id)?;
273 (ptr.offset.bytes(), alloc.align)
275 Scalar::Bits { bits, size } => {
276 assert_eq!(size as u64, self.pointer_size().bytes());
277 // FIXME: what on earth does this line do? docs or fix needed!
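// This appears intended to truncate `bits` to the pointer width, but note that the
// shift is by the pointer size in *bytes*, not bits, so on a 64-bit target this
// computes `bits % 256`.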
278 let v = ((bits as u128) % (1 << self.pointer_size().bytes())) as u64;
280 return err!(InvalidNullPointerUsage);
282 // the base address of the "integer allocation" is 0 and hence always aligned
287 if alloc_align.abi() < required_align.abi() {
288 return err!(AlignmentCheckFailed {
290 required: required_align,
293 if offset % required_align.abi() == 0 {
296 let has = offset % required_align.abi();
297 err!(AlignmentCheckFailed {
298 has: Align::from_bytes(has, has).unwrap(),
299 required: required_align,
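/// Check that the pointer is within the bounds of its allocation; an offset pointing
/// exactly one past the end still passes, since only `offset > len` is rejected.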
304 pub fn check_bounds(&self, ptr: Pointer, access: bool) -> EvalResult<'tcx> {
305 let alloc = self.get(ptr.alloc_id)?;
306 let allocation_size = alloc.bytes.len() as u64;
307 if ptr.offset.bytes() > allocation_size {
308 return err!(PointerOutOfBounds {
311 allocation_size: Size::from_bytes(allocation_size),
318 /// Allocation accessors
319 impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
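/// Obtain the initial allocation of a static by running the `const_eval` query for it.
/// Foreign statics have no body we could evaluate, so reading them is an error.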
320 fn const_eval_static(&self, def_id: DefId) -> EvalResult<'tcx, &'tcx Allocation> {
321 if self.tcx.is_foreign_item(def_id) {
322 return err!(ReadForeignStatic);
324 let instance = Instance::mono(self.tcx.tcx, def_id);
329 self.tcx.const_eval(ParamEnv::reveal_all().and(gid)).map_err(|err| {
330 // no need to report anything, the const_eval call takes care of that for statics
331 assert!(self.tcx.is_static(def_id).is_some());
332 EvalErrorKind::ReferencedConstant(err).into()
334 self.tcx.const_value_to_allocation(val)
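/// Look up an allocation: interpreter-local allocations take precedence; otherwise we
/// fall back to the global (tcx) allocation map, which covers statics, functions, and
/// other interned memory.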
338 pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation> {
340 match self.alloc_map.get(&id) {
341 Some(alloc) => Ok(alloc),
342 // uninitialized static alloc?
345 let alloc = self.tcx.alloc_map.lock().get(id);
347 Some(AllocType::Memory(mem)) => Ok(mem),
348 Some(AllocType::Function(..)) => {
349 Err(EvalErrorKind::DerefFunctionPointer.into())
351 Some(AllocType::Static(did)) => {
352 self.const_eval_static(did)
354 None => Err(EvalErrorKind::DanglingPointerDeref.into()),
363 ) -> EvalResult<'tcx, &mut Allocation> {
365 match self.alloc_map.get_mut(&id) {
366 Some(alloc) => Ok(alloc),
367 // uninitialized static alloc?
369 // no alloc or immutable alloc? produce an error
370 match self.tcx.alloc_map.lock().get(id) {
371 Some(AllocType::Memory(..)) |
372 Some(AllocType::Static(..)) => err!(ModifiedConstantMemory),
373 Some(AllocType::Function(..)) => err!(DerefFunctionPointer),
374 None => err!(DanglingPointerDeref),
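/// Resolve a function pointer to the `Instance` it was created for. Only pointers at
/// offset 0 into a function allocation are valid function pointers.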
380 pub fn get_fn(&self, ptr: Pointer) -> EvalResult<'tcx, Instance<'tcx>> {
381 if ptr.offset.bytes() != 0 {
382 return err!(InvalidFunctionPointer);
384 debug!("reading fn ptr: {}", ptr.alloc_id);
385 match self.tcx.alloc_map.lock().get(ptr.alloc_id) {
386 Some(AllocType::Function(instance)) => Ok(instance),
387 _ => Err(EvalErrorKind::ExecuteMemory.into()),
391 pub fn get_alloc_kind(&self, id: AllocId) -> Option<MemoryKind<M::MemoryKinds>> {
392 self.alloc_kind.get(&id).cloned()
395 /// For debugging, print an allocation and all allocations it points to, recursively.
396 pub fn dump_alloc(&self, id: AllocId) {
397 if !log_enabled!(::log::Level::Trace) {
400 self.dump_allocs(vec![id]);
403 /// For debugging, print a list of allocations and all allocations they point to, recursively.
404 pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
405 if !log_enabled!(::log::Level::Trace) {
411 let mut allocs_to_print = VecDeque::from(allocs);
412 let mut allocs_seen = FxHashSet::default();
414 while let Some(id) = allocs_to_print.pop_front() {
415 let mut msg = format!("Alloc {:<5} ", format!("{}:", id));
416 let prefix_len = msg.len();
417 let mut relocations = vec![];
419 let (alloc, immutable) =
421 match self.alloc_map.get(&id) {
422 Some(a) => (a, match self.alloc_kind[&id] {
423 MemoryKind::Stack => " (stack)".to_owned(),
424 MemoryKind::Machine(m) => format!(" ({:?})", m),
428 match self.tcx.alloc_map.lock().get(id) {
429 Some(AllocType::Memory(a)) => (a, "(immutable)".to_owned()),
430 Some(AllocType::Function(func)) => {
431 trace!("{} {}", msg, func);
434 Some(AllocType::Static(did)) => {
435 trace!("{} {:?}", msg, did);
439 trace!("{} (deallocated)", msg);
446 for i in 0..(alloc.bytes.len() as u64) {
447 let i = Size::from_bytes(i);
448 if let Some(&target_id) = alloc.relocations.get(&i) {
449 if allocs_seen.insert(target_id) {
450 allocs_to_print.push_back(target_id);
452 relocations.push((i, target_id));
454 if alloc.undef_mask.is_range_defined(i, i + Size::from_bytes(1)) {
455 // this `as usize` is fine, since `i` came from a `usize`
456 write!(msg, "{:02x} ", alloc.bytes[i.bytes() as usize]).unwrap();
463 "{}({} bytes, alignment {}){}",
470 if !relocations.is_empty() {
472 write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
473 let mut pos = Size::ZERO;
474 let relocation_width = (self.pointer_size().bytes() - 1) * 3;
475 for (i, target_id) in relocations {
476 // this `as usize` is fine, since we can't print more chars than `usize::MAX`
477 write!(msg, "{:1$}", "", ((i - pos) * 3).bytes() as usize).unwrap();
478 let target = format!("({})", target_id);
479 // this `as usize` is fine, since we can't print more chars than `usize::MAX`
480 write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
481 pos = i + self.pointer_size();
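/// Print (via `trace!`) all allocations that are still live, as a leak report.
/// The returned count is presumably the number of leaked allocations.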
488 pub fn leak_report(&self) -> usize {
489 trace!("### LEAK REPORT ###");
490 let leaks: Vec<_> = self.alloc_map
495 self.dump_allocs(leaks);
501 impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
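/// "Unchecked" here means that relocations and undefinedness are *not* checked;
/// alignment, bounds, and (via the machine hook) locks still are.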
502 fn get_bytes_unchecked(
507 ) -> EvalResult<'tcx, &[u8]> {
508 // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
509 self.check_align(ptr.into(), align)?;
510 if size.bytes() == 0 {
513 M::check_locks(self, ptr, size, AccessKind::Read)?;
514 self.check_bounds(ptr.offset(size, self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
515 let alloc = self.get(ptr.alloc_id)?;
516 assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
517 assert_eq!(size.bytes() as usize as u64, size.bytes());
518 let offset = ptr.offset.bytes() as usize;
519 Ok(&alloc.bytes[offset..offset + size.bytes() as usize])
522 fn get_bytes_unchecked_mut(
527 ) -> EvalResult<'tcx, &mut [u8]> {
528 // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
529 self.check_align(ptr.into(), align)?;
530 if size.bytes() == 0 {
533 M::check_locks(self, ptr, size, AccessKind::Write)?;
534 self.check_bounds(ptr.offset(size, &*self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
535 let alloc = self.get_mut(ptr.alloc_id)?;
536 assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
537 assert_eq!(size.bytes() as usize as u64, size.bytes());
538 let offset = ptr.offset.bytes() as usize;
539 Ok(&mut alloc.bytes[offset..offset + size.bytes() as usize])
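/// Like `get_bytes_unchecked`, but additionally errors if the range contains
/// relocations or undefined bytes.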
542 fn get_bytes(&self, ptr: Pointer, size: Size, align: Align) -> EvalResult<'tcx, &[u8]> {
543 assert_ne!(size.bytes(), 0);
544 if self.relocations(ptr, size)?.len() != 0 {
545 return err!(ReadPointerAsBytes);
547 self.check_defined(ptr, size)?;
548 self.get_bytes_unchecked(ptr, size, align)
556 ) -> EvalResult<'tcx, &mut [u8]> {
557 assert_ne!(size.bytes(), 0);
558 self.clear_relocations(ptr, size)?;
559 self.mark_definedness(ptr.into(), size, true)?;
560 self.get_bytes_unchecked_mut(ptr, size, align)
564 /// Reading and writing
565 impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
566 /// mark an allocation pointed to by a static as static and initialized
567 fn mark_inner_allocation_initialized(
570 mutability: Mutability,
571 ) -> EvalResult<'tcx> {
572 match self.alloc_kind.get(&alloc) {
573 // do not go into statics
575 // just locals and machine allocs
576 Some(_) => self.mark_static_initialized(alloc, mutability),
580 /// mark an allocation as static and initialized, either mutable or not
581 pub fn mark_static_initialized(
584 mutability: Mutability,
585 ) -> EvalResult<'tcx> {
587 "mark_static_initialized {:?}, mutability: {:?}",
591 // The machine handled it
592 if M::mark_static_initialized(self, alloc_id, mutability)? {
595 let alloc = self.alloc_map.remove(&alloc_id);
596 match self.alloc_kind.remove(&alloc_id) {
598 Some(MemoryKind::Machine(_)) => bug!("machine didn't handle machine alloc"),
599 Some(MemoryKind::Stack) => {},
601 if let Some(mut alloc) = alloc {
602 // ensure LLVM knows not to put this into immutable memory
603 alloc.runtime_mutability = mutability;
604 let alloc = self.tcx.intern_const_alloc(alloc);
605 self.tcx.alloc_map.lock().set_id_memory(alloc_id, alloc);
606 // recurse into inner allocations
607 for &alloc in alloc.relocations.values() {
608 self.mark_inner_allocation_initialized(alloc, mutability)?;
611 bug!("no allocation found for {:?}", alloc_id);
623 nonoverlapping: bool,
624 ) -> EvalResult<'tcx> {
625 self.copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
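/// Copy `size` bytes from `src` to `dest`, repeating the source contents `length`
/// times at the destination; `copy` above is simply the `length == 1` case.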
628 pub fn copy_repeatedly(
636 nonoverlapping: bool,
637 ) -> EvalResult<'tcx> {
638 // Empty accesses don't need to be valid pointers, but they should still be aligned
639 self.check_align(src, src_align)?;
640 self.check_align(dest, dest_align)?;
641 if size.bytes() == 0 {
644 let src = src.to_ptr()?;
645 let dest = dest.to_ptr()?;
646 self.check_relocation_edges(src, size)?;
648 // first copy the relocations to a temporary buffer, because
649 // `get_bytes_mut` will clear the relocations, which is correct,
650 // since we don't want to keep any relocations at the target.
652 let relocations = self.relocations(src, size)?;
653 let mut new_relocations = Vec::with_capacity(relocations.len() * (length as usize));
655 new_relocations.extend(
658 .map(|&(offset, alloc_id)| {
659 (offset + dest.offset - src.offset + (i * size * relocations.len() as u64), alloc_id)
667 let src_bytes = self.get_bytes_unchecked(src, size, src_align)?.as_ptr();
668 let dest_bytes = self.get_bytes_mut(dest, size * length, dest_align)?.as_mut_ptr();
670 // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
671 // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
672 // `dest` could possibly overlap.
674 assert_eq!(size.bytes() as usize as u64, size.bytes());
675 if src.alloc_id == dest.alloc_id {
677 if (src.offset <= dest.offset && src.offset + size > dest.offset) ||
678 (dest.offset <= src.offset && dest.offset + size > src.offset)
680 return err!(Intrinsic(
681 "copy_nonoverlapping called on overlapping ranges".to_string(),
687 ptr::copy(src_bytes, dest_bytes.offset((size.bytes() * i) as isize), size.bytes() as usize);
691 ptr::copy_nonoverlapping(src_bytes, dest_bytes.offset((size.bytes() * i) as isize), size.bytes() as usize);
696 self.copy_undef_mask(src, dest, size, length)?;
697 // copy back the relocations
698 self.get_mut(dest.alloc_id)?.relocations.insert_presorted(relocations);
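/// Read bytes until the first null terminator (which is not included in the result).
/// Errors if no null byte is found in the allocation, or if the range contains
/// relocations or undefined bytes.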
703 pub fn read_c_str(&self, ptr: Pointer) -> EvalResult<'tcx, &[u8]> {
704 let alloc = self.get(ptr.alloc_id)?;
705 assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
706 let offset = ptr.offset.bytes() as usize;
707 match alloc.bytes[offset..].iter().position(|&c| c == 0) {
709 let p1 = Size::from_bytes((size + 1) as u64);
710 if self.relocations(ptr, p1)?.len() != 0 {
711 return err!(ReadPointerAsBytes);
713 self.check_defined(ptr, p1)?;
714 M::check_locks(self, ptr, p1, AccessKind::Read)?;
715 Ok(&alloc.bytes[offset..offset + size])
717 None => err!(UnterminatedCString(ptr)),
721 pub fn read_bytes(&self, ptr: Scalar, size: Size) -> EvalResult<'tcx, &[u8]> {
722 // Empty accesses don't need to be valid pointers, but they should still be non-NULL
723 let align = Align::from_bytes(1, 1).unwrap();
724 self.check_align(ptr, align)?;
725 if size.bytes() == 0 {
728 self.get_bytes(ptr.to_ptr()?, size, align)
731 pub fn write_bytes(&mut self, ptr: Scalar, src: &[u8]) -> EvalResult<'tcx> {
732 // Empty accesses don't need to be valid pointers, but they should still be non-NULL
733 let align = Align::from_bytes(1, 1).unwrap();
734 self.check_align(ptr, align)?;
738 let bytes = self.get_bytes_mut(ptr.to_ptr()?, Size::from_bytes(src.len() as u64), align)?;
739 bytes.clone_from_slice(src);
743 pub fn write_repeat(&mut self, ptr: Scalar, val: u8, count: Size) -> EvalResult<'tcx> {
744 // Empty accesses don't need to be valid pointers, but they should still be non-NULL
745 let align = Align::from_bytes(1, 1).unwrap();
746 self.check_align(ptr, align)?;
747 if count.bytes() == 0 {
750 let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, align)?;
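/// Read a scalar of the given size. Undefined bytes make the whole result `Undef`;
/// for pointer-sized reads, a relocation at this offset turns the raw bits back into
/// a `Pointer`, while relocations inside a smaller read are an error.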
757 pub fn read_scalar(&self, ptr: Pointer, ptr_align: Align, size: Size) -> EvalResult<'tcx, ScalarMaybeUndef> {
758 self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer
759 let endianness = self.endianness();
760 let bytes = self.get_bytes_unchecked(ptr, size, ptr_align.min(self.int_align(size)))?;
761 // Undef check happens *after* we established that the alignment is correct.
762 // We must not return Ok() for unaligned pointers!
763 if self.check_defined(ptr, size).is_err() {
764 // this inflates undefined bytes to the entire scalar, even if only a few bytes are undefined
765 return Ok(ScalarMaybeUndef::Undef);
767 // Now we do the actual reading
768 let bits = read_target_uint(endianness, bytes).unwrap();
769 // See if we got a pointer
770 if size != self.pointer_size() {
771 if self.relocations(ptr, size)?.len() != 0 {
772 return err!(ReadPointerAsBytes);
775 let alloc = self.get(ptr.alloc_id)?;
776 match alloc.relocations.get(&ptr.offset) {
777 Some(&alloc_id) => return Ok(ScalarMaybeUndef::Scalar(Pointer::new(alloc_id, Size::from_bytes(bits as u64)).into())),
781 // We don't. Just return the bits.
782 Ok(ScalarMaybeUndef::Scalar(Scalar::Bits {
784 size: size.bytes() as u8,
788 pub fn read_ptr_sized(&self, ptr: Pointer, ptr_align: Align) -> EvalResult<'tcx, ScalarMaybeUndef> {
789 self.read_scalar(ptr, ptr_align, self.pointer_size())
796 val: ScalarMaybeUndef,
800 ) -> EvalResult<'tcx> {
801 let endianness = self.endianness();
803 let val = match val {
804 ScalarMaybeUndef::Scalar(scalar) => scalar,
805 ScalarMaybeUndef::Undef => return self.mark_definedness(ptr, type_size, false),
808 let bytes = match val {
809 Scalar::Ptr(val) => {
810 assert_eq!(type_size, self.pointer_size());
811 val.offset.bytes() as u128
814 Scalar::Bits { size: 0, .. } => {
815 // nothing to do for ZSTs
816 assert_eq!(type_size.bytes(), 0);
820 Scalar::Bits { bits, size } => {
821 assert_eq!(size as u64, type_size.bytes());
826 let ptr = ptr.to_ptr()?;
829 let dst = self.get_bytes_mut(ptr, type_size, ptr_align.min(type_align))?;
831 write_target_int(endianness, dst, bytes as i128).unwrap();
833 write_target_uint(endianness, dst, bytes).unwrap();
837 // See if we have to also write a relocation
839 Scalar::Ptr(val) => {
840 self.get_mut(ptr.alloc_id)?.relocations.insert(
851 pub fn write_ptr_sized_unsigned(&mut self, ptr: Pointer, ptr_align: Align, val: ScalarMaybeUndef) -> EvalResult<'tcx> {
852 let ptr_size = self.pointer_size();
853 self.write_scalar(ptr.into(), ptr_align, val, ptr_size, ptr_align, false)
856 fn int_align(&self, size: Size) -> Align {
857 // We assume pointer-sized integers have the same alignment as pointers.
858 // We also assume signed and unsigned integers of the same size have the same alignment.
859 let ity = match size.bytes() {
865 _ => bug!("bad integer size: {}", size.bytes()),
872 impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
877 ) -> EvalResult<'tcx, &[(Size, AllocId)]> {
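// A relocation covers a pointer-sized range, so anything starting up to
// `pointer_size - 1` bytes before `ptr` can still overlap the queried range.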
878 let start = ptr.offset.bytes().saturating_sub(self.pointer_size().bytes() - 1);
879 let end = ptr.offset + size;
880 Ok(self.get(ptr.alloc_id)?.relocations.range(Size::from_bytes(start)..end))
883 fn clear_relocations(&mut self, ptr: Pointer, size: Size) -> EvalResult<'tcx> {
884 // Find the start and end of the given range and its outermost relocations.
885 let (first, last) = {
886 // Find all relocations overlapping the given range.
887 let relocations = self.relocations(ptr, size)?;
888 if relocations.is_empty() {
892 (relocations.first().unwrap().0,
893 relocations.last().unwrap().0 + self.pointer_size())
895 let start = ptr.offset;
896 let end = start + size;
898 let alloc = self.get_mut(ptr.alloc_id)?;
900 // Mark parts of the outermost relocations as undefined if they partially fall outside the
903 alloc.undef_mask.set_range(first, start, false);
906 alloc.undef_mask.set_range(end, last, false);
909 // Forget all the relocations.
910 alloc.relocations.remove_range(first..last);
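/// Error if a relocation straddles the first or last byte of the given range,
/// i.e. if an access of this range would read or write only part of a pointer.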
915 fn check_relocation_edges(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> {
916 let overlapping_start = self.relocations(ptr, Size::ZERO)?.len();
917 let overlapping_end = self.relocations(ptr.offset(size, self)?, Size::ZERO)?.len();
918 if overlapping_start + overlapping_end != 0 {
919 return err!(ReadPointerAsBytes);
926 impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
927 // FIXME(solson): This is a very naive, slow version.
934 ) -> EvalResult<'tcx> {
935 // The bits have to be saved locally before writing to dest in case src and dest overlap.
936 assert_eq!(size.bytes() as usize as u64, size.bytes());
938 let undef_mask = self.get(src.alloc_id)?.undef_mask.clone();
939 let dest_allocation = self.get_mut(dest.alloc_id)?;
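// Mirror each source byte's definedness into every repetition at the destination
// (the inner loop over `j` ranges over the repeat count).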
941 for i in 0..size.bytes() {
942 let defined = undef_mask.get(src.offset + Size::from_bytes(i));
945 dest_allocation.undef_mask.set(
946 dest.offset + Size::from_bytes(i + (size.bytes() * j)),
955 fn check_defined(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> {
956 let alloc = self.get(ptr.alloc_id)?;
957 if !alloc.undef_mask.is_range_defined(
962 return err!(ReadUndefBytes);
967 pub fn mark_definedness(
972 ) -> EvalResult<'tcx> {
973 if size.bytes() == 0 {
976 let ptr = ptr.to_ptr()?;
977 let alloc = self.get_mut(ptr.alloc_id)?;
978 alloc.undef_mask.set_range(
987 ////////////////////////////////////////////////////////////////////////////////
988 // Unaligned accesses
989 ////////////////////////////////////////////////////////////////////////////////
991 pub trait HasMemory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
992 fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M>;
993 fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M>;
995 /// Convert the value into a pointer (or a pointer-sized integer). If the value is a ByRef,
996 /// this may have to perform a load.
1000 ) -> EvalResult<'tcx, ScalarMaybeUndef> {
1002 Value::ByRef(ptr, align) => {
1003 self.memory().read_ptr_sized(ptr.to_ptr()?, align)?
1005 Value::Scalar(ptr) |
1006 Value::ScalarPair(ptr, _) => ptr,
1010 fn into_ptr_vtable_pair(
1013 ) -> EvalResult<'tcx, (ScalarMaybeUndef, Pointer)> {
1015 Value::ByRef(ref_ptr, align) => {
1016 let mem = self.memory();
1017 let ptr = mem.read_ptr_sized(ref_ptr.to_ptr()?, align)?.into();
1018 let vtable = mem.read_ptr_sized(
1019 ref_ptr.ptr_offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?,
1021 )?.unwrap_or_err()?.to_ptr()?;
1025 Value::ScalarPair(ptr, vtable) => Ok((ptr, vtable.unwrap_or_err()?.to_ptr()?)),
1026 _ => bug!("expected ptr and vtable, got {:?}", value),
1033 ) -> EvalResult<'tcx, (ScalarMaybeUndef, u64)> {
1035 Value::ByRef(ref_ptr, align) => {
1036 let mem = self.memory();
1037 let ptr = mem.read_ptr_sized(ref_ptr.to_ptr()?, align)?.into();
1038 let len = mem.read_ptr_sized(
1039 ref_ptr.ptr_offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?,
1041 )?.unwrap_or_err()?.to_bits(mem.pointer_size())? as u64;
1044 Value::ScalarPair(ptr, val) => {
1045 let len = val.unwrap_or_err()?.to_bits(self.memory().pointer_size())?;
1046 Ok((ptr, len as u64))
1048 Value::Scalar(_) => bug!("expected ptr and length, got {:?}", value),
1053 impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for Memory<'a, 'mir, 'tcx, M> {
1055 fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> {
1060 fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> {
1065 impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for EvalContext<'a, 'mir, 'tcx, M> {
1067 fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> {
1072 fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> {
1077 impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> layout::HasDataLayout for &'a Memory<'a, 'mir, 'tcx, M> {
1079 fn data_layout(&self) -> &TargetDataLayout {
1080 &self.tcx.data_layout