// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-NULL)
//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the empty case!
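//!
//! For example, a zero-sized access through a dangling-but-aligned integer address must
//! still be checked. A sketch of what this convention implies for callers (illustrative
//! only, not a doctest; `mem` stands in for some `Memory` instance):
//!
//! ```ignore
//! let addr = Scalar::from_uint(16u64, mem.pointer_size()); // no allocation behind this
//! mem.check_align(addr, Align::from_bytes(8).unwrap())?;   // Ok: non-NULL and 16 % 8 == 0
//! mem.read_bytes(addr, Size::ZERO)?;                       // empty access, Ok
//! ```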

use std::collections::VecDeque;
use std::ptr;
use std::borrow::Cow;

use rustc::ty::{self, Instance, ParamEnv, query::TyCtxtAt};
use rustc::ty::layout::{self, Align, TargetDataLayout, Size, HasDataLayout};
pub use rustc::mir::interpret::{truncate, write_target_uint, read_target_uint};
use rustc_data_structures::fx::{FxHashSet, FxHashMap};

use syntax::ast::Mutability;

use super::{
    Pointer, AllocId, Allocation, GlobalId, AllocationExtra, InboundsCheck,
    EvalResult, Scalar, EvalErrorKind, AllocType, PointerArithmetic,
    Machine, AllocMap, MayLeak, ScalarMaybeUndef, ErrorHandled,
};

#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
pub enum MemoryKind<T> {
    /// Error if deallocated except during a stack pop
    Stack,
    /// Error if ever deallocated
    Vtable,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones
    Machine(T),
}

impl<T: MayLeak> MayLeak for MemoryKind<T> {
    #[inline]
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::Vtable => true,
            MemoryKind::Machine(k) => k.may_leak(),
        }
    }
}

// `Memory` has to depend on the `Machine` because some of its operations
// (e.g. `get`) call a `Machine` hook.
pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> {
    /// Allocations local to this instance of the miri engine. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// static and looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a static (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a static, doing so will
    /// create a copy of the static allocation here.
    alloc_map: M::MemoryMap,

    /// To be able to compare pointers with NULL, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist any more.
    dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,

    /// Lets us implement `HasDataLayout`, which is awfully convenient.
    pub(super) tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
}

impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout
    for Memory<'a, 'mir, 'tcx, M>
{
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        &self.tcx.data_layout
    }
}

// FIXME: Really we shouldn't clone memory, ever. Snapshot machinery should instead
// carefully copy only the reachable parts.
impl<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>>
    Clone for Memory<'a, 'mir, 'tcx, M>
{
    fn clone(&self) -> Self {
        Memory {
            alloc_map: self.alloc_map.clone(),
            dead_alloc_map: self.dead_alloc_map.clone(),
            tcx: self.tcx,
        }
    }
}

impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    pub fn new(tcx: TyCtxtAt<'a, 'tcx, 'tcx>) -> Self {
        Memory {
            alloc_map: Default::default(),
            dead_alloc_map: FxHashMap::default(),
            tcx,
        }
    }

    pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> Pointer {
        Pointer::from(self.tcx.alloc_map.lock().create_fn_alloc(instance))
    }

    pub fn allocate_static_bytes(&mut self, bytes: &[u8]) -> Pointer {
        Pointer::from(self.tcx.allocate_bytes(bytes))
    }

    pub fn allocate_with(
        &mut self,
        alloc: Allocation<M::PointerTag, M::AllocExtra>,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> EvalResult<'tcx, AllocId> {
        let id = self.tcx.alloc_map.lock().reserve();
        self.alloc_map.insert(id, (kind, alloc));
        Ok(id)
    }

    pub fn allocate(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> EvalResult<'tcx, Pointer> {
        Ok(Pointer::from(self.allocate_with(Allocation::undef(size, align), kind)?))
    }

    pub fn reallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        old_size: Size,
        old_align: Align,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> EvalResult<'tcx, Pointer> {
        if ptr.offset.bytes() != 0 {
            return err!(ReallocateNonBasePtr);
        }

        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
        let new_ptr = self.allocate(new_size, new_align, kind)?;
        self.copy(
            ptr.into(),
            old_align,
            new_ptr.with_default_tag().into(),
            new_align,
            old_size.min(new_size),
            /*nonoverlapping*/ true,
        )?;
        self.deallocate(ptr, Some((old_size, old_align)), kind)?;

        Ok(new_ptr)
    }

    /// Deallocate a local, or do nothing if that local has been made into a static.
    pub fn deallocate_local(&mut self, ptr: Pointer<M::PointerTag>) -> EvalResult<'tcx> {
        // The allocation might already have been removed by static interning.
        // This can only really happen in the CTFE instance, not in miri.
        if self.alloc_map.contains_key(&ptr.alloc_id) {
            self.deallocate(ptr, None, MemoryKind::Stack)
        } else {
            Ok(())
        }
    }

    pub fn deallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> EvalResult<'tcx> {
        trace!("deallocating: {}", ptr.alloc_id);

        if ptr.offset.bytes() != 0 {
            return err!(DeallocateNonBasePtr);
        }

        let (alloc_kind, mut alloc) = match self.alloc_map.remove(&ptr.alloc_id) {
            Some(alloc) => alloc,
            None => {
                // Deallocating static memory -- always an error
                return match self.tcx.alloc_map.lock().get(ptr.alloc_id) {
                    Some(AllocType::Function(..)) => err!(DeallocatedWrongMemoryKind(
                        "function".to_string(),
                        format!("{:?}", kind),
                    )),
                    Some(AllocType::Static(..)) |
                    Some(AllocType::Memory(..)) => err!(DeallocatedWrongMemoryKind(
                        "static".to_string(),
                        format!("{:?}", kind),
                    )),
                    None => err!(DoubleFree),
                }
            }
        };

        if alloc_kind != kind {
            return err!(DeallocatedWrongMemoryKind(
                format!("{:?}", alloc_kind),
                format!("{:?}", kind),
            ));
        }
        if let Some((size, align)) = size_and_align {
            if size.bytes() != alloc.bytes.len() as u64 || align != alloc.align {
                let bytes = Size::from_bytes(alloc.bytes.len() as u64);
                return err!(IncorrectAllocationInformation(size, bytes, align, alloc.align));
            }
        }

        // Let the machine take some extra action
        let size = Size::from_bytes(alloc.bytes.len() as u64);
        AllocationExtra::memory_deallocated(&mut alloc, ptr, size)?;

        // Don't forget to remember size and align of this now-dead allocation
        let old = self.dead_alloc_map.insert(
            ptr.alloc_id,
            (Size::from_bytes(alloc.bytes.len() as u64), alloc.align),
        );
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        Ok(())
    }

    /// Check that the pointer is aligned AND non-NULL. This supports ZSTs in two ways:
    /// You can pass a scalar, and a `Pointer` does not have to actually still be allocated.
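    ///
    /// A sketch of both cases (illustrative only, not a doctest; `mem` and
    /// `ptr_into_alloc` are hypothetical):
    ///
    /// ```ignore
    /// // An integer "address" works for ZST accesses if it is aligned and non-NULL:
    /// let int_addr = Scalar::from_uint(32u64, mem.pointer_size());
    /// mem.check_align(int_addr, Align::from_bytes(16).unwrap())?; // Ok: 32 % 16 == 0
    /// // A real (possibly dead) pointer is checked against its allocation's alignment:
    /// mem.check_align(ptr_into_alloc.into(), Align::from_bytes(8).unwrap())?;
    /// ```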
    pub fn check_align(
        &self,
        ptr: Scalar<M::PointerTag>,
        required_align: Align,
    ) -> EvalResult<'tcx> {
        // Check non-NULL/Undef, extract offset
        let (offset, alloc_align) = match ptr {
            Scalar::Ptr(ptr) => {
                // check this is not NULL -- which we can ensure only if this is in-bounds
                // of some (potentially dead) allocation.
                self.check_bounds_ptr(ptr, InboundsCheck::MaybeDead)?;
                // data required for alignment check
                let (_, align) = self.get_size_and_align(ptr.alloc_id);
                (ptr.offset.bytes(), align)
            }
            Scalar::Bits { bits, size } => {
                assert_eq!(size as u64, self.pointer_size().bytes());
                assert!(bits < (1u128 << self.pointer_size().bits()));
                // check this is not NULL
                if bits == 0 {
                    return err!(InvalidNullPointerUsage);
                }
                // the "base address" is 0 and hence always aligned
                (bits as u64, required_align)
            }
        };
        // Check the allocation's alignment first, then the offset within it.
        if alloc_align.bytes() < required_align.bytes() {
            return err!(AlignmentCheckFailed {
                has: alloc_align,
                required: required_align,
            });
        }
        if offset % required_align.bytes() == 0 {
            Ok(())
        } else {
            let has = offset % required_align.bytes();
            err!(AlignmentCheckFailed {
                has: Align::from_bytes(has).unwrap(),
                required: required_align,
            })
        }
    }

    /// Check if the pointer is "in-bounds". Notice that a pointer pointing at the end
    /// of an allocation (i.e., at the first *inaccessible* location) *is* considered
    /// in-bounds! This follows C's/LLVM's rules. `check` indicates whether we
    /// additionally require the pointer to be pointing to a *live* (still allocated)
    /// allocation.
    /// If you want to check bounds before doing a memory access, better use `check_bounds`.
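    ///
    /// For example (illustrative only, not a doctest; `base` is a hypothetical pointer
    /// to the start of a live 4-byte allocation):
    ///
    /// ```ignore
    /// // One past the end is still in-bounds...
    /// mem.check_bounds_ptr(base.offset(Size::from_bytes(4), &mem)?, InboundsCheck::Live)?;
    /// // ...but one byte further is not.
    /// assert!(mem.check_bounds_ptr(base.offset(Size::from_bytes(5), &mem)?,
    ///                              InboundsCheck::Live).is_err());
    /// ```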
    pub fn check_bounds_ptr(
        &self,
        ptr: Pointer<M::PointerTag>,
        check: InboundsCheck,
    ) -> EvalResult<'tcx> {
        let allocation_size = match check {
            InboundsCheck::Live => {
                let alloc = self.get(ptr.alloc_id)?;
                alloc.bytes.len() as u64
            }
            InboundsCheck::MaybeDead => {
                self.get_size_and_align(ptr.alloc_id).0.bytes()
            }
        };
        if ptr.offset.bytes() > allocation_size {
            return err!(PointerOutOfBounds {
                ptr: ptr.erase_tag(),
                check,
                allocation_size: Size::from_bytes(allocation_size),
            });
        }
        Ok(())
    }

    /// Check if the memory range beginning at `ptr` and of size `size` is "in-bounds".
    #[inline(always)]
    pub fn check_bounds(
        &self,
        ptr: Pointer<M::PointerTag>,
        size: Size,
        check: InboundsCheck,
    ) -> EvalResult<'tcx> {
        // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
        self.check_bounds_ptr(ptr.offset(size, &*self)?, check)
    }
}

/// Allocation accessors
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    /// Helper function to obtain the global (tcx) allocation for a static.
    /// This attempts to return a reference to an existing allocation if
    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
    /// this machine use the same pointer tag, so it is indirected through
    /// `M::adjust_static_allocation`.
    fn get_static_alloc(
        tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
        id: AllocId,
    ) -> EvalResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
        let alloc = tcx.alloc_map.lock().get(id);
        let def_id = match alloc {
            Some(AllocType::Memory(mem)) => {
                // We got tcx memory. Let the machine figure out whether and how to
                // turn that into memory with the right pointer tag.
                return Ok(M::adjust_static_allocation(mem))
            }
            Some(AllocType::Function(..)) => {
                return err!(DerefFunctionPointer)
            }
            Some(AllocType::Static(did)) => {
                did
            }
            None =>
                return err!(DanglingPointerDeref),
        };
        // We got a "lazy" static that has not been computed yet, do some work
        trace!("static_alloc: Need to compute {:?}", def_id);
        if tcx.is_foreign_item(def_id) {
            return M::find_foreign_static(tcx, def_id);
        }
        let instance = Instance::mono(tcx.tcx, def_id);
        let gid = GlobalId {
            instance,
            promoted: None,
        };
        // use the raw query here to break validation cycles. Later uses of the static
        // will call the full query anyway.
        tcx.const_eval_raw(ty::ParamEnv::reveal_all().and(gid)).map_err(|err| {
            // no need to report anything, the const_eval call takes care of that for statics
            assert!(tcx.is_static(def_id).is_some());
            match err {
                ErrorHandled::Reported => EvalErrorKind::ReferencedConstant.into(),
                ErrorHandled::TooGeneric => EvalErrorKind::TooGeneric.into(),
            }
        }).map(|raw_const| {
            let allocation = tcx.alloc_map.lock().unwrap_memory(raw_const.alloc_id);
            // We got tcx memory. Let the machine figure out whether and how to
            // turn that into memory with the right pointer tag.
            M::adjust_static_allocation(allocation)
        })
    }

    pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
        // The error type of the inner closure here is somewhat funny. We have two
        // ways of "erroring": an actual error, or because we got a reference from
        // `get_static_alloc` that we can actually use directly without inserting anything anywhere.
        // So the error type is `EvalResult<'tcx, &Allocation<M::PointerTag>>`.
        let a = self.alloc_map.get_or(id, || {
            let alloc = Self::get_static_alloc(self.tcx, id).map_err(Err)?;
            match alloc {
                Cow::Borrowed(alloc) => {
                    // We got a ref, cheaply return that as an "error" so that the
                    // map does not get mutated.
                    Err(Ok(alloc))
                }
                Cow::Owned(alloc) => {
                    // Need to put it into the map and return a ref to that
                    let kind = M::STATIC_KIND.expect(
                        "I got an owned allocation that I have to copy but the machine does \
                         not expect that to happen"
                    );
                    Ok((MemoryKind::Machine(kind), alloc))
                }
            }
        });
        // Now unpack that funny error type
        match a {
            Ok(a) => Ok(&a.1),
            Err(a) => a,
        }
    }

    pub fn get_mut(
        &mut self,
        id: AllocId,
    ) -> EvalResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
        let tcx = self.tcx;
        let a = self.alloc_map.get_mut_or(id, || {
            // Need to make a copy, even if `get_static_alloc` is able
            // to give us a cheap reference.
            let alloc = Self::get_static_alloc(tcx, id)?;
            if alloc.mutability == Mutability::Immutable {
                return err!(ModifiedConstantMemory);
            }
            let kind = M::STATIC_KIND.expect(
                "An allocation is being mutated but the machine does not expect that to happen"
            );
            Ok((MemoryKind::Machine(kind), alloc.into_owned()))
        });
        // Unpack the error type manually because type inference doesn't
        // work otherwise (and we cannot help it because of `impl Trait`).
        match a {
            Err(e) => Err(e),
            Ok(a) => {
                let a = &mut a.1;
                if a.mutability == Mutability::Immutable {
                    return err!(ModifiedConstantMemory);
                }
                Ok(a)
            }
        }
    }

    pub fn get_size_and_align(&self, id: AllocId) -> (Size, Align) {
        if let Ok(alloc) = self.get(id) {
            return (Size::from_bytes(alloc.bytes.len() as u64), alloc.align);
        }
        // Could also be a fn ptr or extern static
        match self.tcx.alloc_map.lock().get(id) {
            Some(AllocType::Function(..)) => (Size::ZERO, Align::from_bytes(1).unwrap()),
            Some(AllocType::Static(did)) => {
                // The only way `get` couldn't have worked here is if this is an extern static
                assert!(self.tcx.is_foreign_item(did));
                // Use size and align of the type
                let ty = self.tcx.type_of(did);
                let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
                (layout.size, layout.align.abi)
            }
            _ => {
                // Must be a deallocated pointer
                *self.dead_alloc_map.get(&id).expect(
                    "allocation missing in dead_alloc_map"
                )
            }
        }
    }

    pub fn get_fn(&self, ptr: Pointer<M::PointerTag>) -> EvalResult<'tcx, Instance<'tcx>> {
        if ptr.offset.bytes() != 0 {
            return err!(InvalidFunctionPointer);
        }
        trace!("reading fn ptr: {}", ptr.alloc_id);
        match self.tcx.alloc_map.lock().get(ptr.alloc_id) {
            Some(AllocType::Function(instance)) => Ok(instance),
            _ => Err(EvalErrorKind::ExecuteMemory.into()),
        }
    }

    pub fn mark_immutable(&mut self, id: AllocId) -> EvalResult<'tcx> {
        self.get_mut(id)?.mutability = Mutability::Immutable;
        Ok(())
    }

    /// For debugging, print an allocation and all allocations it points to, recursively.
    pub fn dump_alloc(&self, id: AllocId) {
        self.dump_allocs(vec![id]);
    }

    fn dump_alloc_helper<Tag, Extra>(
        &self,
        allocs_seen: &mut FxHashSet<AllocId>,
        allocs_to_print: &mut VecDeque<AllocId>,
        mut msg: String,
        alloc: &Allocation<Tag, Extra>,
        extra: String,
    ) {
        use std::fmt::Write;

        let prefix_len = msg.len();
        let mut relocations = vec![];

        for i in 0..(alloc.bytes.len() as u64) {
            let i = Size::from_bytes(i);
            if let Some(&(_, target_id)) = alloc.relocations.get(&i) {
                if allocs_seen.insert(target_id) {
                    allocs_to_print.push_back(target_id);
                }
                relocations.push((i, target_id));
            }
            if alloc.undef_mask.is_range_defined(i, i + Size::from_bytes(1)).is_ok() {
                // this `as usize` is fine, since `i` came from a `usize`
                write!(msg, "{:02x} ", alloc.bytes[i.bytes() as usize]).unwrap();
            } else {
                msg.push_str("__ ");
            }
        }

        trace!(
            "{}({} bytes, alignment {}){}",
            msg, alloc.bytes.len(), alloc.align.bytes(), extra
        );

        if !relocations.is_empty() {
            msg.clear();
            write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
            let mut pos = Size::ZERO;
            let relocation_width = (self.pointer_size().bytes() - 1) * 3;
            for (i, target_id) in relocations {
                // this `as usize` is fine, since we can't print more chars than `usize::MAX`
                write!(msg, "{:1$}", "", ((i - pos) * 3).bytes() as usize).unwrap();
                let target = format!("({})", target_id);
                // this `as usize` is fine, since we can't print more chars than `usize::MAX`
                write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
                pos = i + self.pointer_size();
            }
            trace!("{}", msg);
        }
    }

    /// For debugging, print a list of allocations and all allocations they point to, recursively.
    pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
        if !log_enabled!(::log::Level::Trace) {
            return;
        }
        allocs.sort();
        allocs.dedup();
        let mut allocs_to_print = VecDeque::from(allocs);
        let mut allocs_seen = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            let msg = format!("Alloc {:<5} ", format!("{}:", id));

            // normal alloc?
            match self.alloc_map.get_or(id, || Err(())) {
                Ok((kind, alloc)) => {
                    let extra = match kind {
                        MemoryKind::Stack => " (stack)".to_owned(),
                        MemoryKind::Vtable => " (vtable)".to_owned(),
                        MemoryKind::Machine(m) => format!(" ({:?})", m),
                    };
                    self.dump_alloc_helper(
                        &mut allocs_seen, &mut allocs_to_print,
                        msg, alloc, extra
                    );
                },
                Err(()) => {
                    // static alloc?
                    match self.tcx.alloc_map.lock().get(id) {
                        Some(AllocType::Memory(alloc)) => {
                            self.dump_alloc_helper(
                                &mut allocs_seen, &mut allocs_to_print,
                                msg, alloc, " (immutable)".to_owned()
                            );
                        }
                        Some(AllocType::Function(func)) => {
                            trace!("{} {}", msg, func);
                        }
                        Some(AllocType::Static(did)) => {
                            trace!("{} {:?}", msg, did);
                        }
                        None => {
                            trace!("{} (deallocated)", msg);
                        }
                    }
                },
            };
        }
    }

    pub fn leak_report(&self) -> usize {
        trace!("### LEAK REPORT ###");
        let leaks: Vec<_> = self.alloc_map.filter_map_collect(|&id, &(kind, _)| {
            if kind.may_leak() { None } else { Some(id) }
        });
        let n = leaks.len();
        self.dump_allocs(leaks);
        n
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda).
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}

/// Byte accessors
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    /// The last argument controls whether we error out when there are undefined
    /// or pointer bytes. You should never call this, call `get_bytes` or
    /// `get_bytes_with_undef_and_ptr` instead.
    ///
    /// This function also guarantees that the resulting pointer will remain stable
    /// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
    /// on that.
    fn get_bytes_internal(
        &self,
        ptr: Pointer<M::PointerTag>,
        size: Size,
        align: Align,
        check_defined_and_ptr: bool,
    ) -> EvalResult<'tcx, &[u8]> {
        assert_ne!(size.bytes(), 0, "0-sized accesses should never even get a `Pointer`");
        self.check_align(ptr.into(), align)?;
        self.check_bounds(ptr, size, InboundsCheck::Live)?;

        if check_defined_and_ptr {
            self.check_defined(ptr, size)?;
            self.check_relocations(ptr, size)?;
        } else {
            // We still don't want relocations on the *edges*
            self.check_relocation_edges(ptr, size)?;
        }

        let alloc = self.get(ptr.alloc_id)?;
        AllocationExtra::memory_read(alloc, ptr, size)?;

        assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
        assert_eq!(size.bytes() as usize as u64, size.bytes());
        let offset = ptr.offset.bytes() as usize;
        Ok(&alloc.bytes[offset..offset + size.bytes() as usize])
    }

    #[inline]
    fn get_bytes(
        &self,
        ptr: Pointer<M::PointerTag>,
        size: Size,
        align: Align,
    ) -> EvalResult<'tcx, &[u8]> {
        self.get_bytes_internal(ptr, size, align, true)
    }

    /// It is the caller's responsibility to handle undefined and pointer bytes.
    /// However, this still checks that there are no relocations on the *edges*.
    #[inline]
    fn get_bytes_with_undef_and_ptr(
        &self,
        ptr: Pointer<M::PointerTag>,
        size: Size,
        align: Align,
    ) -> EvalResult<'tcx, &[u8]> {
        self.get_bytes_internal(ptr, size, align, false)
    }

    /// Just calling this already marks everything as defined and removes relocations,
    /// so be sure to actually put data there!
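    ///
    /// A sketch of correct usage (illustrative only, not a doctest):
    ///
    /// ```ignore
    /// let bytes = mem.get_bytes_mut(ptr, Size::from_bytes(4), align)?;
    /// // All 4 bytes now count as defined, so we must really overwrite all of them:
    /// bytes.copy_from_slice(&[0xde, 0xad, 0xbe, 0xef]);
    /// ```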
    fn get_bytes_mut(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        size: Size,
        align: Align,
    ) -> EvalResult<'tcx, &mut [u8]> {
        assert_ne!(size.bytes(), 0, "0-sized accesses should never even get a `Pointer`");
        self.check_align(ptr.into(), align)?;
        self.check_bounds(ptr, size, InboundsCheck::Live)?;

        self.mark_definedness(ptr, size, true)?;
        self.clear_relocations(ptr, size)?;

        let alloc = self.get_mut(ptr.alloc_id)?;
        AllocationExtra::memory_written(alloc, ptr, size)?;

        assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
        assert_eq!(size.bytes() as usize as u64, size.bytes());
        let offset = ptr.offset.bytes() as usize;
        Ok(&mut alloc.bytes[offset..offset + size.bytes() as usize])
    }
}

/// Interning (for CTFE)
impl<'a, 'mir, 'tcx, M> Memory<'a, 'mir, 'tcx, M>
where
    M: Machine<'a, 'mir, 'tcx, PointerTag = (), AllocExtra = ()>,
    M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKinds>, Allocation)>,
{
    /// Mark an allocation as static and initialized, either mutable or not.
    pub fn intern_static(
        &mut self,
        alloc_id: AllocId,
        mutability: Mutability,
    ) -> EvalResult<'tcx> {
        trace!(
            "mark_static_initialized {:?}, mutability: {:?}",
            alloc_id,
            mutability
        );
        // Remove the allocation from the local map...
        let (kind, mut alloc) = self.alloc_map.remove(&alloc_id).unwrap();
        match kind {
            MemoryKind::Machine(_) => bug!("Static cannot refer to machine memory"),
            MemoryKind::Stack | MemoryKind::Vtable => {},
        }
        // ensure llvm knows not to put this into immutable memory
        alloc.mutability = mutability;
        let alloc = self.tcx.intern_const_alloc(alloc);
        // ...and register it in the global `tcx` map instead.
        self.tcx.alloc_map.lock().set_id_memory(alloc_id, alloc);
        // recurse into inner allocations
        for &(_, alloc) in alloc.relocations.values() {
            // FIXME: Reusing the mutability here is likely incorrect. It is originally
            // determined via `is_freeze`, and data is considered frozen if there is no
            // `UnsafeCell` *immediately* in that data -- however, this search stops
            // at references. So whenever we follow a reference, we should likely
            // assume immutability -- and we should make sure that the compiler
            // does not permit code that would break this!
            if self.alloc_map.contains_key(&alloc) {
                // Not yet interned, so proceed recursively
                self.intern_static(alloc, mutability)?;
            } else if self.dead_alloc_map.contains_key(&alloc) {
                // dangling pointer
                return err!(ValidationFailure(
                    "encountered dangling pointer in final constant".into(),
                ))
            }
        }
        Ok(())
    }
}

/// Reading and writing
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    pub fn copy(
        &mut self,
        src: Scalar<M::PointerTag>,
        src_align: Align,
        dest: Scalar<M::PointerTag>,
        dest_align: Align,
        size: Size,
        nonoverlapping: bool,
    ) -> EvalResult<'tcx> {
        self.copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
    }

    pub fn copy_repeatedly(
        &mut self,
        src: Scalar<M::PointerTag>,
        src_align: Align,
        dest: Scalar<M::PointerTag>,
        dest_align: Align,
        size: Size,
        length: u64,
        nonoverlapping: bool,
    ) -> EvalResult<'tcx> {
        if size.bytes() == 0 {
            // Nothing to do for ZST, other than checking alignment and non-NULLness.
            self.check_align(src, src_align)?;
            self.check_align(dest, dest_align)?;
            return Ok(());
        }
        let src = src.to_ptr()?;
        let dest = dest.to_ptr()?;

        // first copy the relocations to a temporary buffer, because
        // `get_bytes_mut` will clear the relocations, which is correct,
        // since we don't want to keep any relocations at the target.
        // (`get_bytes_with_undef_and_ptr` below checks that there are no
        // relocations overlapping the edges; those would not be handled correctly).
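        // For each repetition `i`, a relocation at absolute source offset `o` has to land
        // at the analogous position within the `i`-th copy at the destination, i.e. at
        // `o + dest.offset - src.offset + i * size`. Hypothetical numbers: src.offset = 0,
        // dest.offset = 100, size = 8, relocation at offset 4 => copies at 104, 112, 120, ...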
        let relocations = {
            let relocations = self.relocations(src, size)?;
            let mut new_relocations = Vec::with_capacity(relocations.len() * (length as usize));
            for i in 0..length {
                new_relocations.extend(
                    relocations
                        .iter()
                        .map(|&(offset, reloc)| {
                            // Shift each relocation into the `i`-th copy; each copy is
                            // exactly `size` bytes wide.
                            (offset + dest.offset - src.offset + (i * size), reloc)
                        })
                );
            }
            new_relocations
        };

        // This also checks alignment, and relocation edges on the src.
        let src_bytes = self.get_bytes_with_undef_and_ptr(src, size, src_align)?.as_ptr();
        let dest_bytes = self.get_bytes_mut(dest, size * length, dest_align)?.as_mut_ptr();

        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        // The pointers above remain valid even if the `HashMap` table is moved around because they
        // point into the `Vec` storing the bytes.
        unsafe {
            assert_eq!(size.bytes() as usize as u64, size.bytes());
            if src.alloc_id == dest.alloc_id {
                if nonoverlapping {
                    if (src.offset <= dest.offset && src.offset + size > dest.offset) ||
                        (dest.offset <= src.offset && dest.offset + size > src.offset)
                    {
                        return err!(Intrinsic(
                            "copy_nonoverlapping called on overlapping ranges".to_string(),
                        ));
                    }
                }

                for i in 0..length {
                    ptr::copy(src_bytes,
                              dest_bytes.offset((size.bytes() * i) as isize),
                              size.bytes() as usize);
                }
            } else {
                for i in 0..length {
                    ptr::copy_nonoverlapping(src_bytes,
                                             dest_bytes.offset((size.bytes() * i) as isize),
                                             size.bytes() as usize);
                }
            }
        }

        // copy definedness to the destination
        self.copy_undef_mask(src, dest, size, length)?;
        // copy the relocations to the destination
        self.get_mut(dest.alloc_id)?.relocations.insert_presorted(relocations);

        Ok(())
    }

    /// Read the bytes of a NUL-terminated C string, not including the terminator.
    pub fn read_c_str(&self, ptr: Pointer<M::PointerTag>) -> EvalResult<'tcx, &[u8]> {
        let alloc = self.get(ptr.alloc_id)?;
        assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
        let offset = ptr.offset.bytes() as usize;
        match alloc.bytes[offset..].iter().position(|&c| c == 0) {
            Some(size) => {
                // Check the string *and* the NUL terminator, i.e. `size + 1` bytes.
                let p1 = Size::from_bytes((size + 1) as u64);
                self.check_relocations(ptr, p1)?;
                self.check_defined(ptr, p1)?;
                Ok(&alloc.bytes[offset..offset + size])
            }
            None => err!(UnterminatedCString(ptr.erase_tag())),
        }
    }

    pub fn check_bytes(
        &self,
        ptr: Scalar<M::PointerTag>,
        size: Size,
        allow_ptr_and_undef: bool,
    ) -> EvalResult<'tcx> {
        // Empty accesses don't need to be valid pointers, but they should still be non-NULL
        let align = Align::from_bytes(1).unwrap();
        if size.bytes() == 0 {
            self.check_align(ptr, align)?;
            return Ok(());
        }
        let ptr = ptr.to_ptr()?;
        // Check bounds, align and relocations on the edges
        self.get_bytes_with_undef_and_ptr(ptr, size, align)?;
        // Check undef and ptr
        if !allow_ptr_and_undef {
            self.check_defined(ptr, size)?;
            self.check_relocations(ptr, size)?;
        }
        Ok(())
    }

    pub fn read_bytes(&self, ptr: Scalar<M::PointerTag>, size: Size) -> EvalResult<'tcx, &[u8]> {
        // Empty accesses don't need to be valid pointers, but they should still be non-NULL
        let align = Align::from_bytes(1).unwrap();
        if size.bytes() == 0 {
            self.check_align(ptr, align)?;
            return Ok(&[]);
        }
        self.get_bytes(ptr.to_ptr()?, size, align)
    }

    pub fn write_bytes(&mut self, ptr: Scalar<M::PointerTag>, src: &[u8]) -> EvalResult<'tcx> {
        // Empty accesses don't need to be valid pointers, but they should still be non-NULL
        let align = Align::from_bytes(1).unwrap();
        if src.is_empty() {
            self.check_align(ptr, align)?;
            return Ok(());
        }
        let bytes = self.get_bytes_mut(ptr.to_ptr()?, Size::from_bytes(src.len() as u64), align)?;
        bytes.clone_from_slice(src);
        Ok(())
    }

    pub fn write_repeat(
        &mut self,
        ptr: Scalar<M::PointerTag>,
        val: u8,
        count: Size,
    ) -> EvalResult<'tcx> {
        // Empty accesses don't need to be valid pointers, but they should still be non-NULL
        let align = Align::from_bytes(1).unwrap();
        if count.bytes() == 0 {
            self.check_align(ptr, align)?;
            return Ok(());
        }
        let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, align)?;
        for b in bytes {
            *b = val;
        }
        Ok(())
    }

    /// Read a *non-ZST* scalar.
    pub fn read_scalar(
        &self,
        ptr: Pointer<M::PointerTag>,
        ptr_align: Align,
        size: Size,
    ) -> EvalResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
        // `get_bytes_with_undef_and_ptr` tests alignment and relocation edges
        let bytes = self.get_bytes_with_undef_and_ptr(
            ptr, size, ptr_align.min(self.int_align(size))
        )?;
        // Undef check happens *after* we established that the alignment is correct.
        // We must not return Ok() for unaligned pointers!
        if self.check_defined(ptr, size).is_err() {
            // this inflates undefined bytes to the entire scalar, even if only a few
            // bytes are undefined
            return Ok(ScalarMaybeUndef::Undef);
        }
        // Now we do the actual reading
        let bits = read_target_uint(self.tcx.data_layout.endian, bytes).unwrap();
        // See if we got a pointer
        if size != self.pointer_size() {
            // *Now* better make sure that the inside also is free of relocations.
            self.check_relocations(ptr, size)?;
        } else {
            let alloc = self.get(ptr.alloc_id)?;
            match alloc.relocations.get(&ptr.offset) {
                Some(&(tag, alloc_id)) => {
                    let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag);
                    return Ok(ScalarMaybeUndef::Scalar(ptr.into()))
                }
                None => {},
            }
        }
        // We don't. Just return the bits.
        Ok(ScalarMaybeUndef::Scalar(Scalar::from_uint(bits, size)))
    }

    pub fn read_ptr_sized(
        &self,
        ptr: Pointer<M::PointerTag>,
        ptr_align: Align,
    ) -> EvalResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
        self.read_scalar(ptr, ptr_align, self.pointer_size())
    }

    /// Write a *non-ZST* scalar.
    pub fn write_scalar(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        ptr_align: Align,
        val: ScalarMaybeUndef<M::PointerTag>,
        type_size: Size,
    ) -> EvalResult<'tcx> {
        let val = match val {
            ScalarMaybeUndef::Scalar(scalar) => scalar,
            ScalarMaybeUndef::Undef => return self.mark_definedness(ptr, type_size, false),
        };

        let bytes = match val {
            Scalar::Ptr(val) => {
                assert_eq!(type_size, self.pointer_size());
                val.offset.bytes() as u128
            }

            Scalar::Bits { bits, size } => {
                assert_eq!(size as u64, type_size.bytes());
                debug_assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits,
                    "Unexpected value of size {} when writing to memory", size);
                bits
            },
        };

        {
            // get_bytes_mut checks alignment
            let endian = self.tcx.data_layout.endian;
            let dst = self.get_bytes_mut(ptr, type_size, ptr_align)?;
            write_target_uint(endian, dst, bytes).unwrap();
        }

        // See if we have to also write a relocation
        match val {
            Scalar::Ptr(val) => {
                self.get_mut(ptr.alloc_id)?.relocations.insert(
                    ptr.offset,
                    (val.tag, val.alloc_id),
                );
            }
            _ => {}
        }

        Ok(())
    }

    pub fn write_ptr_sized(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        ptr_align: Align,
        val: ScalarMaybeUndef<M::PointerTag>,
    ) -> EvalResult<'tcx> {
        let ptr_size = self.pointer_size();
        self.write_scalar(ptr.into(), ptr_align, val, ptr_size)
    }

    fn int_align(&self, size: Size) -> Align {
        // We assume pointer-sized integers have the same alignment as pointers.
        // We also assume signed and unsigned integers of the same size have the same alignment.
        let ity = match size.bytes() {
            1 => layout::I8,
            2 => layout::I16,
            4 => layout::I32,
            8 => layout::I64,
            16 => layout::I128,
            _ => bug!("bad integer size: {}", size.bytes()),
        };
        ity.align(self).abi
    }
}

/// Relocations
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    /// Return all relocations overlapping with the given ptr-offset pair.
    fn relocations(
        &self,
        ptr: Pointer<M::PointerTag>,
        size: Size,
    ) -> EvalResult<'tcx, &[(Size, (M::PointerTag, AllocId))]> {
        // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
        // the beginning of this range.
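        // Hypothetical numbers: with 8-byte pointers, a relocation stored at offset 9
        // covers bytes 9..17 and thus still overlaps an access starting at offset 16,
        // so for such an access we start searching at 16 - 7 = 9.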
        let start = ptr.offset.bytes().saturating_sub(self.pointer_size().bytes() - 1);
        let end = ptr.offset + size; // this does overflow checking
        Ok(self.get(ptr.alloc_id)?.relocations.range(Size::from_bytes(start)..end))
    }

    /// Check that there are no relocations overlapping with the given range.
    #[inline(always)]
    fn check_relocations(&self, ptr: Pointer<M::PointerTag>, size: Size) -> EvalResult<'tcx> {
        if self.relocations(ptr, size)?.len() != 0 {
            err!(ReadPointerAsBytes)
        } else {
            Ok(())
        }
    }

    /// Remove all relocations inside the given range.
    /// If there are relocations overlapping with the edges, they
    /// are removed as well *and* the bytes they cover are marked as
    /// uninitialized. This is a somewhat odd "spooky action at a distance",
    /// but it allows strictly more code to run than if we would just error
    /// immediately in that case.
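    ///
    /// For example (hypothetical numbers, assuming 8-byte pointers): clearing bytes 4..12
    /// while a relocation sits at offset 8 (covering bytes 8..16) removes that relocation
    /// and additionally marks bytes 12..16 as undefined, because half of the pointer they
    /// used to be part of is now gone.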
    fn clear_relocations(&mut self, ptr: Pointer<M::PointerTag>, size: Size) -> EvalResult<'tcx> {
        // Find the start and end of the given range and its outermost relocations.
        let (first, last) = {
            // Find all relocations overlapping the given range.
            let relocations = self.relocations(ptr, size)?;
            if relocations.is_empty() {
                return Ok(());
            }

            (relocations.first().unwrap().0,
             relocations.last().unwrap().0 + self.pointer_size())
        };
        let start = ptr.offset;
        let end = start + size;

        let alloc = self.get_mut(ptr.alloc_id)?;

        // Mark parts of the outermost relocations as undefined if they partially fall outside the
        // given range.
        if first < start {
            alloc.undef_mask.set_range(first, start, false);
        }
        if last > end {
            alloc.undef_mask.set_range(end, last, false);
        }

        // Forget all the relocations.
        alloc.relocations.remove_range(first..last);

        Ok(())
    }

    /// Error if there are relocations overlapping with the edges of the
    /// given memory range.
    #[inline]
    fn check_relocation_edges(&self, ptr: Pointer<M::PointerTag>, size: Size) -> EvalResult<'tcx> {
        self.check_relocations(ptr, Size::ZERO)?;
        self.check_relocations(ptr.offset(size, self)?, Size::ZERO)?;
        Ok(())
    }
}

/// Undefined bytes
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
    // FIXME: Add a fast version for the common, nonoverlapping case
    fn copy_undef_mask(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        repeat: u64,
    ) -> EvalResult<'tcx> {
        // The bits have to be saved locally before writing to dest in case src and dest overlap.
        assert_eq!(size.bytes() as usize as u64, size.bytes());

        let undef_mask = self.get(src.alloc_id)?.undef_mask.clone();
        let dest_allocation = self.get_mut(dest.alloc_id)?;

        for i in 0..size.bytes() {
            let defined = undef_mask.get(src.offset + Size::from_bytes(i));
            for j in 0..repeat {
                dest_allocation.undef_mask.set(
                    dest.offset + Size::from_bytes(i + (size.bytes() * j)),
                    defined,
                );
            }
        }

        Ok(())
    }

    /// Checks that a range of bytes is defined. If not, returns the `ReadUndefBytes`
    /// error which will report the first byte which is undefined.
    #[inline]
    fn check_defined(&self, ptr: Pointer<M::PointerTag>, size: Size) -> EvalResult<'tcx> {
        let alloc = self.get(ptr.alloc_id)?;
        alloc.undef_mask.is_range_defined(
            ptr.offset,
            ptr.offset + size,
        ).or_else(|idx| err!(ReadUndefBytes(idx)))
    }

    pub fn mark_definedness(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        size: Size,
        new_state: bool,
    ) -> EvalResult<'tcx> {
        if size.bytes() == 0 {
            return Ok(());
        }
        let alloc = self.get_mut(ptr.alloc_id)?;
        alloc.undef_mask.set_range(
            ptr.offset,
            ptr.offset + size,
            new_state,
        );
        Ok(())
    }
}