1 use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian, BigEndian};
2 use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque};
3 use std::{fmt, iter, ptr, mem, io};
6 use rustc::ty::layout::{self, TargetDataLayout};
7 use syntax::ast::Mutability;
9 use error::{EvalError, EvalResult};
10 use value::{PrimVal, Pointer};
11 use eval_context::EvalContext;
13 ////////////////////////////////////////////////////////////////////////////////
14 // Allocations and pointers
15 ////////////////////////////////////////////////////////////////////////////////
17 #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
18 pub struct AllocId(pub u64);
20 impl fmt::Display for AllocId {
21 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
22 write!(f, "{}", self.0)
27 pub struct Allocation {
28 /// The actual bytes of the allocation.
29 /// Note that the bytes of a pointer represent the offset of the pointer
31 /// Maps from byte addresses to allocations.
32 /// Only the first byte of a pointer is inserted into the map.
33 pub relocations: BTreeMap<u64, AllocId>,
34 /// Denotes undefined memory. Reading from undefined memory is forbidden in miri
35 pub undef_mask: UndefMask,
36 /// The alignment of the allocation to detect unaligned reads.
38 /// Whether the allocation may be modified.
39 pub mutable: Mutability,
40 /// Use the `mark_static_initalized` method of `Memory` to ensure that an error occurs if the memory of this
41 /// allocation is modified or deallocated in the future.
42 /// Helps guarantee that stack allocations aren't deallocated via `rust_deallocate`
46 #[derive(Debug, PartialEq, Copy, Clone)]
48 /// Error if deallocated any other way than `rust_deallocate`
50 /// Error if deallocated any other way than `free`
52 /// Error if deallocated except during a stack pop
54 /// Static in the process of being initialized.
55 /// The difference is important: an immutable static referring to a
56 /// mutable, already initialized static must not freeze it immutably, and
57 /// without this kind we could not distinguish already initialized statics from ones still being initialized
59 /// May never be deallocated
61 /// Part of env var emulation
65 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
66 pub struct MemoryPointer {
67 pub alloc_id: AllocId,
71 impl<'tcx> MemoryPointer {
72 pub fn new(alloc_id: AllocId, offset: u64) -> Self {
73 MemoryPointer { alloc_id, offset }
76 pub(crate) fn wrapping_signed_offset<L: PointerArithmetic>(self, i: i64, l: L) -> Self {
77 MemoryPointer::new(self.alloc_id, l.wrapping_signed_offset(self.offset, i))
80 pub(crate) fn overflowing_signed_offset<L: PointerArithmetic>(self, i: i128, l: L) -> (Self, bool) {
81 let (res, over) = l.overflowing_signed_offset(self.offset, i);
82 (MemoryPointer::new(self.alloc_id, res), over)
85 pub(crate) fn signed_offset<L: PointerArithmetic>(self, i: i64, l: L) -> EvalResult<'tcx, Self> {
86 Ok(MemoryPointer::new(self.alloc_id, l.signed_offset(self.offset, i)?))
89 pub(crate) fn overflowing_offset<L: PointerArithmetic>(self, i: u64, l: L) -> (Self, bool) {
90 let (res, over) = l.overflowing_offset(self.offset, i);
91 (MemoryPointer::new(self.alloc_id, res), over)
94 pub(crate) fn offset<L: PointerArithmetic>(self, i: u64, l: L) -> EvalResult<'tcx, Self> {
95 Ok(MemoryPointer::new(self.alloc_id, l.offset(self.offset, i)?))
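// A minimal sketch of how these helpers compose, assuming `layout` is the
// `&TargetDataLayout` of the emulated target (any `PointerArithmetic`
// implementor works the same): `offset` reports overflow past the target's
// pointer size as an error, while `wrapping_signed_offset` silently wraps.
fn _memory_pointer_offset_sketch<'t>(layout: &TargetDataLayout) -> EvalResult<'t> {
    let base = MemoryPointer::new(AllocId(0), 0);
    let field = base.offset(8, layout)?;                  // checked arithmetic
    assert_eq!(field.offset, 8);
    let back = field.wrapping_signed_offset(-8, layout);  // wrapping arithmetic
    assert_eq!(back.offset, 0);
    Ok(())
}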
99 pub type TlsKey = usize;
101 #[derive(Copy, Clone, Debug)]
102 pub struct TlsEntry<'tcx> {
103 data: Pointer, // Will eventually become a map from thread IDs to `Pointer`s, if we ever support more than one thread.
104 dtor: Option<ty::Instance<'tcx>>,
107 ////////////////////////////////////////////////////////////////////////////////
108 // Top-level interpreter memory
109 ////////////////////////////////////////////////////////////////////////////////
111 pub struct Memory<'a, 'tcx> {
112 /// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations).
113 alloc_map: HashMap<AllocId, Allocation>,
115 /// The AllocId to assign to the next new allocation. Always incremented, never gets smaller.
118 /// Set of statics, constants, promoteds, vtables, ... to prevent `mark_static_initalized` from
119 /// stepping out of its own allocations. This set only contains statics backed by an
120 /// allocation. If they are ByVal or ByValPair they are not here, but will be inserted once
121 /// they become ByRef.
122 static_alloc: HashSet<AllocId>,
124 /// Number of virtual bytes allocated.
127 /// Maximum number of virtual bytes that may be allocated.
130 /// Function "allocations". They exist solely so pointers have something to point to, and
131 /// we can figure out what they point to.
132 functions: HashMap<AllocId, ty::Instance<'tcx>>,
134 /// Inverse map of `functions` so we don't allocate a new pointer every time we need one
135 function_alloc_cache: HashMap<ty::Instance<'tcx>, AllocId>,
137 /// Target machine data layout to emulate.
138 pub layout: &'a TargetDataLayout,
140 /// A cache for basic byte allocations keyed by their contents. This is used to deduplicate
141 /// allocations for string and bytestring literals.
142 literal_alloc_cache: HashMap<Vec<u8>, AllocId>,
144 /// pthreads-style thread-local storage.
145 thread_local: BTreeMap<TlsKey, TlsEntry<'tcx>>,
147 /// The Key to use for the next thread-local allocation.
148 next_thread_local: TlsKey,
150 /// To avoid having to pass flags to every single memory access, we have some global state saying whether
151 /// alignment checking is currently enforced for read and/or write accesses.
152 reads_are_aligned: bool,
153 writes_are_aligned: bool,
156 impl<'a, 'tcx> Memory<'a, 'tcx> {
157 pub fn new(layout: &'a TargetDataLayout, max_memory: u64) -> Self {
159 alloc_map: HashMap::new(),
160 functions: HashMap::new(),
161 function_alloc_cache: HashMap::new(),
164 memory_size: max_memory,
166 static_alloc: HashSet::new(),
167 literal_alloc_cache: HashMap::new(),
168 thread_local: BTreeMap::new(),
169 next_thread_local: 0,
170 reads_are_aligned: true,
171 writes_are_aligned: true,
175 pub fn allocations(&self) -> ::std::collections::hash_map::Iter<AllocId, Allocation> {
176 self.alloc_map.iter()
179 pub fn create_fn_alloc(&mut self, instance: ty::Instance<'tcx>) -> MemoryPointer {
180 if let Some(&alloc_id) = self.function_alloc_cache.get(&instance) {
181 return MemoryPointer::new(alloc_id, 0);
183 let id = self.next_id;
184 debug!("creating fn ptr: {}", id);
186 self.functions.insert(id, instance);
187 self.function_alloc_cache.insert(instance, id);
188 MemoryPointer::new(id, 0)
191 pub fn allocate_cached(&mut self, bytes: &[u8]) -> EvalResult<'tcx, MemoryPointer> {
192 if let Some(&alloc_id) = self.literal_alloc_cache.get(bytes) {
193 return Ok(MemoryPointer::new(alloc_id, 0));
196 let ptr = self.allocate(bytes.len() as u64, 1, Kind::UninitializedStatic)?;
197 self.write_bytes(ptr.into(), bytes)?;
198 self.mark_static_initalized(ptr.alloc_id, Mutability::Immutable)?;
199 self.literal_alloc_cache.insert(bytes.to_vec(), ptr.alloc_id);
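// Sketch of the deduplication above, assuming `mem` is a `Memory` with spare
// capacity: asking for the same literal twice hands back pointers into the
// same immutable allocation.
fn _literal_dedup_sketch<'m, 't>(mem: &mut Memory<'m, 't>) -> EvalResult<'t> {
    let first = mem.allocate_cached(b"hello")?;
    let second = mem.allocate_cached(b"hello")?;
    assert_eq!(first.alloc_id, second.alloc_id); // same backing allocation, offset 0
    Ok(())
}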
203 pub fn allocate(&mut self, size: u64, align: u64, kind: Kind) -> EvalResult<'tcx, MemoryPointer> {
204 assert_ne!(align, 0);
205 assert!(align.is_power_of_two());
207 if self.memory_size - self.memory_usage < size {
208 return Err(EvalError::OutOfMemory {
209 allocation_size: size,
210 memory_size: self.memory_size,
211 memory_usage: self.memory_usage,
214 self.memory_usage += size;
215 assert_eq!(size as usize as u64, size);
216 let alloc = Allocation {
217 bytes: vec![0; size as usize],
218 relocations: BTreeMap::new(),
219 undef_mask: UndefMask::new(size),
222 mutable: Mutability::Mutable,
224 let id = self.next_id;
226 self.alloc_map.insert(id, alloc);
227 Ok(MemoryPointer::new(id, 0))
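// Sketch of the allocation round-trip, assuming `mem` is a `Memory` with
// enough capacity left: `deallocate` re-checks the kind and, if given, the
// size/alignment recorded by `allocate`, and fails if they do not match.
fn _allocation_roundtrip_sketch<'m, 't>(mem: &mut Memory<'m, 't>) -> EvalResult<'t> {
    let ptr = mem.allocate(8, 8, Kind::Rust)?;       // 8 bytes, 8-byte aligned heap memory
    mem.write_uint(ptr, 0xdead_beef, 8)?;
    assert_eq!(mem.read_uint(ptr, 8)?, 0xdead_beef);
    mem.deallocate(ptr, Some((8, 8)), Kind::Rust)?;  // kind/size/align must match
    Ok(())
}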
230 pub fn reallocate(&mut self, ptr: MemoryPointer, old_size: u64, old_align: u64, new_size: u64, new_align: u64, kind: Kind) -> EvalResult<'tcx, MemoryPointer> {
234 return Err(EvalError::ReallocateNonBasePtr);
236 if let Ok(alloc) = self.get(ptr.alloc_id) {
237 if alloc.kind != kind {
238 return Err(EvalError::ReallocatedWrongMemoryKind(alloc.kind, kind));
242 // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc"
243 let new_ptr = self.allocate(new_size, new_align, kind)?;
244 self.copy(ptr.into(), new_ptr.into(), min(old_size, new_size), min(old_align, new_align), /*nonoverlapping*/true)?;
245 self.deallocate(ptr, Some((old_size, old_align)), kind)?;
250 pub fn deallocate(&mut self, ptr: MemoryPointer, size_and_align: Option<(u64, u64)>, kind: Kind) -> EvalResult<'tcx> {
252 return Err(EvalError::DeallocateNonBasePtr);
255 let alloc = match self.alloc_map.remove(&ptr.alloc_id) {
256 Some(alloc) => alloc,
257 None => return Err(EvalError::DoubleFree),
260 if alloc.kind != kind {
261 return Err(EvalError::DeallocatedWrongMemoryKind(alloc.kind, kind));
263 if let Some((size, align)) = size_and_align {
264 if size != alloc.bytes.len() as u64 || align != alloc.align {
265 return Err(EvalError::IncorrectAllocationInformation);
269 self.memory_usage -= alloc.bytes.len() as u64;
270 debug!("deallocated : {}", ptr.alloc_id);
275 pub fn pointer_size(&self) -> u64 {
276 self.layout.pointer_size.bytes()
279 pub fn endianess(&self) -> layout::Endian {
283 /// Check that the pointer is aligned and non-NULL
284 pub fn check_align(&self, ptr: Pointer, align: u64) -> EvalResult<'tcx> {
285 let offset = match ptr.into_inner_primval() {
286 PrimVal::Ptr(ptr) => {
287 let alloc = self.get(ptr.alloc_id)?;
288 if alloc.align < align {
289 return Err(EvalError::AlignmentCheckFailed {
296 PrimVal::Bytes(bytes) => {
297 let v = ((bytes as u128) % (1u128 << (self.pointer_size() * 8))) as u64; // pointer_size() is in bytes, not bits
299 return Err(EvalError::InvalidNullPointerUsage);
303 PrimVal::Undef => return Err(EvalError::ReadUndefBytes),
305 if offset % align == 0 {
308 Err(EvalError::AlignmentCheckFailed {
315 pub(crate) fn check_bounds(&self, ptr: MemoryPointer, access: bool) -> EvalResult<'tcx> {
316 let alloc = self.get(ptr.alloc_id)?;
317 let allocation_size = alloc.bytes.len() as u64;
318 if ptr.offset > allocation_size {
319 return Err(EvalError::PointerOutOfBounds { ptr, access, allocation_size });
324 pub(crate) fn create_tls_key(&mut self, dtor: Option<ty::Instance<'tcx>>) -> TlsKey {
325 let new_key = self.next_thread_local;
326 self.next_thread_local += 1;
327 self.thread_local.insert(new_key, TlsEntry { data: Pointer::null(), dtor });
328 trace!("New TLS key allocated: {} with dtor {:?}", new_key, dtor);
332 pub(crate) fn delete_tls_key(&mut self, key: TlsKey) -> EvalResult<'tcx> {
333 return match self.thread_local.remove(&key) {
335 trace!("TLS key {} removed", key);
338 None => Err(EvalError::TlsOutOfBounds)
342 pub(crate) fn load_tls(&mut self, key: TlsKey) -> EvalResult<'tcx, Pointer> {
343 return match self.thread_local.get(&key) {
344 Some(&TlsEntry { data, .. }) => {
345 trace!("TLS key {} loaded: {:?}", key, data);
348 None => Err(EvalError::TlsOutOfBounds)
352 pub(crate) fn store_tls(&mut self, key: TlsKey, new_data: Pointer) -> EvalResult<'tcx> {
353 return match self.thread_local.get_mut(&key) {
354 Some(&mut TlsEntry { ref mut data, .. }) => {
355 trace!("TLS key {} stored: {:?}", key, new_data);
359 None => Err(EvalError::TlsOutOfBounds)
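// Sketch of the TLS round-trip, assuming `mem` is a `Memory` and `value` is
// some `Pointer` to store: keys are handed out sequentially, a fresh key holds
// the NULL pointer, and operations on a deleted key report `TlsOutOfBounds`.
fn _tls_roundtrip_sketch<'m, 't>(mem: &mut Memory<'m, 't>, value: Pointer) -> EvalResult<'t> {
    let key = mem.create_tls_key(None);      // no destructor registered
    assert!(mem.load_tls(key)?.is_null()?);  // starts out as the NULL pointer
    mem.store_tls(key, value)?;
    mem.delete_tls_key(key)?;
    assert!(mem.load_tls(key).is_err());     // TlsOutOfBounds
    Ok(())
}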
363 /// Returns a dtor, its argument, and the key it belongs to, if one is supposed to run
365 /// An optional destructor function may be associated with each key value.
366 /// At thread exit, if a key value has a non-NULL destructor pointer,
367 /// and the thread has a non-NULL value associated with that key,
368 /// the value of the key is set to NULL, and then the function pointed
369 /// to is called with the previously associated value as its sole argument.
370 /// The order of destructor calls is unspecified if more than one destructor
371 /// exists for a thread when it exits.
373 /// If, after all the destructors have been called for all non-NULL values
374 /// with associated destructors, there are still some non-NULL values with
375 /// associated destructors, then the process is repeated.
376 /// If, after at least {PTHREAD_DESTRUCTOR_ITERATIONS} iterations of destructor
377 /// calls for outstanding non-NULL values, there are still some non-NULL values
378 /// with associated destructors, implementations may stop calling destructors,
379 /// or they may continue calling destructors until no non-NULL values with
380 /// associated destructors exist, even though this might result in an infinite loop.
381 pub(crate) fn fetch_tls_dtor(&mut self, key: Option<TlsKey>) -> EvalResult<'tcx, Option<(ty::Instance<'tcx>, Pointer, TlsKey)>> {
382 use std::collections::Bound::*;
383 let start = match key {
384 Some(key) => Excluded(key),
387 for (&key, &mut TlsEntry { ref mut data, dtor }) in self.thread_local.range_mut((start, Unbounded)) {
388 if !data.is_null()? {
389 if let Some(dtor) = dtor {
390 let ret = Some((dtor, *data, key));
391 *data = Pointer::null();
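// Sketch of the loop a thread-exit shim could drive on top of `fetch_tls_dtor`,
// where `call_dtor` is a hypothetical callback that runs the instance on the
// given pointer: every call yields the next key with a live value and resets
// that value to NULL, so the scan eventually runs dry.
fn _run_tls_dtors_sketch<'m, 't, F>(mem: &mut Memory<'m, 't>, mut call_dtor: F) -> EvalResult<'t>
    where F: FnMut(ty::Instance<'t>, Pointer) -> EvalResult<'t>
{
    let mut key = None;
    while let Some((dtor, data, k)) = mem.fetch_tls_dtor(key)? {
        call_dtor(dtor, data)?;  // hypothetical: invoke `dtor` with `data` as its argument
        key = Some(k);           // resume the scan after the key just handled
    }
    Ok(())
}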
400 /// Allocation accessors
401 impl<'a, 'tcx> Memory<'a, 'tcx> {
402 pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation> {
403 match self.alloc_map.get(&id) {
404 Some(alloc) => Ok(alloc),
405 None => match self.functions.get(&id) {
406 Some(_) => Err(EvalError::DerefFunctionPointer),
407 None => Err(EvalError::DanglingPointerDeref),
412 pub fn get_mut(&mut self, id: AllocId) -> EvalResult<'tcx, &mut Allocation> {
413 match self.alloc_map.get_mut(&id) {
414 Some(alloc) => if alloc.mutable == Mutability::Mutable {
417 Err(EvalError::ModifiedConstantMemory)
419 None => match self.functions.get(&id) {
420 Some(_) => Err(EvalError::DerefFunctionPointer),
421 None => Err(EvalError::DanglingPointerDeref),
426 pub fn get_fn(&self, ptr: MemoryPointer) -> EvalResult<'tcx, ty::Instance<'tcx>> {
428 return Err(EvalError::InvalidFunctionPointer);
430 debug!("reading fn ptr: {}", ptr.alloc_id);
431 match self.functions.get(&ptr.alloc_id) {
432 Some(&fndef) => Ok(fndef),
433 None => match self.alloc_map.get(&ptr.alloc_id) {
434 Some(_) => Err(EvalError::ExecuteMemory),
435 None => Err(EvalError::InvalidFunctionPointer),
440 /// For debugging, print an allocation and all allocations it points to, recursively.
441 pub fn dump_alloc(&self, id: AllocId) {
442 self.dump_allocs(vec![id]);
445 /// For debugging, print a list of allocations and all allocations they point to, recursively.
446 pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
450 let mut allocs_to_print = VecDeque::from(allocs);
451 let mut allocs_seen = HashSet::new();
453 while let Some(id) = allocs_to_print.pop_front() {
454 let mut msg = format!("Alloc {:<5} ", format!("{}:", id));
455 let prefix_len = msg.len();
456 let mut relocations = vec![];
458 let alloc = match (self.alloc_map.get(&id), self.functions.get(&id)) {
459 (Some(a), None) => a,
460 (None, Some(instance)) => {
461 trace!("{} {}", msg, instance);
465 trace!("{} (deallocated)", msg);
468 (Some(_), Some(_)) => bug!("miri invariant broken: an allocation id exists that points to both a function and a memory location"),
471 for i in 0..(alloc.bytes.len() as u64) {
472 if let Some(&target_id) = alloc.relocations.get(&i) {
473 if allocs_seen.insert(target_id) {
474 allocs_to_print.push_back(target_id);
476 relocations.push((i, target_id));
478 if alloc.undef_mask.is_range_defined(i, i + 1) {
479 // this `as usize` is fine, since `i` came from a `usize`
480 write!(msg, "{:02x} ", alloc.bytes[i as usize]).unwrap();
486 let immutable = match (alloc.kind, alloc.mutable) {
487 (Kind::UninitializedStatic, _) => " (static in the process of initialization)",
488 (Kind::Static, Mutability::Mutable) => " (static mut)",
489 (Kind::Static, Mutability::Immutable) => " (immutable)",
490 (Kind::Env, _) => " (env var)",
491 (Kind::C, _) => " (malloc)",
492 (Kind::Rust, _) => " (heap)",
493 (Kind::Stack, _) => " (stack)",
495 trace!("{}({} bytes, alignment {}){}", msg, alloc.bytes.len(), alloc.align, immutable);
497 if !relocations.is_empty() {
499 write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
501 let relocation_width = (self.pointer_size() - 1) * 3;
502 for (i, target_id) in relocations {
503 // this `as usize` is fine, since we can't print more chars than `usize::MAX`
504 write!(msg, "{:1$}", "", ((i - pos) * 3) as usize).unwrap();
505 let target = format!("({})", target_id);
506 // this `as usize` is fine, since we can't print more chars than `usize::MAX`
507 write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
508 pos = i + self.pointer_size();
515 pub fn leak_report(&self) -> usize {
516 trace!("### LEAK REPORT ###");
517 let leaks: Vec<_> = self.alloc_map
519 .filter_map(|(&key, val)| {
520 if val.kind != Kind::Static {
528 self.dump_allocs(leaks);
534 impl<'a, 'tcx> Memory<'a, 'tcx> {
535 fn get_bytes_unchecked(&self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> {
536 // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
537 if self.reads_are_aligned {
538 self.check_align(ptr.into(), align)?;
543 self.check_bounds(ptr.offset(size, self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
544 let alloc = self.get(ptr.alloc_id)?;
545 assert_eq!(ptr.offset as usize as u64, ptr.offset);
546 assert_eq!(size as usize as u64, size);
547 let offset = ptr.offset as usize;
548 Ok(&alloc.bytes[offset..offset + size as usize])
551 fn get_bytes_unchecked_mut(&mut self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &mut [u8]> {
552 // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
553 if self.writes_are_aligned {
554 self.check_align(ptr.into(), align)?;
559 self.check_bounds(ptr.offset(size, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
560 let alloc = self.get_mut(ptr.alloc_id)?;
561 assert_eq!(ptr.offset as usize as u64, ptr.offset);
562 assert_eq!(size as usize as u64, size);
563 let offset = ptr.offset as usize;
564 Ok(&mut alloc.bytes[offset..offset + size as usize])
567 fn get_bytes(&self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> {
569 if self.relocations(ptr, size)?.count() != 0 {
570 return Err(EvalError::ReadPointerAsBytes);
572 self.check_defined(ptr, size)?;
573 self.get_bytes_unchecked(ptr, size, align)
576 fn get_bytes_mut(&mut self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &mut [u8]> {
578 self.clear_relocations(ptr, size)?;
579 self.mark_definedness(ptr.into(), size, true)?;
580 self.get_bytes_unchecked_mut(ptr, size, align)
584 /// Reading and writing
585 impl<'a, 'tcx> Memory<'a, 'tcx> {
586 /// mark an allocation as being the entry point to a static (see `static_alloc` field)
587 pub fn mark_static(&mut self, alloc_id: AllocId) {
588 trace!("mark_static: {:?}", alloc_id);
589 if !self.static_alloc.insert(alloc_id) {
590 bug!("tried to mark an allocation ({:?}) as static twice", alloc_id);
594 /// mark an allocation pointed to by a static as static and initialized
595 pub fn mark_inner_allocation(&mut self, alloc: AllocId, mutability: Mutability) -> EvalResult<'tcx> {
596 // relocations into other statics are not "inner allocations"
597 if !self.static_alloc.contains(&alloc) {
598 self.mark_static_initalized(alloc, mutability)?;
603 /// mark an allocation as static and initialized, either mutable or not
604 pub fn mark_static_initalized(&mut self, alloc_id: AllocId, mutability: Mutability) -> EvalResult<'tcx> {
605 trace!("mark_static_initalized {:?}, mutability: {:?}", alloc_id, mutability);
606 // do not use `self.get_mut(alloc_id)` here, because we might have already marked a
607 // sub-element or have circular pointers (e.g. `Rc`-cycles)
608 let relocations = match self.alloc_map.get_mut(&alloc_id) {
609 Some(&mut Allocation { ref mut relocations, ref mut kind, ref mut mutable, .. }) => {
611 // const eval results can refer to "locals".
612 // E.g. `const Foo: &u32 = &1;` refers to the temp local that stores the `1`
614 // The entire point of this function
615 Kind::UninitializedStatic |
616 // In the future const eval will allow heap allocations so we'll need to protect them
617 // from deallocation, too
621 trace!("mark_static_initalized: skipping already initialized static referred to by static currently being initialized");
624 // FIXME: This could be allowed, but not for env vars set during miri execution
625 Kind::Env => return Err(EvalError::Unimplemented("statics can't refer to env vars".to_owned())),
627 *kind = Kind::Static;
628 *mutable = mutability;
629 // take out the relocations vector to free the borrow on self, so we can call
631 mem::replace(relocations, Default::default())
633 None if !self.functions.contains_key(&alloc_id) => return Err(EvalError::DanglingPointerDeref),
636 // recurse into inner allocations
637 for &alloc in relocations.values() {
638 self.mark_inner_allocation(alloc, mutability)?;
640 // put back the relocations
641 self.alloc_map.get_mut(&alloc_id).expect("checked above").relocations = relocations;
645 pub fn copy(&mut self, src: Pointer, dest: Pointer, size: u64, align: u64, nonoverlapping: bool) -> EvalResult<'tcx> {
647 // Empty accesses don't need to be valid pointers, but they should still be aligned
648 if self.reads_are_aligned {
649 self.check_align(src, align)?;
651 if self.writes_are_aligned {
652 self.check_align(dest, align)?;
656 let src = src.to_ptr()?;
657 let dest = dest.to_ptr()?;
658 self.check_relocation_edges(src, size)?;
660 let src_bytes = self.get_bytes_unchecked(src, size, align)?.as_ptr();
661 let dest_bytes = self.get_bytes_mut(dest, size, align)?.as_mut_ptr();
663 // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
664 // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
665 // `dest` could possibly overlap.
667 assert_eq!(size as usize as u64, size);
668 if src.alloc_id == dest.alloc_id {
670 if (src.offset <= dest.offset && src.offset + size > dest.offset) ||
671 (dest.offset <= src.offset && dest.offset + size > src.offset) {
672 return Err(EvalError::Intrinsic("copy_nonoverlapping called on overlapping ranges".to_string()));
675 ptr::copy(src_bytes, dest_bytes, size as usize);
677 ptr::copy_nonoverlapping(src_bytes, dest_bytes, size as usize);
681 self.copy_undef_mask(src, dest, size)?;
682 self.copy_relocations(src, dest, size)?;
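// Sketch of the overlap rule above, assuming `ptr` is the base of a mutable,
// relocation-free allocation of at least 16 bytes inside `mem`: a copy with
// `nonoverlapping` set is rejected when the ranges share bytes, while the
// plain overlapping copy is allowed.
fn _copy_overlap_sketch<'m, 't>(mem: &mut Memory<'m, 't>, ptr: MemoryPointer) -> EvalResult<'t> {
    let shifted = ptr.offset(4, mem.layout)?;
    assert!(mem.copy(ptr.into(), shifted.into(), 8, 1, /*nonoverlapping*/ true).is_err());
    mem.copy(ptr.into(), shifted.into(), 8, 1, /*nonoverlapping*/ false)
}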
687 pub fn read_c_str(&self, ptr: MemoryPointer) -> EvalResult<'tcx, &[u8]> {
688 let alloc = self.get(ptr.alloc_id)?;
689 assert_eq!(ptr.offset as usize as u64, ptr.offset);
690 let offset = ptr.offset as usize;
691 match alloc.bytes[offset..].iter().position(|&c| c == 0) {
693 if self.relocations(ptr, (size + 1) as u64)?.count() != 0 {
694 return Err(EvalError::ReadPointerAsBytes);
696 self.check_defined(ptr, (size + 1) as u64)?;
697 Ok(&alloc.bytes[offset..offset + size])
699 None => Err(EvalError::UnterminatedCString(ptr)),
703 pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> {
705 // Empty accesses don't need to be valid pointers, but they should still be non-NULL
706 if self.reads_are_aligned {
707 self.check_align(ptr, 1)?;
711 self.get_bytes(ptr.to_ptr()?, size, 1)
714 pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<'tcx> {
716 // Empty accesses don't need to be valid pointers, but they should still be non-NULL
717 if self.writes_are_aligned {
718 self.check_align(ptr, 1)?;
722 let bytes = self.get_bytes_mut(ptr.to_ptr()?, src.len() as u64, 1)?;
723 bytes.clone_from_slice(src);
727 pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx> {
729 // Empty accesses don't need to be valid pointers, but they should still be non-NULL
730 if self.writes_are_aligned {
731 self.check_align(ptr, 1)?;
735 let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, 1)?;
736 for b in bytes { *b = val; }
740 pub fn read_ptr(&self, ptr: MemoryPointer) -> EvalResult<'tcx, Pointer> {
741 let size = self.pointer_size();
742 self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer
743 let endianess = self.endianess();
744 let bytes = self.get_bytes_unchecked(ptr, size, size)?;
745 // Undef check happens *after* we established that the alignment is correct.
746 // We must not return Ok() for unaligned pointers!
747 if self.check_defined(ptr, size).is_err() {
748 return Ok(PrimVal::Undef.into());
750 let offset = read_target_uint(endianess, bytes).unwrap();
751 assert_eq!(offset as u64 as u128, offset);
752 let offset = offset as u64;
753 let alloc = self.get(ptr.alloc_id)?;
754 match alloc.relocations.get(&ptr.offset) {
755 Some(&alloc_id) => Ok(PrimVal::Ptr(MemoryPointer::new(alloc_id, offset)).into()),
756 None => Ok(PrimVal::Bytes(offset as u128).into()),
760 pub fn write_ptr(&mut self, dest: MemoryPointer, ptr: MemoryPointer) -> EvalResult<'tcx> {
761 self.write_usize(dest, ptr.offset as u64)?;
762 self.get_mut(dest.alloc_id)?.relocations.insert(dest.offset, ptr.alloc_id);
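// Sketch of how relocations preserve pointer identity, assuming `dest` and
// `target` point into suitably aligned, mutable allocations of `mem` with room
// for a pointer: the raw bytes only hold `target.offset`, the relocation entry
// at `dest.offset` holds the `AllocId`, and reading the value back reassembles
// the full pointer.
fn _ptr_roundtrip_sketch<'m, 't>(
    mem: &mut Memory<'m, 't>,
    dest: MemoryPointer,
    target: MemoryPointer,
) -> EvalResult<'t> {
    mem.write_ptr(dest, target)?;
    assert_eq!(mem.read_ptr(dest)?.to_ptr()?, target);
    Ok(())
}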
766 pub fn write_primval(
771 ) -> EvalResult<'tcx> {
773 PrimVal::Ptr(ptr) => {
774 assert_eq!(size, self.pointer_size());
775 self.write_ptr(dest.to_ptr()?, ptr)
778 PrimVal::Bytes(bytes) => {
779 // We need to mask here, or the byteorder crate will panic when given a value larger
780 // than fits in an integer of the requested size.
781 let mask = match size {
787 n => bug!("unexpected PrimVal::Bytes size: {}", n),
789 self.write_uint(dest.to_ptr()?, bytes & mask, size)
792 PrimVal::Undef => self.mark_definedness(dest, size, false),
796 pub fn read_bool(&self, ptr: MemoryPointer) -> EvalResult<'tcx, bool> {
797 let bytes = self.get_bytes(ptr, 1, self.layout.i1_align.abi())?;
801 _ => Err(EvalError::InvalidBool),
805 pub fn write_bool(&mut self, ptr: MemoryPointer, b: bool) -> EvalResult<'tcx> {
806 let align = self.layout.i1_align.abi();
807 self.get_bytes_mut(ptr, 1, align)
808 .map(|bytes| bytes[0] = b as u8)
811 fn int_align(&self, size: u64) -> EvalResult<'tcx, u64> {
813 1 => Ok(self.layout.i8_align.abi()),
814 2 => Ok(self.layout.i16_align.abi()),
815 4 => Ok(self.layout.i32_align.abi()),
816 8 => Ok(self.layout.i64_align.abi()),
817 16 => Ok(self.layout.i128_align.abi()),
818 _ => bug!("bad integer size: {}", size),
822 pub fn read_int(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx, i128> {
823 let align = self.int_align(size)?;
824 self.get_bytes(ptr, size, align).map(|b| read_target_int(self.endianess(), b).unwrap())
827 pub fn write_int(&mut self, ptr: MemoryPointer, n: i128, size: u64) -> EvalResult<'tcx> {
828 let align = self.int_align(size)?;
829 let endianess = self.endianess();
830 let b = self.get_bytes_mut(ptr, size, align)?;
831 write_target_int(endianess, b, n).unwrap();
835 pub fn read_uint(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx, u128> {
836 let align = self.int_align(size)?;
837 self.get_bytes(ptr, size, align).map(|b| read_target_uint(self.endianess(), b).unwrap())
840 pub fn write_uint(&mut self, ptr: MemoryPointer, n: u128, size: u64) -> EvalResult<'tcx> {
841 let align = self.int_align(size)?;
842 let endianess = self.endianess();
843 let b = self.get_bytes_mut(ptr, size, align)?;
844 write_target_uint(endianess, b, n).unwrap();
848 pub fn read_isize(&self, ptr: MemoryPointer) -> EvalResult<'tcx, i64> {
849 self.read_int(ptr, self.pointer_size()).map(|i| i as i64)
852 pub fn write_isize(&mut self, ptr: MemoryPointer, n: i64) -> EvalResult<'tcx> {
853 let size = self.pointer_size();
854 self.write_int(ptr, n as i128, size)
857 pub fn read_usize(&self, ptr: MemoryPointer) -> EvalResult<'tcx, u64> {
858 self.read_uint(ptr, self.pointer_size()).map(|i| i as u64)
861 pub fn write_usize(&mut self, ptr: MemoryPointer, n: u64) -> EvalResult<'tcx> {
862 let size = self.pointer_size();
863 self.write_uint(ptr, n as u128, size)
866 pub fn write_f32(&mut self, ptr: MemoryPointer, f: f32) -> EvalResult<'tcx> {
867 let endianess = self.endianess();
868 let align = self.layout.f32_align.abi();
869 let b = self.get_bytes_mut(ptr, 4, align)?;
870 write_target_f32(endianess, b, f).unwrap();
874 pub fn write_f64(&mut self, ptr: MemoryPointer, f: f64) -> EvalResult<'tcx> {
875 let endianess = self.endianess();
876 let align = self.layout.f64_align.abi();
877 let b = self.get_bytes_mut(ptr, 8, align)?;
878 write_target_f64(endianess, b, f).unwrap();
882 pub fn read_f32(&self, ptr: MemoryPointer) -> EvalResult<'tcx, f32> {
883 self.get_bytes(ptr, 4, self.layout.f32_align.abi())
884 .map(|b| read_target_f32(self.endianess(), b).unwrap())
887 pub fn read_f64(&self, ptr: MemoryPointer) -> EvalResult<'tcx, f64> {
888 self.get_bytes(ptr, 8, self.layout.f64_align.abi())
889 .map(|b| read_target_f64(self.endianess(), b).unwrap())
894 impl<'a, 'tcx> Memory<'a, 'tcx> {
895 fn relocations(&self, ptr: MemoryPointer, size: u64)
896 -> EvalResult<'tcx, btree_map::Range<u64, AllocId>>
898 let start = ptr.offset.saturating_sub(self.pointer_size() - 1);
899 let end = ptr.offset + size;
900 Ok(self.get(ptr.alloc_id)?.relocations.range(start..end))
903 fn clear_relocations(&mut self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
904 // Find all relocations overlapping the given range.
905 let keys: Vec<_> = self.relocations(ptr, size)?.map(|(&k, _)| k).collect();
906 if keys.is_empty() { return Ok(()); }
908 // Find the start and end of the given range and its outermost relocations.
909 let start = ptr.offset;
910 let end = start + size;
911 let first = *keys.first().unwrap();
912 let last = *keys.last().unwrap() + self.pointer_size();
914 let alloc = self.get_mut(ptr.alloc_id)?;
916 // Mark parts of the outermost relocations as undefined if they partially fall outside the
918 if first < start { alloc.undef_mask.set_range(first, start, false); }
919 if last > end { alloc.undef_mask.set_range(end, last, false); }
921 // Forget all the relocations.
922 for k in keys { alloc.relocations.remove(&k); }
927 fn check_relocation_edges(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
928 let overlapping_start = self.relocations(ptr, 0)?.count();
929 let overlapping_end = self.relocations(ptr.offset(size, self.layout)?, 0)?.count();
930 if overlapping_start + overlapping_end != 0 {
931 return Err(EvalError::ReadPointerAsBytes);
936 fn copy_relocations(&mut self, src: MemoryPointer, dest: MemoryPointer, size: u64) -> EvalResult<'tcx> {
937 let relocations: Vec<_> = self.relocations(src, size)?
938 .map(|(&offset, &alloc_id)| {
939 // Update relocation offsets for the new positions in the destination allocation.
940 (offset + dest.offset - src.offset, alloc_id)
943 self.get_mut(dest.alloc_id)?.relocations.extend(relocations);
949 impl<'a, 'tcx> Memory<'a, 'tcx> {
950 // FIXME(solson): This is a very naive, slow version.
951 fn copy_undef_mask(&mut self, src: MemoryPointer, dest: MemoryPointer, size: u64) -> EvalResult<'tcx> {
952 // The bits have to be saved locally before writing to dest in case src and dest overlap.
953 assert_eq!(size as usize as u64, size);
954 let mut v = Vec::with_capacity(size as usize);
956 let defined = self.get(src.alloc_id)?.undef_mask.get(src.offset + i);
959 for (i, defined) in v.into_iter().enumerate() {
960 self.get_mut(dest.alloc_id)?.undef_mask.set(dest.offset + i as u64, defined);
965 fn check_defined(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
966 let alloc = self.get(ptr.alloc_id)?;
967 if !alloc.undef_mask.is_range_defined(ptr.offset, ptr.offset + size) {
968 return Err(EvalError::ReadUndefBytes);
973 pub fn mark_definedness(
978 ) -> EvalResult<'tcx> {
982 let ptr = ptr.to_ptr()?;
983 let mut alloc = self.get_mut(ptr.alloc_id)?;
984 alloc.undef_mask.set_range(ptr.offset, ptr.offset + size, new_state);
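// Sketch of the undef-tracking contract, assuming `mem` is a `Memory` with
// spare capacity: fresh bytes are undefined and reading them fails until a
// write (or an explicit `mark_definedness`) defines the range.
fn _undef_tracking_sketch<'m, 't>(mem: &mut Memory<'m, 't>) -> EvalResult<'t> {
    let ptr = mem.allocate(2, 2, Kind::Stack)?;
    assert!(mem.read_uint(ptr, 2).is_err());  // ReadUndefBytes
    mem.write_uint(ptr, 0xffff, 2)?;          // writing marks the bytes as defined
    assert_eq!(mem.read_uint(ptr, 2)?, 0xffff);
    Ok(())
}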
989 ////////////////////////////////////////////////////////////////////////////////
990 // Methods to access integers in the target endianness
991 ////////////////////////////////////////////////////////////////////////////////
993 fn write_target_uint(endianess: layout::Endian, mut target: &mut [u8], data: u128) -> Result<(), io::Error> {
994 let len = target.len();
996 layout::Endian::Little => target.write_uint128::<LittleEndian>(data, len),
997 layout::Endian::Big => target.write_uint128::<BigEndian>(data, len),
1000 fn write_target_int(endianess: layout::Endian, mut target: &mut [u8], data: i128) -> Result<(), io::Error> {
1001 let len = target.len();
1003 layout::Endian::Little => target.write_int128::<LittleEndian>(data, len),
1004 layout::Endian::Big => target.write_int128::<BigEndian>(data, len),
1008 fn read_target_uint(endianess: layout::Endian, mut source: &[u8]) -> Result<u128, io::Error> {
1010 layout::Endian::Little => source.read_uint128::<LittleEndian>(source.len()),
1011 layout::Endian::Big => source.read_uint128::<BigEndian>(source.len()),
1014 fn read_target_int(endianess: layout::Endian, mut source: &[u8]) -> Result<i128, io::Error> {
1016 layout::Endian::Little => source.read_int128::<LittleEndian>(source.len()),
1017 layout::Endian::Big => source.read_int128::<BigEndian>(source.len()),
1021 ////////////////////////////////////////////////////////////////////////////////
1022 // Methods to access floats in the target endianness
1023 ////////////////////////////////////////////////////////////////////////////////
1025 fn write_target_f32(endianess: layout::Endian, mut target: &mut [u8], data: f32) -> Result<(), io::Error> {
1027 layout::Endian::Little => target.write_f32::<LittleEndian>(data),
1028 layout::Endian::Big => target.write_f32::<BigEndian>(data),
1031 fn write_target_f64(endianess: layout::Endian, mut target: &mut [u8], data: f64) -> Result<(), io::Error> {
1033 layout::Endian::Little => target.write_f64::<LittleEndian>(data),
1034 layout::Endian::Big => target.write_f64::<BigEndian>(data),
1038 fn read_target_f32(endianess: layout::Endian, mut source: &[u8]) -> Result<f32, io::Error> {
1040 layout::Endian::Little => source.read_f32::<LittleEndian>(),
1041 layout::Endian::Big => source.read_f32::<BigEndian>(),
1044 fn read_target_f64(endianess: layout::Endian, mut source: &[u8]) -> Result<f64, io::Error> {
1046 layout::Endian::Little => source.read_f64::<LittleEndian>(),
1047 layout::Endian::Big => source.read_f64::<BigEndian>(),
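// Sketch of the integer helpers above, runnable on its own: the value is
// written into however many bytes the target slice provides, in the requested
// byte order, and read back the same way.
fn _target_uint_roundtrip_sketch() -> Result<(), io::Error> {
    let mut buf = [0u8; 2];
    write_target_uint(layout::Endian::Big, &mut buf, 0x1234)?;
    assert_eq!(buf, [0x12, 0x34]);
    assert_eq!(read_target_uint(layout::Endian::Big, &buf)?, 0x1234);
    write_target_uint(layout::Endian::Little, &mut buf, 0x1234)?;
    assert_eq!(buf, [0x34, 0x12]);
    Ok(())
}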
1051 ////////////////////////////////////////////////////////////////////////////////
1052 // Undefined byte tracking
1053 ////////////////////////////////////////////////////////////////////////////////
1056 const BLOCK_SIZE: u64 = 64;
1058 #[derive(Clone, Debug)]
1059 pub struct UndefMask {
1065 fn new(size: u64) -> Self {
1066 let mut m = UndefMask {
1070 m.grow(size, false);
1074 /// Check whether the range `start..end` (end-exclusive) is entirely defined.
1075 pub fn is_range_defined(&self, start: u64, end: u64) -> bool {
1076 if end > self.len { return false; }
1077 for i in start..end {
1078 if !self.get(i) { return false; }
1083 fn set_range(&mut self, start: u64, end: u64, new_state: bool) {
1085 if end > len { self.grow(end - len, new_state); }
1086 self.set_range_inbounds(start, end, new_state);
1089 fn set_range_inbounds(&mut self, start: u64, end: u64, new_state: bool) {
1090 for i in start..end { self.set(i, new_state); }
1093 fn get(&self, i: u64) -> bool {
1094 let (block, bit) = bit_index(i);
1095 (self.blocks[block] & 1 << bit) != 0
1098 fn set(&mut self, i: u64, new_state: bool) {
1099 let (block, bit) = bit_index(i);
1101 self.blocks[block] |= 1 << bit;
1103 self.blocks[block] &= !(1 << bit);
1107 fn grow(&mut self, amount: u64, new_state: bool) {
1108 let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len;
1109 if amount > unused_trailing_bits {
1110 let additional_blocks = amount / BLOCK_SIZE + 1;
1111 assert_eq!(additional_blocks as usize as u64, additional_blocks);
1112 self.blocks.extend(iter::repeat(0).take(additional_blocks as usize));
1114 let start = self.len;
1116 self.set_range_inbounds(start, start + amount, new_state);
1120 fn bit_index(bits: u64) -> (usize, usize) {
1121 let a = bits / BLOCK_SIZE;
1122 let b = bits % BLOCK_SIZE;
1123 assert_eq!(a as usize as u64, a);
1124 assert_eq!(b as usize as u64, b);
1125 (a as usize, b as usize)
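// Sketch of the mask on its own, independent of any allocation: a fresh mask
// is fully undefined, and `set_range` flips a sub-range to defined.
fn _undef_mask_sketch() {
    let mut mask = UndefMask::new(16);
    assert!(!mask.is_range_defined(0, 16));
    mask.set_range(2, 6, true);
    assert!(mask.is_range_defined(2, 6));
    assert!(!mask.is_range_defined(0, 3)); // bytes 0 and 1 are still undefined
}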
1128 ////////////////////////////////////////////////////////////////////////////////
1129 // Unaligned accesses
1130 ////////////////////////////////////////////////////////////////////////////////
1132 pub(crate) trait HasMemory<'a, 'tcx> {
1133 fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx>;
1134 fn memory(&self) -> &Memory<'a, 'tcx>;
1136 // These are not supposed to be overridden.
1137 fn read_maybe_aligned<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
1138 where F: FnOnce(&mut Self) -> EvalResult<'tcx, T>
1140 assert!(self.memory_mut().reads_are_aligned, "Unaligned reads must not be nested");
1141 self.memory_mut().reads_are_aligned = aligned;
1143 self.memory_mut().reads_are_aligned = true;
1147 fn write_maybe_aligned<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
1148 where F: FnOnce(&mut Self) -> EvalResult<'tcx, T>
1150 assert!(self.memory_mut().writes_are_aligned, "Unaligned writes must not be nested");
1151 self.memory_mut().writes_are_aligned = aligned;
1153 self.memory_mut().writes_are_aligned = true;
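// Sketch of opting out of alignment checking for a single access, assuming
// `mem` is a `Memory` and `ptr` points at live, defined bytes: inside the
// closure reads skip the alignment check, and the flag is restored afterwards.
fn _unaligned_read_sketch<'m, 't>(mem: &mut Memory<'m, 't>, ptr: MemoryPointer) -> EvalResult<'t, u128> {
    mem.read_maybe_aligned(/*aligned*/ false, |mem| mem.read_uint(ptr, 2))
}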
1158 impl<'a, 'tcx> HasMemory<'a, 'tcx> for Memory<'a, 'tcx> {
1160 fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx> {
1165 fn memory(&self) -> &Memory<'a, 'tcx> {
1170 impl<'a, 'tcx> HasMemory<'a, 'tcx> for EvalContext<'a, 'tcx> {
1172 fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx> {
1177 fn memory(&self) -> &Memory<'a, 'tcx> {
1182 ////////////////////////////////////////////////////////////////////////////////
1183 // Pointer arithmetic
1184 ////////////////////////////////////////////////////////////////////////////////
1186 pub trait PointerArithmetic : layout::HasDataLayout {
1187 // These are not supposed to be overridden.
1189 /// Truncate the given value to the pointer size; also return whether there was an overflow
1190 fn truncate_to_ptr(self, val: u128) -> (u64, bool) {
1191 let max_ptr_plus_1 = 1u128 << self.data_layout().pointer_size.bits();
1192 ((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1)
1195 // Overflow checking only works properly on the range from -u64 to +u64.
1196 fn overflowing_signed_offset(self, val: u64, i: i128) -> (u64, bool) {
1197 // FIXME: is it possible to over/underflow here?
1199 // trickery to ensure that i64::min_value() works fine
1200 // this formula only works for truly negative values; it panics for zero!
1201 let n = u64::max_value() - (i as u64) + 1;
1202 val.overflowing_sub(n)
1204 self.overflowing_offset(val, i as u64)
1208 fn overflowing_offset(self, val: u64, i: u64) -> (u64, bool) {
1209 let (res, over1) = val.overflowing_add(i);
1210 let (res, over2) = self.truncate_to_ptr(res as u128);
1211 (res, over1 || over2)
1214 fn signed_offset<'tcx>(self, val: u64, i: i64) -> EvalResult<'tcx, u64> {
1215 let (res, over) = self.overflowing_signed_offset(val, i as i128);
1217 Err(EvalError::OverflowingMath)
1223 fn offset<'tcx>(self, val: u64, i: u64) -> EvalResult<'tcx, u64> {
1224 let (res, over) = self.overflowing_offset(val, i);
1226 Err(EvalError::OverflowingMath)
1232 fn wrapping_signed_offset(self, val: u64, i: i64) -> u64 {
1233 self.overflowing_signed_offset(val, i as i128).0
1237 impl<T: layout::HasDataLayout> PointerArithmetic for T {}
1239 impl<'a, 'tcx> layout::HasDataLayout for &'a Memory<'a, 'tcx> {
1241 fn data_layout(&self) -> &TargetDataLayout {
1245 impl<'a, 'tcx> layout::HasDataLayout for &'a EvalContext<'a, 'tcx> {
1247 fn data_layout(&self) -> &TargetDataLayout {
1248 self.memory().layout