1 use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian, BigEndian};
2 use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque};
3 use std::{fmt, iter, ptr, mem, io, ops};
6 use rustc::ty::layout::{self, TargetDataLayout, HasDataLayout};
7 use syntax::ast::Mutability;
8 use rustc::middle::region::CodeExtent;
10 use error::{EvalError, EvalResult};
11 use value::{PrimVal, Pointer};
12 use eval_context::EvalContext;
14 ////////////////////////////////////////////////////////////////////////////////
16 ////////////////////////////////////////////////////////////////////////////////
21 // The derived `Ord` impl sorts first by the first field and, if the first fields are equal,
22 // by the second field.
23 // This is exactly what we need for our purposes, since a range query on a BTreeSet/BTreeMap will give us all
24 // `MemoryRange`s whose `start` is <= the one we're looking for, but not > the end of the range we're checking.
25 // At the same time the `end` is irrelevant for the sorting and range searching, but it is used for the check.
26 // This kind of search breaks if `end < start`, so don't do that!
27 #[derive(Eq, PartialEq, Ord, PartialOrd, Debug)]
28 pub struct MemoryRange {
34 pub fn new(offset: u64, len: u64) -> MemoryRange {
42 pub fn offset(&self) -> u64 {
46 pub fn len(&self) -> u64 {
50 pub fn range(offset: u64, len: u64) -> ops::Range<MemoryRange> {
52 // We select all elements that are within
53 // the range given by the offset into the allocation and the length.
54 // This is sound if "self.contains() || self.overlaps() == true" implies that self is in-range.
55 let left = MemoryRange {
59 let right = MemoryRange {
60 start: offset + len + 1,
66 pub fn contained_in(&self, offset: u64, len: u64) -> bool {
68 offset <= self.start && self.end <= (offset + len)
71 pub fn overlaps(&self, offset: u64, len: u64) -> bool {
73 //let non_overlap = (offset + len) <= self.start || self.end <= offset;
74 (offset + len) > self.start && self.end > offset
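// Illustrative sketch, not part of the original code: how the `Ord`-based key range
// and the `overlaps` filter are meant to work together. The concrete offsets below
// are made up for the example.
#[allow(dead_code)]
fn memory_range_query_example() {
    // A lock covering bytes 4..8 of some allocation.
    let lock = MemoryRange::new(4, 4);
    // An access to bytes 6..10 overlaps the lock; an access to bytes 8..12 does not
    // (ranges are end-exclusive).
    assert!(lock.overlaps(6, 4));
    assert!(!lock.overlaps(8, 4));
    // `range` produces the coarse key range handed to `BTreeMap::range`; `overlaps`
    // then discards the false positives that this coarse range still admits.
    let _candidate_keys = MemoryRange::range(6, 4);
}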
80 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
86 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
87 struct DynamicLifetime {
89 pub region: Option<CodeExtent>, // "None" indicates "until the function ends"
92 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
95 RecoverAfter(CodeExtent), // the frame is given by the surrounding LockInfo's lifetime.
98 /// Information about a lock that is or will be held.
99 #[derive(Copy, Clone, Debug)]
100 pub struct LockInfo {
102 lifetime: DynamicLifetime,
107 fn access_permitted(&self, frame: usize, access: AccessKind) -> bool {
108 use self::AccessKind::*;
109 match (self.kind, access) {
110 (Read, Read) => true, // Read access to read-locked region is okay, no matter who's holding the read lock.
111 (Write, _) if self.lifetime.frame == frame => true, // All access is okay when we hold the write lock.
112 _ => false, // Either someone else holds the write lock, or this is a write access to a read-locked region -- both conflict.
117 ////////////////////////////////////////////////////////////////////////////////
118 // Allocations and pointers
119 ////////////////////////////////////////////////////////////////////////////////
121 #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
122 pub struct AllocId(pub u64);
124 impl fmt::Display for AllocId {
125 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
126 write!(f, "{}", self.0)
131 pub struct Allocation {
132 /// The actual bytes of the allocation.
133 /// Note that the bytes of a pointer represent the offset of the pointer
135 /// Maps from byte addresses to allocations.
136 /// Only the first byte of a pointer is inserted into the map.
137 pub relocations: BTreeMap<u64, AllocId>,
138 /// Denotes undefined memory. Reading from undefined memory is forbidden in miri
139 pub undef_mask: UndefMask,
140 /// The alignment of the allocation to detect unaligned reads.
142 /// Whether the allocation may be modified.
143 pub mutable: Mutability,
144 /// Use the `mark_static_initalized` method of `Memory` to ensure that an error occurs if the memory of this
145 /// allocation is modified or deallocated in the future.
146 /// Helps guarantee that stack allocations aren't deallocated via `rust_deallocate`
148 /// Memory regions that are locked by some function
149 locks: BTreeMap<MemoryRange, Vec<LockInfo>>,
153 fn iter_locks<'a>(&'a self, offset: u64, len: u64) -> impl Iterator<Item=&'a LockInfo> + 'a {
154 self.locks.range(MemoryRange::range(offset, len))
155 .filter(move |&(range, _)| range.overlaps(offset, len))
156 .flat_map(|(_, locks)| locks.iter())
159 fn iter_lock_vecs_mut<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator<Item=(&'a MemoryRange, &'a mut Vec<LockInfo>)> + 'a {
160 self.locks.range_mut(MemoryRange::range(offset, len))
161 .filter(move |&(range, _)| range.overlaps(offset, len))
164 fn check_locks<'tcx>(&self, frame: usize, offset: u64, len: u64, access: AccessKind) -> Result<(), LockInfo> {
168 for lock in self.iter_locks(offset, len) {
169 // Check if the lock is active, and is in conflict with the access.
170 if lock.status == LockStatus::Held && !lock.access_permitted(frame, access) {
178 #[derive(Debug, PartialEq, Copy, Clone)]
180 /// Error if deallocated in any way other than `rust_deallocate`
182 /// Error if deallocated in any way other than `free`
184 /// Error if deallocated except during a stack pop
186 /// Static in the process of being initialized.
187 /// The difference is important: an immutable static referring to a mutable,
188 /// already-initialized static would be frozen as immutable, and we would no longer
189 /// be able to distinguish already-initialized statics from uninitialized ones.
191 /// May never be deallocated
193 /// Part of env var emulation
197 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
198 pub struct MemoryPointer {
199 pub alloc_id: AllocId,
203 impl<'tcx> MemoryPointer {
204 pub fn new(alloc_id: AllocId, offset: u64) -> Self {
205 MemoryPointer { alloc_id, offset }
208 pub(crate) fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
209 MemoryPointer::new(self.alloc_id, cx.data_layout().wrapping_signed_offset(self.offset, i))
212 pub(crate) fn overflowing_signed_offset<C: HasDataLayout>(self, i: i128, cx: C) -> (Self, bool) {
213 let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset, i);
214 (MemoryPointer::new(self.alloc_id, res), over)
217 pub(crate) fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
218 Ok(MemoryPointer::new(self.alloc_id, cx.data_layout().signed_offset(self.offset, i)?))
221 pub(crate) fn overflowing_offset<C: HasDataLayout>(self, i: u64, cx: C) -> (Self, bool) {
222 let (res, over) = cx.data_layout().overflowing_offset(self.offset, i);
223 (MemoryPointer::new(self.alloc_id, res), over)
226 pub(crate) fn offset<C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
227 Ok(MemoryPointer::new(self.alloc_id, cx.data_layout().offset(self.offset, i)?))
231 ////////////////////////////////////////////////////////////////////////////////
232 // Top-level interpreter memory
233 ////////////////////////////////////////////////////////////////////////////////
235 pub type TlsKey = usize;
237 #[derive(Copy, Clone, Debug)]
238 pub struct TlsEntry<'tcx> {
239 data: Pointer, // Will eventually become a map from thread IDs to `Pointer`s, if we ever support more than one thread.
240 dtor: Option<ty::Instance<'tcx>>,
243 pub struct Memory<'a, 'tcx> {
244 /// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations).
245 alloc_map: HashMap<AllocId, Allocation>,
247 /// The AllocId to assign to the next new allocation. Always incremented, never gets smaller.
250 /// Set of statics, constants, promoteds, vtables, ... to prevent `mark_static_initalized` from
251 /// stepping out of its own allocations. This set only contains statics backed by an
252 /// allocation. If they are ByVal or ByValPair they are not here, but will be inserted once
253 /// they become ByRef.
254 static_alloc: HashSet<AllocId>,
256 /// Number of virtual bytes allocated.
259 /// Maximum number of virtual bytes that may be allocated.
262 /// Function "allocations". They exist solely so pointers have something to point to, and
263 /// we can figure out what they point to.
264 functions: HashMap<AllocId, ty::Instance<'tcx>>,
266 /// Inverse map of `functions` so we don't allocate a new pointer every time we need one
267 function_alloc_cache: HashMap<ty::Instance<'tcx>, AllocId>,
269 /// Target machine data layout to emulate.
270 pub layout: &'a TargetDataLayout,
272 /// A cache for basic byte allocations keyed by their contents. This is used to deduplicate
273 /// allocations for string and bytestring literals.
274 literal_alloc_cache: HashMap<Vec<u8>, AllocId>,
276 /// pthreads-style thread-local storage.
277 thread_local: BTreeMap<TlsKey, TlsEntry<'tcx>>,
279 /// The Key to use for the next thread-local allocation.
280 next_thread_local: TlsKey,
282 /// To avoid having to pass flags to every single memory access, we have some global state saying whether
283 /// alignment checking is currently enforced for read and/or write accesses.
284 reads_are_aligned: bool,
285 writes_are_aligned: bool,
287 /// The current stack frame. Used to check accesses against locks.
291 impl<'a, 'tcx> Memory<'a, 'tcx> {
292 pub fn new(layout: &'a TargetDataLayout, max_memory: u64) -> Self {
294 alloc_map: HashMap::new(),
295 functions: HashMap::new(),
296 function_alloc_cache: HashMap::new(),
299 memory_size: max_memory,
301 static_alloc: HashSet::new(),
302 literal_alloc_cache: HashMap::new(),
303 thread_local: BTreeMap::new(),
304 next_thread_local: 0,
305 reads_are_aligned: true,
306 writes_are_aligned: true,
307 cur_frame: usize::max_value(),
311 pub fn allocations(&self) -> ::std::collections::hash_map::Iter<AllocId, Allocation> {
312 self.alloc_map.iter()
315 pub fn create_fn_alloc(&mut self, instance: ty::Instance<'tcx>) -> MemoryPointer {
316 if let Some(&alloc_id) = self.function_alloc_cache.get(&instance) {
317 return MemoryPointer::new(alloc_id, 0);
319 let id = self.next_id;
320 debug!("creating fn ptr: {}", id);
322 self.functions.insert(id, instance);
323 self.function_alloc_cache.insert(instance, id);
324 MemoryPointer::new(id, 0)
327 pub fn allocate_cached(&mut self, bytes: &[u8]) -> EvalResult<'tcx, MemoryPointer> {
328 if let Some(&alloc_id) = self.literal_alloc_cache.get(bytes) {
329 return Ok(MemoryPointer::new(alloc_id, 0));
332 let ptr = self.allocate(bytes.len() as u64, 1, Kind::UninitializedStatic)?;
333 self.write_bytes(ptr.into(), bytes)?;
334 self.mark_static_initalized(ptr.alloc_id, Mutability::Immutable)?;
335 self.literal_alloc_cache.insert(bytes.to_vec(), ptr.alloc_id);
339 pub fn allocate(&mut self, size: u64, align: u64, kind: Kind) -> EvalResult<'tcx, MemoryPointer> {
340 assert_ne!(align, 0);
341 assert!(align.is_power_of_two());
343 if self.memory_size - self.memory_usage < size {
344 return Err(EvalError::OutOfMemory {
345 allocation_size: size,
346 memory_size: self.memory_size,
347 memory_usage: self.memory_usage,
350 self.memory_usage += size;
351 assert_eq!(size as usize as u64, size);
352 let alloc = Allocation {
353 bytes: vec![0; size as usize],
354 relocations: BTreeMap::new(),
355 undef_mask: UndefMask::new(size),
358 mutable: Mutability::Mutable,
359 locks: BTreeMap::new(),
361 let id = self.next_id;
363 self.alloc_map.insert(id, alloc);
364 Ok(MemoryPointer::new(id, 0))
367 pub fn reallocate(&mut self, ptr: MemoryPointer, old_size: u64, old_align: u64, new_size: u64, new_align: u64, kind: Kind) -> EvalResult<'tcx, MemoryPointer> {
371 return Err(EvalError::ReallocateNonBasePtr);
373 if let Ok(alloc) = self.get(ptr.alloc_id) {
374 if alloc.kind != kind {
375 return Err(EvalError::ReallocatedWrongMemoryKind(alloc.kind, kind));
379 // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc"
380 let new_ptr = self.allocate(new_size, new_align, kind)?;
381 self.copy(ptr.into(), new_ptr.into(), min(old_size, new_size), min(old_align, new_align), /*nonoverlapping*/true)?;
382 self.deallocate(ptr, Some((old_size, old_align)), kind)?;
387 pub fn deallocate(&mut self, ptr: MemoryPointer, size_and_align: Option<(u64, u64)>, kind: Kind) -> EvalResult<'tcx> {
389 return Err(EvalError::DeallocateNonBasePtr);
392 let alloc = match self.alloc_map.remove(&ptr.alloc_id) {
393 Some(alloc) => alloc,
394 None => return Err(EvalError::DoubleFree),
397 // It is okay for us to still hold locks on deallocation -- for example, we could store data we own
398 // in a local, and the local could be deallocated (from StorageDead) before the function returns.
399 // However, we should check *something*. For now, we make sure that there is no conflicting write
400 // lock by another frame. We *have* to permit deallocation if we hold a read lock.
401 // TODO: Figure out the exact rules here.
402 alloc.check_locks(self.cur_frame, 0, alloc.bytes.len() as u64, AccessKind::Read)
403 .map_err(|lock| EvalError::DeallocatedLockedMemory { ptr, lock })?;
405 if alloc.kind != kind {
406 return Err(EvalError::DeallocatedWrongMemoryKind(alloc.kind, kind));
408 if let Some((size, align)) = size_and_align {
409 if size != alloc.bytes.len() as u64 || align != alloc.align {
410 return Err(EvalError::IncorrectAllocationInformation);
414 self.memory_usage -= alloc.bytes.len() as u64;
415 debug!("deallocated: {}", ptr.alloc_id);
420 pub fn pointer_size(&self) -> u64 {
421 self.layout.pointer_size.bytes()
424 pub fn endianess(&self) -> layout::Endian {
428 /// Check that the pointer is aligned AND non-NULL.
429 pub fn check_align(&self, ptr: Pointer, align: u64) -> EvalResult<'tcx> {
430 let offset = match ptr.into_inner_primval() {
431 PrimVal::Ptr(ptr) => {
432 let alloc = self.get(ptr.alloc_id)?;
433 if alloc.align < align {
434 return Err(EvalError::AlignmentCheckFailed {
441 PrimVal::Bytes(bytes) => {
442 let v = ((bytes as u128) % (1u128 << (self.pointer_size() * 8))) as u64; // truncate to the pointer width; `pointer_size()` is in bytes
444 return Err(EvalError::InvalidNullPointerUsage);
448 PrimVal::Undef => return Err(EvalError::ReadUndefBytes),
450 if offset % align == 0 {
453 Err(EvalError::AlignmentCheckFailed {
460 pub(crate) fn check_bounds(&self, ptr: MemoryPointer, access: bool) -> EvalResult<'tcx> {
461 let alloc = self.get(ptr.alloc_id)?;
462 let allocation_size = alloc.bytes.len() as u64;
463 if ptr.offset > allocation_size {
464 return Err(EvalError::PointerOutOfBounds { ptr, access, allocation_size });
469 pub(crate) fn set_cur_frame(&mut self, cur_frame: usize) {
470 self.cur_frame = cur_frame;
473 pub(crate) fn create_tls_key(&mut self, dtor: Option<ty::Instance<'tcx>>) -> TlsKey {
474 let new_key = self.next_thread_local;
475 self.next_thread_local += 1;
476 self.thread_local.insert(new_key, TlsEntry { data: Pointer::null(), dtor });
477 trace!("New TLS key allocated: {} with dtor {:?}", new_key, dtor);
481 pub(crate) fn delete_tls_key(&mut self, key: TlsKey) -> EvalResult<'tcx> {
482 return match self.thread_local.remove(&key) {
484 trace!("TLS key {} removed", key);
487 None => Err(EvalError::TlsOutOfBounds)
491 pub(crate) fn load_tls(&mut self, key: TlsKey) -> EvalResult<'tcx, Pointer> {
492 return match self.thread_local.get(&key) {
493 Some(&TlsEntry { data, .. }) => {
494 trace!("TLS key {} loaded: {:?}", key, data);
497 None => Err(EvalError::TlsOutOfBounds)
501 pub(crate) fn store_tls(&mut self, key: TlsKey, new_data: Pointer) -> EvalResult<'tcx> {
502 return match self.thread_local.get_mut(&key) {
503 Some(&mut TlsEntry { ref mut data, .. }) => {
504 trace!("TLS key {} stored: {:?}", key, new_data);
508 None => Err(EvalError::TlsOutOfBounds)
512 /// Returns a dtor, its argument and its index, if one is supposed to run
514 /// An optional destructor function may be associated with each key value.
515 /// At thread exit, if a key value has a non-NULL destructor pointer,
516 /// and the thread has a non-NULL value associated with that key,
517 /// the value of the key is set to NULL, and then the function pointed
518 /// to is called with the previously associated value as its sole argument.
519 /// The order of destructor calls is unspecified if more than one destructor
520 /// exists for a thread when it exits.
522 /// If, after all the destructors have been called for all non-NULL values
523 /// with associated destructors, there are still some non-NULL values with
524 /// associated destructors, then the process is repeated.
525 /// If, after at least {PTHREAD_DESTRUCTOR_ITERATIONS} iterations of destructor
526 /// calls for outstanding non-NULL values, there are still some non-NULL values
527 /// with associated destructors, implementations may stop calling destructors,
528 /// or they may continue calling destructors until no non-NULL values with
529 /// associated destructors exist, even though this might result in an infinite loop.
530 pub(crate) fn fetch_tls_dtor(&mut self, key: Option<TlsKey>) -> EvalResult<'tcx, Option<(ty::Instance<'tcx>, Pointer, TlsKey)>> {
531 use std::collections::Bound::*;
532 let start = match key {
533 Some(key) => Excluded(key),
536 for (&key, &mut TlsEntry { ref mut data, dtor }) in self.thread_local.range_mut((start, Unbounded)) {
537 if !data.is_null()? {
538 if let Some(dtor) = dtor {
539 let ret = Some((dtor, *data, key));
540 *data = Pointer::null();
550 impl<'a, 'tcx> Memory<'a, 'tcx> {
551 pub(crate) fn check_locks(&self, ptr: MemoryPointer, len: u64, access: AccessKind) -> EvalResult<'tcx> {
555 let alloc = self.get(ptr.alloc_id)?;
556 alloc.check_locks(self.cur_frame, ptr.offset, len, access)
557 .map_err(|lock| EvalError::MemoryLockViolation { ptr, len, access, lock })
560 /// Acquire the lock for the given lifetime
561 pub(crate) fn acquire_lock(&mut self, ptr: MemoryPointer, len: u64, region: Option<CodeExtent>, kind: AccessKind) -> EvalResult<'tcx> {
563 trace!("Acquiring {:?} lock at {:?}, size {} for region {:?}", kind, ptr, len, region);
564 self.check_bounds(ptr.offset(len, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
565 self.check_locks(ptr, len, kind)?; // make sure we have the access we are acquiring
566 let lifetime = DynamicLifetime { frame: self.cur_frame, region };
567 let alloc = self.get_mut_unchecked(ptr.alloc_id)?;
568 alloc.locks.entry(MemoryRange::new(ptr.offset, len)).or_insert_with(|| Vec::new()).push(LockInfo { lifetime, kind, status: LockStatus::Held });
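// Usage sketch, not part of the original code (an assumption about how callers are
// expected to pair the two methods; the real call sites live elsewhere in the crate):
#[allow(dead_code)]
fn write_lock_usage_sketch<'a, 'tcx>(mem: &mut Memory<'a, 'tcx>, ptr: MemoryPointer, len: u64) -> EvalResult<'tcx> {
    // Take a write lock for the remainder of the current frame (`region: None`).
    mem.acquire_lock(ptr, len, None, AccessKind::Write)?;
    // ... perform the writes that needed exclusive access ...
    // Release it early; `None` means we do not intend to recover the lock later.
    mem.release_write_lock_until(ptr, len, None)
}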
572 /// Release a write lock prematurely. If there are only read locks, do nothing.
573 pub(crate) fn release_write_lock_until(&mut self, ptr: MemoryPointer, len: u64, release_until: Option<CodeExtent>) -> EvalResult<'tcx> {
575 let cur_frame = self.cur_frame;
576 let alloc = self.get_mut_unchecked(ptr.alloc_id)?;
578 for (range, locks) in alloc.iter_lock_vecs_mut(ptr.offset, len) {
579 // Check all locks in this region; make sure there are no conflicting write locks of other frames.
580 // Also, if we will recover later, perform our release by changing the lock status.
581 for lock in locks.iter_mut() {
582 if lock.kind == AccessKind::Read || lock.status != LockStatus::Held { continue; }
583 if lock.lifetime.frame != cur_frame {
584 return Err(EvalError::InvalidMemoryLockRelease { ptr, len });
586 if !range.contained_in(ptr.offset, len) {
587 return Err(EvalError::Unimplemented(format!("miri does not support releasing part of a write-locked region")));
589 let ptr = MemoryPointer { alloc_id : ptr.alloc_id, offset: range.offset() };
590 trace!("Releasing write lock at {:?}, size {} until {:?}", ptr, range.len(), release_until);
591 if let Some(region) = release_until {
592 lock.status = LockStatus::RecoverAfter(region);
596 // If we will not recover, we did not do anything above except for some checks. Now, erase the locks from the list.
597 if let None = release_until {
598 // Delete everything that's a held write lock. We already checked above that these are ours.
599 // Unfortunately, this duplicates the condition from above. Is there anything we can do about this?
600 locks.retain(|lock| lock.kind == AccessKind::Read || lock.status != LockStatus::Held);
607 pub(crate) fn locks_lifetime_ended(&mut self, ending_region: Option<CodeExtent>) {
608 trace!("Releasing locks that expire at {:?}", ending_region);
609 let cur_frame = self.cur_frame;
610 let has_ended = |lock: &LockInfo| -> bool {
611 if lock.lifetime.frame != cur_frame {
614 match ending_region {
615 None => true, // When a function ends, we end *all* its locks. It's okay for a function to still have lifetime-related locks
616 // when it returns; that can happen e.g. with NLL, where a lifetime can, but does not have to, extend beyond the
617 // end of a function. The same goes for a function that still has pending lock recoveries.
618 Some(ending_region) => lock.lifetime.region == Some(ending_region),
622 for alloc in self.alloc_map.values_mut() {
623 for (_range, locks) in alloc.locks.iter_mut() {
624 // Delete everything that ends now -- i.e., keep only all the other lifetimes.
625 locks.retain(|lock| !has_ended(lock));
626 // Activate locks that get recovered now
627 if let Some(ending_region) = ending_region {
628 for lock in locks.iter_mut() {
629 if lock.lifetime.frame == cur_frame && lock.status == LockStatus::RecoverAfter(ending_region) {
630 // FIXME: Check if this triggers a conflict between active locks
631 lock.status = LockStatus::Held;
637 // TODO: It may happen now that we leave empty vectors in the map. Is it worth getting rid of them?
641 /// Allocation accessors
642 impl<'a, 'tcx> Memory<'a, 'tcx> {
643 pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation> {
644 match self.alloc_map.get(&id) {
645 Some(alloc) => Ok(alloc),
646 None => match self.functions.get(&id) {
647 Some(_) => Err(EvalError::DerefFunctionPointer),
648 None => Err(EvalError::DanglingPointerDeref),
653 fn get_mut_unchecked(&mut self, id: AllocId) -> EvalResult<'tcx, &mut Allocation> {
654 match self.alloc_map.get_mut(&id) {
655 Some(alloc) => Ok(alloc),
656 None => match self.functions.get(&id) {
657 Some(_) => Err(EvalError::DerefFunctionPointer),
658 None => Err(EvalError::DanglingPointerDeref),
663 pub fn get_mut(&mut self, id: AllocId) -> EvalResult<'tcx, &mut Allocation> {
664 let alloc = self.get_mut_unchecked(id)?;
665 if alloc.mutable == Mutability::Mutable {
668 Err(EvalError::ModifiedConstantMemory)
672 pub fn get_fn(&self, ptr: MemoryPointer) -> EvalResult<'tcx, ty::Instance<'tcx>> {
674 return Err(EvalError::InvalidFunctionPointer);
676 debug!("reading fn ptr: {}", ptr.alloc_id);
677 match self.functions.get(&ptr.alloc_id) {
678 Some(&fndef) => Ok(fndef),
679 None => match self.alloc_map.get(&ptr.alloc_id) {
680 Some(_) => Err(EvalError::ExecuteMemory),
681 None => Err(EvalError::InvalidFunctionPointer),
686 /// For debugging, print an allocation and all allocations it points to, recursively.
687 pub fn dump_alloc(&self, id: AllocId) {
688 self.dump_allocs(vec![id]);
691 /// For debugging, print a list of allocations and all allocations they point to, recursively.
692 pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
696 let mut allocs_to_print = VecDeque::from(allocs);
697 let mut allocs_seen = HashSet::new();
699 while let Some(id) = allocs_to_print.pop_front() {
700 let mut msg = format!("Alloc {:<5} ", format!("{}:", id));
701 let prefix_len = msg.len();
702 let mut relocations = vec![];
704 let alloc = match (self.alloc_map.get(&id), self.functions.get(&id)) {
705 (Some(a), None) => a,
706 (None, Some(instance)) => {
707 trace!("{} {}", msg, instance);
711 trace!("{} (deallocated)", msg);
714 (Some(_), Some(_)) => bug!("miri invariant broken: an allocation id exists that points to both a function and a memory location"),
717 for i in 0..(alloc.bytes.len() as u64) {
718 if let Some(&target_id) = alloc.relocations.get(&i) {
719 if allocs_seen.insert(target_id) {
720 allocs_to_print.push_back(target_id);
722 relocations.push((i, target_id));
724 if alloc.undef_mask.is_range_defined(i, i + 1) {
725 // this `as usize` is fine, since `i` came from a `usize`
726 write!(msg, "{:02x} ", alloc.bytes[i as usize]).unwrap();
732 let immutable = match (alloc.kind, alloc.mutable) {
733 (Kind::UninitializedStatic, _) => " (static in the process of initialization)",
734 (Kind::Static, Mutability::Mutable) => " (static mut)",
735 (Kind::Static, Mutability::Immutable) => " (immutable)",
736 (Kind::Env, _) => " (env var)",
737 (Kind::C, _) => " (malloc)",
738 (Kind::Rust, _) => " (heap)",
739 (Kind::Stack, _) => " (stack)",
741 trace!("{}({} bytes, alignment {}){}", msg, alloc.bytes.len(), alloc.align, immutable);
743 if !relocations.is_empty() {
745 write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
747 let relocation_width = (self.pointer_size() - 1) * 3;
748 for (i, target_id) in relocations {
749 // this `as usize` is fine, since we can't print more chars than `usize::MAX`
750 write!(msg, "{:1$}", "", ((i - pos) * 3) as usize).unwrap();
751 let target = format!("({})", target_id);
752 // this `as usize` is fine, since we can't print more chars than `usize::MAX`
753 write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
754 pos = i + self.pointer_size();
761 pub fn leak_report(&self) -> usize {
762 trace!("### LEAK REPORT ###");
763 let leaks: Vec<_> = self.alloc_map
765 .filter_map(|(&key, val)| {
766 if val.kind != Kind::Static {
774 self.dump_allocs(leaks);
780 impl<'a, 'tcx> Memory<'a, 'tcx> {
781 fn get_bytes_unchecked(&self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> {
782 // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
783 if self.reads_are_aligned {
784 self.check_align(ptr.into(), align)?;
789 self.check_locks(ptr, size, AccessKind::Read)?;
790 self.check_bounds(ptr.offset(size, self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
791 let alloc = self.get(ptr.alloc_id)?;
792 assert_eq!(ptr.offset as usize as u64, ptr.offset);
793 assert_eq!(size as usize as u64, size);
794 let offset = ptr.offset as usize;
795 Ok(&alloc.bytes[offset..offset + size as usize])
798 fn get_bytes_unchecked_mut(&mut self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &mut [u8]> {
799 // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
800 if self.writes_are_aligned {
801 self.check_align(ptr.into(), align)?;
806 self.check_locks(ptr, size, AccessKind::Write)?;
807 self.check_bounds(ptr.offset(size, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
808 let alloc = self.get_mut(ptr.alloc_id)?;
809 assert_eq!(ptr.offset as usize as u64, ptr.offset);
810 assert_eq!(size as usize as u64, size);
811 let offset = ptr.offset as usize;
812 Ok(&mut alloc.bytes[offset..offset + size as usize])
815 fn get_bytes(&self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> {
817 if self.relocations(ptr, size)?.count() != 0 {
818 return Err(EvalError::ReadPointerAsBytes);
820 self.check_defined(ptr, size)?;
821 self.get_bytes_unchecked(ptr, size, align)
824 fn get_bytes_mut(&mut self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &mut [u8]> {
826 self.clear_relocations(ptr, size)?;
827 self.mark_definedness(ptr.into(), size, true)?;
828 self.get_bytes_unchecked_mut(ptr, size, align)
832 /// Reading and writing
833 impl<'a, 'tcx> Memory<'a, 'tcx> {
834 /// mark an allocation as being the entry point to a static (see `static_alloc` field)
835 pub fn mark_static(&mut self, alloc_id: AllocId) {
836 trace!("mark_static: {:?}", alloc_id);
837 if !self.static_alloc.insert(alloc_id) {
838 bug!("tried to mark an allocation ({:?}) as static twice", alloc_id);
842 /// mark an allocation pointed to by a static as static and initialized
843 pub fn mark_inner_allocation(&mut self, alloc: AllocId, mutability: Mutability) -> EvalResult<'tcx> {
844 // relocations into other statics are not "inner allocations"
845 if !self.static_alloc.contains(&alloc) {
846 self.mark_static_initalized(alloc, mutability)?;
851 /// mark an allocation as static and initialized, either mutable or not
852 pub fn mark_static_initalized(&mut self, alloc_id: AllocId, mutability: Mutability) -> EvalResult<'tcx> {
853 trace!("mark_static_initalized {:?}, mutability: {:?}", alloc_id, mutability);
854 // do not use `self.get_mut(alloc_id)` here, because we might have already marked a
855 // sub-element or have circular pointers (e.g. `Rc`-cycles)
856 let relocations = match self.alloc_map.get_mut(&alloc_id) {
857 Some(&mut Allocation { ref mut relocations, ref mut kind, ref mut mutable, .. }) => {
859 // const eval results can refer to "locals".
860 // E.g. `const Foo: &u32 = &1;` refers to the temp local that stores the `1`
862 // The entire point of this function
863 Kind::UninitializedStatic |
864 // In the future const eval will allow heap allocations so we'll need to protect them
865 // from deallocation, too
869 trace!("mark_static_initalized: skipping already initialized static referred to by static currently being initialized");
872 // FIXME: This could be allowed, but not for env vars set during miri execution
873 Kind::Env => return Err(EvalError::Unimplemented("statics can't refer to env vars".to_owned())),
875 *kind = Kind::Static;
876 *mutable = mutability;
877 // take out the relocations vector to free the borrow on self, so we can call
879 mem::replace(relocations, Default::default())
881 None if !self.functions.contains_key(&alloc_id) => return Err(EvalError::DanglingPointerDeref),
884 // recurse into inner allocations
885 for &alloc in relocations.values() {
886 self.mark_inner_allocation(alloc, mutability)?;
888 // put back the relocations
889 self.alloc_map.get_mut(&alloc_id).expect("checked above").relocations = relocations;
893 pub fn copy(&mut self, src: Pointer, dest: Pointer, size: u64, align: u64, nonoverlapping: bool) -> EvalResult<'tcx> {
895 // Empty accesses don't need to be valid pointers, but they should still be aligned
896 if self.reads_are_aligned {
897 self.check_align(src, align)?;
899 if self.writes_are_aligned {
900 self.check_align(dest, align)?;
904 let src = src.to_ptr()?;
905 let dest = dest.to_ptr()?;
906 self.check_relocation_edges(src, size)?;
908 let src_bytes = self.get_bytes_unchecked(src, size, align)?.as_ptr();
909 let dest_bytes = self.get_bytes_mut(dest, size, align)?.as_mut_ptr();
911 // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
912 // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
913 // `dest` could possibly overlap.
915 assert_eq!(size as usize as u64, size);
916 if src.alloc_id == dest.alloc_id {
918 if (src.offset <= dest.offset && src.offset + size > dest.offset) ||
919 (dest.offset <= src.offset && dest.offset + size > src.offset) {
920 return Err(EvalError::Intrinsic(format!("copy_nonoverlapping called on overlapping ranges")));
923 ptr::copy(src_bytes, dest_bytes, size as usize);
925 ptr::copy_nonoverlapping(src_bytes, dest_bytes, size as usize);
929 self.copy_undef_mask(src, dest, size)?;
930 self.copy_relocations(src, dest, size)?;
935 pub fn read_c_str(&self, ptr: MemoryPointer) -> EvalResult<'tcx, &[u8]> {
936 let alloc = self.get(ptr.alloc_id)?;
937 assert_eq!(ptr.offset as usize as u64, ptr.offset);
938 let offset = ptr.offset as usize;
939 match alloc.bytes[offset..].iter().position(|&c| c == 0) {
941 if self.relocations(ptr, (size + 1) as u64)?.count() != 0 {
942 return Err(EvalError::ReadPointerAsBytes);
944 self.check_defined(ptr, (size + 1) as u64)?;
945 self.check_locks(ptr, (size + 1) as u64, AccessKind::Read)?;
946 Ok(&alloc.bytes[offset..offset + size])
948 None => Err(EvalError::UnterminatedCString(ptr)),
952 pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> {
954 // Empty accesses don't need to be valid pointers, but they should still be non-NULL
955 if self.reads_are_aligned {
956 self.check_align(ptr, 1)?;
960 self.get_bytes(ptr.to_ptr()?, size, 1)
963 pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<'tcx> {
965 // Empty accesses don't need to be valid pointers, but they should still be non-NULL
966 if self.writes_are_aligned {
967 self.check_align(ptr, 1)?;
971 let bytes = self.get_bytes_mut(ptr.to_ptr()?, src.len() as u64, 1)?;
972 bytes.clone_from_slice(src);
976 pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx> {
978 // Empty accesses don't need to be valid pointers, but they should still be non-NULL
979 if self.writes_are_aligned {
980 self.check_align(ptr, 1)?;
984 let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, 1)?;
985 for b in bytes { *b = val; }
989 pub fn read_ptr(&self, ptr: MemoryPointer) -> EvalResult<'tcx, Pointer> {
990 let size = self.pointer_size();
991 self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer
992 let endianess = self.endianess();
993 let bytes = self.get_bytes_unchecked(ptr, size, size)?;
994 // Undef check happens *after* we established that the alignment is correct.
995 // We must not return Ok() for unaligned pointers!
996 if self.check_defined(ptr, size).is_err() {
997 return Ok(PrimVal::Undef.into());
999 let offset = read_target_uint(endianess, bytes).unwrap();
1000 assert_eq!(offset as u64 as u128, offset);
1001 let offset = offset as u64;
1002 let alloc = self.get(ptr.alloc_id)?;
1003 match alloc.relocations.get(&ptr.offset) {
1004 Some(&alloc_id) => Ok(PrimVal::Ptr(MemoryPointer::new(alloc_id, offset)).into()),
1005 None => Ok(PrimVal::Bytes(offset as u128).into()),
1009 pub fn write_ptr(&mut self, dest: MemoryPointer, ptr: MemoryPointer) -> EvalResult<'tcx> {
1010 self.write_usize(dest, ptr.offset as u64)?;
1011 self.get_mut(dest.alloc_id)?.relocations.insert(dest.offset, ptr.alloc_id);
1015 pub fn write_primval(
1020 ) -> EvalResult<'tcx> {
1022 PrimVal::Ptr(ptr) => {
1023 assert_eq!(size, self.pointer_size());
1024 self.write_ptr(dest.to_ptr()?, ptr)
1027 PrimVal::Bytes(bytes) => {
1028 // We need to mask here, or the byteorder crate can die when given a u64 larger
1029 // than fits in an integer of the requested size.
1030 let mask = match size {
1036 n => bug!("unexpected PrimVal::Bytes size: {}", n),
1038 self.write_uint(dest.to_ptr()?, bytes & mask, size)
1041 PrimVal::Undef => self.mark_definedness(dest, size, false),
1045 pub fn read_bool(&self, ptr: MemoryPointer) -> EvalResult<'tcx, bool> {
1046 let bytes = self.get_bytes(ptr, 1, self.layout.i1_align.abi())?;
1050 _ => Err(EvalError::InvalidBool),
1054 pub fn write_bool(&mut self, ptr: MemoryPointer, b: bool) -> EvalResult<'tcx> {
1055 let align = self.layout.i1_align.abi();
1056 self.get_bytes_mut(ptr, 1, align)
1057 .map(|bytes| bytes[0] = b as u8)
1060 fn int_align(&self, size: u64) -> EvalResult<'tcx, u64> {
1062 1 => Ok(self.layout.i8_align.abi()),
1063 2 => Ok(self.layout.i16_align.abi()),
1064 4 => Ok(self.layout.i32_align.abi()),
1065 8 => Ok(self.layout.i64_align.abi()),
1066 16 => Ok(self.layout.i128_align.abi()),
1067 _ => bug!("bad integer size: {}", size),
1071 pub fn read_int(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx, i128> {
1072 let align = self.int_align(size)?;
1073 self.get_bytes(ptr, size, align).map(|b| read_target_int(self.endianess(), b).unwrap())
1076 pub fn write_int(&mut self, ptr: MemoryPointer, n: i128, size: u64) -> EvalResult<'tcx> {
1077 let align = self.int_align(size)?;
1078 let endianess = self.endianess();
1079 let b = self.get_bytes_mut(ptr, size, align)?;
1080 write_target_int(endianess, b, n).unwrap();
1084 pub fn read_uint(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx, u128> {
1085 let align = self.int_align(size)?;
1086 self.get_bytes(ptr, size, align).map(|b| read_target_uint(self.endianess(), b).unwrap())
1089 pub fn write_uint(&mut self, ptr: MemoryPointer, n: u128, size: u64) -> EvalResult<'tcx> {
1090 let align = self.int_align(size)?;
1091 let endianess = self.endianess();
1092 let b = self.get_bytes_mut(ptr, size, align)?;
1093 write_target_uint(endianess, b, n).unwrap();
1097 pub fn read_isize(&self, ptr: MemoryPointer) -> EvalResult<'tcx, i64> {
1098 self.read_int(ptr, self.pointer_size()).map(|i| i as i64)
1101 pub fn write_isize(&mut self, ptr: MemoryPointer, n: i64) -> EvalResult<'tcx> {
1102 let size = self.pointer_size();
1103 self.write_int(ptr, n as i128, size)
1106 pub fn read_usize(&self, ptr: MemoryPointer) -> EvalResult<'tcx, u64> {
1107 self.read_uint(ptr, self.pointer_size()).map(|i| i as u64)
1110 pub fn write_usize(&mut self, ptr: MemoryPointer, n: u64) -> EvalResult<'tcx> {
1111 let size = self.pointer_size();
1112 self.write_uint(ptr, n as u128, size)
1115 pub fn write_f32(&mut self, ptr: MemoryPointer, f: f32) -> EvalResult<'tcx> {
1116 let endianess = self.endianess();
1117 let align = self.layout.f32_align.abi();
1118 let b = self.get_bytes_mut(ptr, 4, align)?;
1119 write_target_f32(endianess, b, f).unwrap();
1123 pub fn write_f64(&mut self, ptr: MemoryPointer, f: f64) -> EvalResult<'tcx> {
1124 let endianess = self.endianess();
1125 let align = self.layout.f64_align.abi();
1126 let b = self.get_bytes_mut(ptr, 8, align)?;
1127 write_target_f64(endianess, b, f).unwrap();
1131 pub fn read_f32(&self, ptr: MemoryPointer) -> EvalResult<'tcx, f32> {
1132 self.get_bytes(ptr, 4, self.layout.f32_align.abi())
1133 .map(|b| read_target_f32(self.endianess(), b).unwrap())
1136 pub fn read_f64(&self, ptr: MemoryPointer) -> EvalResult<'tcx, f64> {
1137 self.get_bytes(ptr, 8, self.layout.f64_align.abi())
1138 .map(|b| read_target_f64(self.endianess(), b).unwrap())
1143 impl<'a, 'tcx> Memory<'a, 'tcx> {
1144 fn relocations(&self, ptr: MemoryPointer, size: u64)
1145 -> EvalResult<'tcx, btree_map::Range<u64, AllocId>>
1147 let start = ptr.offset.saturating_sub(self.pointer_size() - 1);
1148 let end = ptr.offset + size;
1149 Ok(self.get(ptr.alloc_id)?.relocations.range(start..end))
1152 fn clear_relocations(&mut self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
1153 // Find all relocations overlapping the given range.
1154 let keys: Vec<_> = self.relocations(ptr, size)?.map(|(&k, _)| k).collect();
1155 if keys.is_empty() { return Ok(()); }
1157 // Find the start and end of the given range and its outermost relocations.
1158 let start = ptr.offset;
1159 let end = start + size;
1160 let first = *keys.first().unwrap();
1161 let last = *keys.last().unwrap() + self.pointer_size();
1163 let alloc = self.get_mut(ptr.alloc_id)?;
1165 // Mark parts of the outermost relocations as undefined if they partially fall outside the
1167 if first < start { alloc.undef_mask.set_range(first, start, false); }
1168 if last > end { alloc.undef_mask.set_range(end, last, false); }
1170 // Forget all the relocations.
1171 for k in keys { alloc.relocations.remove(&k); }
1176 fn check_relocation_edges(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
1177 let overlapping_start = self.relocations(ptr, 0)?.count();
1178 let overlapping_end = self.relocations(ptr.offset(size, self.layout)?, 0)?.count();
1179 if overlapping_start + overlapping_end != 0 {
1180 return Err(EvalError::ReadPointerAsBytes);
1185 fn copy_relocations(&mut self, src: MemoryPointer, dest: MemoryPointer, size: u64) -> EvalResult<'tcx> {
1186 let relocations: Vec<_> = self.relocations(src, size)?
1187 .map(|(&offset, &alloc_id)| {
1188 // Update relocation offsets for the new positions in the destination allocation.
1189 (offset + dest.offset - src.offset, alloc_id)
1192 self.get_mut(dest.alloc_id)?.relocations.extend(relocations);
1198 impl<'a, 'tcx> Memory<'a, 'tcx> {
1199 // FIXME(solson): This is a very naive, slow version.
1200 fn copy_undef_mask(&mut self, src: MemoryPointer, dest: MemoryPointer, size: u64) -> EvalResult<'tcx> {
1201 // The bits have to be saved locally before writing to dest in case src and dest overlap.
1202 assert_eq!(size as usize as u64, size);
1203 let mut v = Vec::with_capacity(size as usize);
1205 let defined = self.get(src.alloc_id)?.undef_mask.get(src.offset + i);
1208 for (i, defined) in v.into_iter().enumerate() {
1209 self.get_mut(dest.alloc_id)?.undef_mask.set(dest.offset + i as u64, defined);
1214 fn check_defined(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
1215 let alloc = self.get(ptr.alloc_id)?;
1216 if !alloc.undef_mask.is_range_defined(ptr.offset, ptr.offset + size) {
1217 return Err(EvalError::ReadUndefBytes);
1222 pub fn mark_definedness(
1227 ) -> EvalResult<'tcx> {
1231 let ptr = ptr.to_ptr()?;
1232 let mut alloc = self.get_mut(ptr.alloc_id)?;
1233 alloc.undef_mask.set_range(ptr.offset, ptr.offset + size, new_state);
1238 ////////////////////////////////////////////////////////////////////////////////
1239 // Methods to access integers in the target endianness
1240 ////////////////////////////////////////////////////////////////////////////////
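// Round-trip sketch, not part of the original code: the helpers defined just below
// write and read back a 2-byte unsigned integer in the requested byte order.
#[allow(dead_code)]
fn target_uint_roundtrip_example() {
    let mut buf = [0u8; 2];
    write_target_uint(layout::Endian::Little, &mut buf[..], 0x1234).unwrap();
    assert_eq!(buf, [0x34, 0x12]);
    assert_eq!(read_target_uint(layout::Endian::Little, &buf[..]).unwrap(), 0x1234);
}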
1242 fn write_target_uint(endianess: layout::Endian, mut target: &mut [u8], data: u128) -> Result<(), io::Error> {
1243 let len = target.len();
1245 layout::Endian::Little => target.write_uint128::<LittleEndian>(data, len),
1246 layout::Endian::Big => target.write_uint128::<BigEndian>(data, len),
1249 fn write_target_int(endianess: layout::Endian, mut target: &mut [u8], data: i128) -> Result<(), io::Error> {
1250 let len = target.len();
1252 layout::Endian::Little => target.write_int128::<LittleEndian>(data, len),
1253 layout::Endian::Big => target.write_int128::<BigEndian>(data, len),
1257 fn read_target_uint(endianess: layout::Endian, mut source: &[u8]) -> Result<u128, io::Error> {
1259 layout::Endian::Little => source.read_uint128::<LittleEndian>(source.len()),
1260 layout::Endian::Big => source.read_uint128::<BigEndian>(source.len()),
1263 fn read_target_int(endianess: layout::Endian, mut source: &[u8]) -> Result<i128, io::Error> {
1265 layout::Endian::Little => source.read_int128::<LittleEndian>(source.len()),
1266 layout::Endian::Big => source.read_int128::<BigEndian>(source.len()),
1270 ////////////////////////////////////////////////////////////////////////////////
1271 // Methods to access floats in the target endianness
1272 ////////////////////////////////////////////////////////////////////////////////
1274 fn write_target_f32(endianess: layout::Endian, mut target: &mut [u8], data: f32) -> Result<(), io::Error> {
1276 layout::Endian::Little => target.write_f32::<LittleEndian>(data),
1277 layout::Endian::Big => target.write_f32::<BigEndian>(data),
1280 fn write_target_f64(endianess: layout::Endian, mut target: &mut [u8], data: f64) -> Result<(), io::Error> {
1282 layout::Endian::Little => target.write_f64::<LittleEndian>(data),
1283 layout::Endian::Big => target.write_f64::<BigEndian>(data),
1287 fn read_target_f32(endianess: layout::Endian, mut source: &[u8]) -> Result<f32, io::Error> {
1289 layout::Endian::Little => source.read_f32::<LittleEndian>(),
1290 layout::Endian::Big => source.read_f32::<BigEndian>(),
1293 fn read_target_f64(endianess: layout::Endian, mut source: &[u8]) -> Result<f64, io::Error> {
1295 layout::Endian::Little => source.read_f64::<LittleEndian>(),
1296 layout::Endian::Big => source.read_f64::<BigEndian>(),
1300 ////////////////////////////////////////////////////////////////////////////////
1301 // Undefined byte tracking
1302 ////////////////////////////////////////////////////////////////////////////////
1305 const BLOCK_SIZE: u64 = 64;
1307 #[derive(Clone, Debug)]
1308 pub struct UndefMask {
1314 fn new(size: u64) -> Self {
1315 let mut m = UndefMask {
1319 m.grow(size, false);
1323 /// Check whether the range `start..end` (end-exclusive) is entirely defined.
1324 pub fn is_range_defined(&self, start: u64, end: u64) -> bool {
1325 if end > self.len { return false; }
1326 for i in start..end {
1327 if !self.get(i) { return false; }
1332 fn set_range(&mut self, start: u64, end: u64, new_state: bool) {
1334 if end > len { self.grow(end - len, new_state); }
1335 self.set_range_inbounds(start, end, new_state);
1338 fn set_range_inbounds(&mut self, start: u64, end: u64, new_state: bool) {
1339 for i in start..end { self.set(i, new_state); }
1342 fn get(&self, i: u64) -> bool {
1343 let (block, bit) = bit_index(i);
1344 (self.blocks[block] & 1 << bit) != 0
1347 fn set(&mut self, i: u64, new_state: bool) {
1348 let (block, bit) = bit_index(i);
1350 self.blocks[block] |= 1 << bit;
1352 self.blocks[block] &= !(1 << bit);
1356 fn grow(&mut self, amount: u64, new_state: bool) {
1357 let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len;
1358 if amount > unused_trailing_bits {
1359 let additional_blocks = amount / BLOCK_SIZE + 1;
1360 assert_eq!(additional_blocks as usize as u64, additional_blocks);
1361 self.blocks.extend(iter::repeat(0).take(additional_blocks as usize));
1363 let start = self.len;
1365 self.set_range_inbounds(start, start + amount, new_state);
1369 fn bit_index(bits: u64) -> (usize, usize) {
1370 let a = bits / BLOCK_SIZE;
1371 let b = bits % BLOCK_SIZE;
1372 assert_eq!(a as usize as u64, a);
1373 assert_eq!(b as usize as u64, b);
1374 (a as usize, b as usize)
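// Worked example, not part of the original code: with BLOCK_SIZE = 64, bit 200 lives
// in block 3 at position 8 (200 = 3 * 64 + 8), and a fresh mask starts fully undefined.
#[allow(dead_code)]
fn undef_mask_example() {
    assert_eq!(bit_index(0), (0, 0));
    assert_eq!(bit_index(200), (3, 8));

    let mut mask = UndefMask::new(16);
    assert!(!mask.is_range_defined(0, 16));
    // Defining bytes 4..8 makes exactly that sub-range read back as defined.
    mask.set_range(4, 8, true);
    assert!(mask.is_range_defined(4, 8));
    assert!(!mask.is_range_defined(3, 8));
}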
1377 ////////////////////////////////////////////////////////////////////////////////
1378 // Unaligned accesses
1379 ////////////////////////////////////////////////////////////////////////////////
1381 pub(crate) trait HasMemory<'a, 'tcx> {
1382 fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx>;
1383 fn memory(&self) -> &Memory<'a, 'tcx>;
1385 // These are not supposed to be overridden.
1386 fn read_maybe_aligned<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
1387 where F: FnOnce(&mut Self) -> EvalResult<'tcx, T>
1389 assert!(self.memory_mut().reads_are_aligned, "Unaligned reads must not be nested");
1390 self.memory_mut().reads_are_aligned = aligned;
1392 self.memory_mut().reads_are_aligned = true;
1396 fn write_maybe_aligned<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
1397 where F: FnOnce(&mut Self) -> EvalResult<'tcx, T>
1399 assert!(self.memory_mut().writes_are_aligned, "Unaligned writes must not be nested");
1400 self.memory_mut().writes_are_aligned = aligned;
1402 self.memory_mut().writes_are_aligned = true;
1407 impl<'a, 'tcx> HasMemory<'a, 'tcx> for Memory<'a, 'tcx> {
1409 fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx> {
1414 fn memory(&self) -> &Memory<'a, 'tcx> {
1419 impl<'a, 'tcx> HasMemory<'a, 'tcx> for EvalContext<'a, 'tcx> {
1421 fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx> {
1426 fn memory(&self) -> &Memory<'a, 'tcx> {
1431 ////////////////////////////////////////////////////////////////////////////////
1432 // Pointer arithmetic
1433 ////////////////////////////////////////////////////////////////////////////////
1435 pub trait PointerArithmetic : layout::HasDataLayout {
1436 // These are not supposed to be overridden.
1438 /// Truncate the given value to the pointer size; also return whether there was an overflow.
1439 fn truncate_to_ptr(self, val: u128) -> (u64, bool) {
1440 let max_ptr_plus_1 = 1u128 << self.data_layout().pointer_size.bits();
1441 ((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1)
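// Worked example, not part of the original code: with 64-bit pointers the modulus is
// 2^64, so in-range values pass through unchanged and larger values wrap with the
// overflow flag set.
#[allow(dead_code)]
fn truncate_to_ptr_example() {
    let max_ptr_plus_1: u128 = 1u128 << 64;
    let val = max_ptr_plus_1 + 3;
    assert_eq!(((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1), (3, true));
}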
1444 // Overflow checking only works properly on the range from -u64 to +u64.
1445 fn overflowing_signed_offset(self, val: u64, i: i128) -> (u64, bool) {
1446 // FIXME: is it possible to over/underflow here?
1448 // trickery to ensure that i64::min_value() works fine
1449 // this formula only works for true negative values; it panics for zero!
1450 let n = u64::max_value() - (i as u64) + 1;
1451 val.overflowing_sub(n)
1453 self.overflowing_offset(val, i as u64)
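// Worked example, not part of the original code: the negative branch above recovers
// the magnitude of `i` from its two's-complement bit pattern. For i = -5, `i as u64`
// is u64::MAX - 4, so n = u64::MAX - (u64::MAX - 4) + 1 = 5.
#[allow(dead_code)]
fn negative_offset_magnitude_example() {
    let i: i128 = -5;
    let n = u64::max_value() - (i as u64) + 1;
    assert_eq!(n, 5);
    // Applying it: 100 + (-5) = 95, with no wraparound.
    assert_eq!(100u64.overflowing_sub(n), (95, false));
}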
1457 fn overflowing_offset(self, val: u64, i: u64) -> (u64, bool) {
1458 let (res, over1) = val.overflowing_add(i);
1459 let (res, over2) = self.truncate_to_ptr(res as u128);
1460 (res, over1 || over2)
1463 fn signed_offset<'tcx>(self, val: u64, i: i64) -> EvalResult<'tcx, u64> {
1464 let (res, over) = self.overflowing_signed_offset(val, i as i128);
1466 Err(EvalError::OverflowingMath)
1472 fn offset<'tcx>(self, val: u64, i: u64) -> EvalResult<'tcx, u64> {
1473 let (res, over) = self.overflowing_offset(val, i);
1475 Err(EvalError::OverflowingMath)
1481 fn wrapping_signed_offset(self, val: u64, i: i64) -> u64 {
1482 self.overflowing_signed_offset(val, i as i128).0
1486 impl<T: layout::HasDataLayout> PointerArithmetic for T {}
1488 impl<'a, 'tcx> layout::HasDataLayout for &'a Memory<'a, 'tcx> {
1490 fn data_layout(&self) -> &TargetDataLayout {
1494 impl<'a, 'tcx> layout::HasDataLayout for &'a EvalContext<'a, 'tcx> {
1496 fn data_layout(&self) -> &TargetDataLayout {
1497 self.memory().layout
1501 impl<'c, 'b, 'a, 'tcx> layout::HasDataLayout for &'c &'b mut EvalContext<'a, 'tcx> {
1503 fn data_layout(&self) -> &TargetDataLayout {
1504 self.memory().layout