use byteorder::{NativeEndian, ReadBytesExt, WriteBytesExt};
use std::collections::Bound::{Included, Excluded};
use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque};
use std::{fmt, iter, mem, ptr};

use error::{EvalError, EvalResult};
use primval::PrimVal;

////////////////////////////////////////////////////////////////////////////////
// Allocations and pointers
////////////////////////////////////////////////////////////////////////////////

#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
pub struct AllocId(u64);

impl fmt::Display for AllocId {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}
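
/// A chunk of interpreter memory: the raw bytes plus the bookkeeping needed
/// to track which bytes hold pointers (`relocations`) and which bytes are
/// undefined (`undef_mask`).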
#[derive(Debug)]
pub struct Allocation {
    pub bytes: Vec<u8>,
    pub relocations: BTreeMap<usize, AllocId>,
    pub undef_mask: UndefMask,
}
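
/// A pointer into an `Allocation`: the allocation's id plus a byte offset
/// into it.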
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Pointer {
    pub alloc_id: AllocId,
    pub offset: usize,
}

impl Pointer {
    pub fn offset(self, i: isize) -> Self {
        Pointer { offset: (self.offset as isize + i) as usize, ..self }
    }
}

////////////////////////////////////////////////////////////////////////////////
// Top-level interpreter memory
////////////////////////////////////////////////////////////////////////////////
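
/// The interpreter's memory: a collection of `Allocation`s indexed by
/// `AllocId`, plus the target's pointer size in bytes.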
pub struct Memory {
    alloc_map: HashMap<AllocId, Allocation>,
    next_id: AllocId,
    pub pointer_size: usize,
}

impl Memory {
    // FIXME: pass tcx.data_layout (This would also allow it to use primitive type alignments
    // to diagnose unaligned memory accesses.)
    pub fn new(pointer_size: usize) -> Self {
        Memory {
            alloc_map: HashMap::new(),
            next_id: AllocId(0),
            pointer_size: pointer_size,
        }
    }
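
    /// Create a new allocation of `size` zeroed but entirely undefined bytes
    /// and return a pointer to its start.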
    pub fn allocate(&mut self, size: usize) -> Pointer {
        let alloc = Allocation {
            bytes: vec![0; size],
            relocations: BTreeMap::new(),
            undef_mask: UndefMask::new(size),
        };
        let id = self.next_id;
        self.next_id.0 += 1;
        self.alloc_map.insert(id, alloc);
        Pointer { alloc_id: id, offset: 0 }
    }

    // TODO(solson): Track which allocations were returned from __rust_allocate and report an error
    // when reallocating/deallocating any others.
    pub fn reallocate(&mut self, ptr: Pointer, new_size: usize) -> EvalResult<()> {
        if ptr.offset != 0 {
            // TODO(solson): Report error about non-__rust_allocate'd pointer.
            return Err(EvalError::Unimplemented(format!("bad pointer offset: {}", ptr.offset)));
        }

        let alloc = self.get_mut(ptr.alloc_id)?;
        let size = alloc.bytes.len();
        if new_size > size {
            let amount = new_size - size;
            alloc.bytes.extend(iter::repeat(0).take(amount));
            alloc.undef_mask.grow(amount, false);
        } else if size > new_size {
            return Err(EvalError::Unimplemented(format!("unimplemented allocation relocation")));
            // alloc.bytes.truncate(new_size);
            // alloc.undef_mask.len = new_size;
            // TODO: potentially remove relocations
        }

        Ok(())
    }

    // TODO(solson): See comment on `reallocate`.
    pub fn deallocate(&mut self, ptr: Pointer) -> EvalResult<()> {
        if ptr.offset != 0 {
            // TODO(solson): Report error about non-__rust_allocate'd pointer.
            return Err(EvalError::Unimplemented(format!("bad pointer offset: {}", ptr.offset)));
        }

        if self.alloc_map.remove(&ptr.alloc_id).is_none() {
            // TODO(solson): Report error about erroneous free. This is blocked on properly tracking
            // already-dropped state since this if-statement is entered even in safe code without
            // it.
        }

        Ok(())
    }

    ////////////////////////////////////////////////////////////////////////////////
    // Allocation accessors
    ////////////////////////////////////////////////////////////////////////////////

    pub fn get(&self, id: AllocId) -> EvalResult<&Allocation> {
        self.alloc_map.get(&id).ok_or(EvalError::DanglingPointerDeref)
    }

    pub fn get_mut(&mut self, id: AllocId) -> EvalResult<&mut Allocation> {
        self.alloc_map.get_mut(&id).ok_or(EvalError::DanglingPointerDeref)
    }

    /// Print an allocation and all allocations it points to, recursively.
    pub fn dump(&self, id: AllocId) {
        let mut allocs_seen = HashSet::new();
        let mut allocs_to_print = VecDeque::new();
        allocs_to_print.push_back(id);

        while let Some(id) = allocs_to_print.pop_front() {
            allocs_seen.insert(id);
            let prefix = format!("Alloc {:<5} ", format!("{}:", id));
            print!("{}", prefix);
            let mut relocations = vec![];

            let alloc = match self.alloc_map.get(&id) {
                Some(a) => a,
                None => {
                    println!("(deallocated)");
                    continue;
                }
            };

            for i in 0..alloc.bytes.len() {
                if let Some(&target_id) = alloc.relocations.get(&i) {
                    if !allocs_seen.contains(&target_id) {
                        allocs_to_print.push_back(target_id);
                    }
                    relocations.push((i, target_id));
                }
                if alloc.undef_mask.is_range_defined(i, i + 1) {
                    print!("{:02x} ", alloc.bytes[i]);
                } else {
                    print!("__ ");
                }
            }
            println!("({} bytes)", alloc.bytes.len());

            if !relocations.is_empty() {
                print!("{:1$}", "", prefix.len()); // Print spaces.
                let mut pos = 0;
                let relocation_width = (self.pointer_size - 1) * 3;
                for (i, target_id) in relocations {
                    print!("{:1$}", "", (i - pos) * 3);
                    print!("└{0:─^1$}┘ ", format!("({})", target_id), relocation_width);
                    pos = i + self.pointer_size;
                }
                println!("");
            }
        }
    }

    ////////////////////////////////////////////////////////////////////////////////
    // Byte accessors
    ////////////////////////////////////////////////////////////////////////////////
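
    // The `unchecked` variants only bounds-check. `get_bytes` additionally
    // refuses to read bytes that overlap a relocation or are undefined, while
    // `get_bytes_mut` clears overlapping relocations and marks the range as
    // defined, since the caller is about to overwrite it.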

    fn get_bytes_unchecked(&self, ptr: Pointer, size: usize) -> EvalResult<&[u8]> {
        let alloc = self.get(ptr.alloc_id)?;
        if ptr.offset + size > alloc.bytes.len() {
            return Err(EvalError::PointerOutOfBounds {
                ptr: ptr,
                size: size,
                allocation_size: alloc.bytes.len(),
            });
        }
        Ok(&alloc.bytes[ptr.offset..ptr.offset + size])
    }

    fn get_bytes_unchecked_mut(&mut self, ptr: Pointer, size: usize) -> EvalResult<&mut [u8]> {
        let alloc = self.get_mut(ptr.alloc_id)?;
        if ptr.offset + size > alloc.bytes.len() {
            return Err(EvalError::PointerOutOfBounds {
                ptr: ptr,
                size: size,
                allocation_size: alloc.bytes.len(),
            });
        }
        Ok(&mut alloc.bytes[ptr.offset..ptr.offset + size])
    }

    fn get_bytes(&self, ptr: Pointer, size: usize) -> EvalResult<&[u8]> {
        if self.relocations(ptr, size)?.count() != 0 {
            return Err(EvalError::ReadPointerAsBytes);
        }
        self.check_defined(ptr, size)?;
        self.get_bytes_unchecked(ptr, size)
    }

    fn get_bytes_mut(&mut self, ptr: Pointer, size: usize) -> EvalResult<&mut [u8]> {
        self.clear_relocations(ptr, size)?;
        self.mark_definedness(ptr, size, true)?;
        self.get_bytes_unchecked_mut(ptr, size)
    }

    ////////////////////////////////////////////////////////////////////////////////
    // Reading and writing
    ////////////////////////////////////////////////////////////////////////////////
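
    /// Copy `size` bytes from `src` to `dest`, carrying along both the
    /// relocations and the definedness of every byte copied.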
    pub fn copy(&mut self, src: Pointer, dest: Pointer, size: usize) -> EvalResult<()> {
        self.check_relocation_edges(src, size)?;

        let src_bytes = self.get_bytes_unchecked_mut(src, size)?.as_mut_ptr();
        let dest_bytes = self.get_bytes_mut(dest, size)?.as_mut_ptr();

        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        unsafe {
            if src.alloc_id == dest.alloc_id {
                ptr::copy(src_bytes, dest_bytes, size);
            } else {
                ptr::copy_nonoverlapping(src_bytes, dest_bytes, size);
            }
        }

        self.copy_undef_mask(src, dest, size)?;
        self.copy_relocations(src, dest, size)?;

        Ok(())
    }

    pub fn read_bytes(&self, ptr: Pointer, size: usize) -> EvalResult<&[u8]> {
        self.get_bytes(ptr, size)
    }

    pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<()> {
        let bytes = self.get_bytes_mut(ptr, src.len())?;
        bytes.clone_from_slice(src);
        Ok(())
    }

    pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: usize) -> EvalResult<()> {
        let bytes = self.get_bytes_mut(ptr, count)?;
        for b in bytes { *b = val; }
        Ok(())
    }

    pub fn drop_fill(&mut self, ptr: Pointer, size: usize) -> EvalResult<()> {
        self.write_repeat(ptr, mem::POST_DROP_U8, size)
    }
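
    /// Read a pointer-sized value at `ptr` and reconstruct the `Pointer` it
    /// encodes: the raw bytes give the offset, and the relocation at this
    /// position gives the target allocation. Bytes without a relocation are
    /// plain integers and cannot be read back as a pointer.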
    pub fn read_ptr(&self, ptr: Pointer) -> EvalResult<Pointer> {
        let size = self.pointer_size;
        self.check_defined(ptr, size)?;
        let offset = self.get_bytes_unchecked(ptr, size)?
            .read_uint::<NativeEndian>(size).unwrap() as usize;
        let alloc = self.get(ptr.alloc_id)?;
        match alloc.relocations.get(&ptr.offset) {
            Some(&alloc_id) => Ok(Pointer { alloc_id: alloc_id, offset: offset }),
            None => Err(EvalError::ReadBytesAsPointer),
        }
    }

    pub fn write_ptr(&mut self, dest: Pointer, ptr: Pointer) -> EvalResult<()> {
        {
            // Scope the byte borrow so we can borrow `self` again for the relocation.
            let size = self.pointer_size;
            let mut bytes = self.get_bytes_mut(dest, size)?;
            bytes.write_uint::<NativeEndian>(ptr.offset as u64, size).unwrap();
        }
        self.get_mut(dest.alloc_id)?.relocations.insert(dest.offset, ptr.alloc_id);
        Ok(())
    }
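
    /// Write a `PrimVal` at `ptr` using the value's natural width; the
    /// pointer-sized variants use the target's `pointer_size`.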
    pub fn write_primval(&mut self, ptr: Pointer, val: PrimVal) -> EvalResult<()> {
        let pointer_size = self.pointer_size;
        match val {
            PrimVal::Bool(b) => self.write_bool(ptr, b),
            PrimVal::I8(n) => self.write_int(ptr, n as i64, 1),
            PrimVal::I16(n) => self.write_int(ptr, n as i64, 2),
            PrimVal::I32(n) => self.write_int(ptr, n as i64, 4),
            PrimVal::I64(n) => self.write_int(ptr, n as i64, 8),
            PrimVal::U8(n) => self.write_uint(ptr, n as u64, 1),
            PrimVal::U16(n) => self.write_uint(ptr, n as u64, 2),
            PrimVal::U32(n) => self.write_uint(ptr, n as u64, 4),
            PrimVal::U64(n) => self.write_uint(ptr, n as u64, 8),
            PrimVal::IntegerPtr(n) => self.write_uint(ptr, n as u64, pointer_size),
            PrimVal::AbstractPtr(_p) => unimplemented!(),
        }
    }

    pub fn read_bool(&self, ptr: Pointer) -> EvalResult<bool> {
        let bytes = self.get_bytes(ptr, 1)?;
        match bytes[0] {
            0 => Ok(false),
            1 => Ok(true),
            _ => Err(EvalError::InvalidBool),
        }
    }

    pub fn write_bool(&mut self, ptr: Pointer, b: bool) -> EvalResult<()> {
        self.get_bytes_mut(ptr, 1).map(|bytes| bytes[0] = b as u8)
    }

    pub fn read_int(&self, ptr: Pointer, size: usize) -> EvalResult<i64> {
        self.get_bytes(ptr, size).map(|mut b| b.read_int::<NativeEndian>(size).unwrap())
    }

    pub fn write_int(&mut self, ptr: Pointer, n: i64, size: usize) -> EvalResult<()> {
        self.get_bytes_mut(ptr, size).map(|mut b| b.write_int::<NativeEndian>(n, size).unwrap())
    }

    pub fn read_uint(&self, ptr: Pointer, size: usize) -> EvalResult<u64> {
        self.get_bytes(ptr, size).map(|mut b| b.read_uint::<NativeEndian>(size).unwrap())
    }

    pub fn write_uint(&mut self, ptr: Pointer, n: u64, size: usize) -> EvalResult<()> {
        self.get_bytes_mut(ptr, size).map(|mut b| b.write_uint::<NativeEndian>(n, size).unwrap())
    }

    pub fn read_isize(&self, ptr: Pointer) -> EvalResult<i64> {
        self.read_int(ptr, self.pointer_size)
    }

    pub fn write_isize(&mut self, ptr: Pointer, n: i64) -> EvalResult<()> {
        let size = self.pointer_size;
        self.write_int(ptr, n, size)
    }

    pub fn read_usize(&self, ptr: Pointer) -> EvalResult<u64> {
        self.read_uint(ptr, self.pointer_size)
    }

    pub fn write_usize(&mut self, ptr: Pointer, n: u64) -> EvalResult<()> {
        let size = self.pointer_size;
        self.write_uint(ptr, n, size)
    }

    ////////////////////////////////////////////////////////////////////////////////
    // Relocations
    ////////////////////////////////////////////////////////////////////////////////
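
    /// Return the relocations overlapping the byte range `ptr..ptr + size`.
    /// Each relocation covers `pointer_size` bytes, so the search has to
    /// start `pointer_size - 1` bytes before `ptr.offset`.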
    fn relocations(&self, ptr: Pointer, size: usize)
        -> EvalResult<btree_map::Range<usize, AllocId>>
    {
        let start = ptr.offset.saturating_sub(self.pointer_size - 1);
        let end = ptr.offset + size;
        Ok(self.get(ptr.alloc_id)?.relocations.range(Included(&start), Excluded(&end)))
    }

    fn clear_relocations(&mut self, ptr: Pointer, size: usize) -> EvalResult<()> {
        // Find all relocations overlapping the given range.
        let keys: Vec<_> = self.relocations(ptr, size)?.map(|(&k, _)| k).collect();
        if keys.is_empty() { return Ok(()); }

        // Find the start and end of the given range and its outermost relocations.
        let start = ptr.offset;
        let end = start + size;
        let first = *keys.first().unwrap();
        let last = *keys.last().unwrap() + self.pointer_size;

        let alloc = self.get_mut(ptr.alloc_id)?;

        // Mark parts of the outermost relocations as undefined if they partially fall outside the
        // given range.
        if first < start { alloc.undef_mask.set_range(first, start, false); }
        if last > end { alloc.undef_mask.set_range(end, last, false); }

        // Forget all the relocations.
        for k in keys { alloc.relocations.remove(&k); }

        Ok(())
    }
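
    /// Return an error if a relocation crosses either boundary of the range,
    /// i.e. if a read or write of the range would cut a pointer in half.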
    fn check_relocation_edges(&self, ptr: Pointer, size: usize) -> EvalResult<()> {
        let overlapping_start = self.relocations(ptr, 0)?.count();
        let overlapping_end = self.relocations(ptr.offset(size as isize), 0)?.count();
        if overlapping_start + overlapping_end != 0 {
            return Err(EvalError::ReadPointerAsBytes);
        }
        Ok(())
    }

    fn copy_relocations(&mut self, src: Pointer, dest: Pointer, size: usize) -> EvalResult<()> {
        let relocations: Vec<_> = self.relocations(src, size)?
            .map(|(&offset, &alloc_id)| {
                // Update relocation offsets for the new positions in the destination allocation.
                (offset + dest.offset - src.offset, alloc_id)
            })
            .collect();
        self.get_mut(dest.alloc_id)?.relocations.extend(relocations);
        Ok(())
    }

    ////////////////////////////////////////////////////////////////////////////////
    // Undefined bytes
    ////////////////////////////////////////////////////////////////////////////////

    // FIXME(solson): This is a very naive, slow version.
    fn copy_undef_mask(&mut self, src: Pointer, dest: Pointer, size: usize) -> EvalResult<()> {
        // The bits have to be saved locally before writing to dest in case src and dest overlap.
        let mut v = Vec::with_capacity(size);
        for i in 0..size {
            let defined = self.get(src.alloc_id)?.undef_mask.get(src.offset + i);
            v.push(defined);
        }
        for (i, defined) in v.into_iter().enumerate() {
            self.get_mut(dest.alloc_id)?.undef_mask.set(dest.offset + i, defined);
        }
        Ok(())
    }
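
    /// Return an error if any byte in the range `ptr..ptr + size` is
    /// undefined.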
    fn check_defined(&self, ptr: Pointer, size: usize) -> EvalResult<()> {
        let alloc = self.get(ptr.alloc_id)?;
        if !alloc.undef_mask.is_range_defined(ptr.offset, ptr.offset + size) {
            return Err(EvalError::ReadUndefBytes);
        }
        Ok(())
    }

    pub fn mark_definedness(&mut self, ptr: Pointer, size: usize, new_state: bool)
        -> EvalResult<()>
    {
        let alloc = self.get_mut(ptr.alloc_id)?;
        alloc.undef_mask.set_range(ptr.offset, ptr.offset + size, new_state);
        Ok(())
    }
}

////////////////////////////////////////////////////////////////////////////////
// Undefined byte tracking
////////////////////////////////////////////////////////////////////////////////

type Block = u64;
const BLOCK_SIZE: usize = 64;
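
/// A mask tracking which bytes of an allocation are defined, one bit per
/// byte: a set bit means the byte has been initialized.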
#[derive(Clone, Debug)]
pub struct UndefMask {
    blocks: Vec<Block>,
    len: usize,
}

impl UndefMask {
    fn new(size: usize) -> Self {
        let mut m = UndefMask { blocks: vec![], len: 0 };
        m.grow(size, false);
        m
    }

    /// Check whether the range `start..end` (end-exclusive) is entirely defined.
    fn is_range_defined(&self, start: usize, end: usize) -> bool {
        if end > self.len { return false; }
        for i in start..end {
            if !self.get(i) { return false; }
        }
        true
    }

    fn set_range(&mut self, start: usize, end: usize, new_state: bool) {
        let len = self.len;
        if end > len { self.grow(end - len, new_state); }
        self.set_range_inbounds(start, end, new_state);
    }

    fn set_range_inbounds(&mut self, start: usize, end: usize, new_state: bool) {
        for i in start..end { self.set(i, new_state); }
    }

    fn get(&self, i: usize) -> bool {
        let (block, bit) = bit_index(i);
        (self.blocks[block] & 1 << bit) != 0
    }

    fn set(&mut self, i: usize, new_state: bool) {
        let (block, bit) = bit_index(i);
        if new_state {
            self.blocks[block] |= 1 << bit;
        } else {
            self.blocks[block] &= !(1 << bit);
        }
    }
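
    /// Extend the mask by `amount` bits, all set to `new_state`, adding new
    /// blocks as needed.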
    fn grow(&mut self, amount: usize, new_state: bool) {
        let unused_trailing_bits = self.blocks.len() * BLOCK_SIZE - self.len;
        if amount > unused_trailing_bits {
            let additional_blocks = amount / BLOCK_SIZE + 1;
            self.blocks.extend(iter::repeat(0).take(additional_blocks));
        }
        let start = self.len;
        self.len += amount;
        self.set_range_inbounds(start, start + amount, new_state);
    }
}

// fn uniform_block(state: bool) -> Block {
//     if state { !0 } else { 0 }
// }
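
// Map a bit index to a (block index, bit-within-block) pair.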
fn bit_index(bits: usize) -> (usize, usize) {
    (bits / BLOCK_SIZE, bits % BLOCK_SIZE)
}