branches:
# whitelist
only:
- - master
+ - auto
+ - try
cache:
- '%USERPROFILE%\.cargo'
- osx
dist: xenial
+# Run in PRs and for bors, but not on master.
+if: branch = auto OR branch = try OR type = pull_request OR type = cron
+
env:
global:
- RUST_TEST_NOCAPTURE=1
branches:
only:
- master
+ - auto
+ - try
-56a12b2ad058f22f1ef090713df15598525ba4a4
+7e08576e4276a97b523c25bfd196d419c39c7b87
// Hook pthread calls that go to the thread-local storage memory subsystem.
"pthread_key_create" => {
- let key_ptr = this.read_scalar(args[0])?.to_ptr()?;
+ let key_ptr = this.read_scalar(args[0])?.not_undef()?;
// Extract the function type out of the signature (that seems easier than constructing it ourselves).
let dtor = match this.read_scalar(args[1])?.not_undef()? {
return err!(OutOfTls);
}
- this.memory().check_align(key_ptr.into(), key_layout.align.abi)?;
+ let key_ptr = this.memory().check_ptr_access(key_ptr, key_layout.size, key_layout.align.abi)?
+ .expect("cannot be a ZST");
this.memory_mut().get_mut(key_ptr.alloc_id)?.write_scalar(
tcx,
key_ptr,
let ptr = mplace.ptr.to_ptr()?;
this.memory_mut()
.get_mut(ptr.alloc_id)?
- .mark_definedness(ptr, dest.layout.size, false)?;
+ .mark_definedness(ptr, dest.layout.size, false);
}
}
}
let val_byte = this.read_scalar(args[1])?.to_u8()?;
let ptr = this.read_scalar(args[0])?.not_undef()?;
let count = this.read_scalar(args[2])?.to_usize(this)?;
- this.memory().check_align(ptr, ty_layout.align.abi)?;
let byte_count = ty_layout.size * count;
- if byte_count.bytes() != 0 {
- let ptr = ptr.to_ptr()?;
- this.memory_mut()
- .get_mut(ptr.alloc_id)?
- .write_repeat(tcx, ptr, val_byte, byte_count)?;
+ match this.memory().check_ptr_access(ptr, byte_count, ty_layout.align.abi)? {
+ Some(ptr) => {
+ this.memory_mut()
+ .get_mut(ptr.alloc_id)?
+ .write_repeat(tcx, ptr, val_byte, byte_count)?;
+ }
+ None => {
+ // Size is 0, nothing to do.
+ }
}
}
use crate::*;
pub trait EvalContextExt<'tcx> {
+ fn pointer_inbounds(
+ &self,
+ ptr: Pointer<Tag>
+ ) -> InterpResult<'tcx>;
+
fn ptr_op(
&self,
bin_op: mir::BinOp,
}
impl<'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'mir, 'tcx> {
+ /// Test if the pointer is in-bounds of a live allocation.
+ #[inline]
+ fn pointer_inbounds(&self, ptr: Pointer<Tag>) -> InterpResult<'tcx> {
+ let (size, _align) = self.memory().get_size_and_align(ptr.alloc_id, AllocCheck::Live)?;
+ ptr.check_in_alloc(size, CheckInAllocMsg::InboundsTest)
+ }
+
fn ptr_op(
&self,
bin_op: mir::BinOp,
let left = left.to_ptr().expect("we checked is_ptr");
let right = right.to_bits(self.memory().pointer_size()).expect("we checked is_bits");
let (_alloc_size, alloc_align) = self.memory()
- .get_size_and_align(left.alloc_id, InboundsCheck::MaybeDead)
- .expect("determining size+align of dead ptr cannot fail");
+ .get_size_and_align(left.alloc_id, AllocCheck::MaybeDead)
+ .expect("alloc info with MaybeDead cannot fail");
let min_ptr_val = u128::from(alloc_align.bytes()) + u128::from(left.offset.bytes());
let result = match bin_op {
Gt => min_ptr_val > right,
if left.alloc_id == right.alloc_id {
left.offset == right.offset
} else {
+ // Make sure both pointers are in-bounds.
// This accepts one-past-the end. Thus, there is still technically
// some non-determinism that we do not fully rule out when two
// allocations sit right next to each other. The C/C++ standards are
// Dead allocations in miri cannot overlap with live allocations, but
// on real hardware this can easily happen. Thus for comparisons we require
// both pointers to be live.
- self.memory().check_bounds_ptr(left, InboundsCheck::Live, CheckInAllocMsg::InboundsTest)?;
- self.memory().check_bounds_ptr(right, InboundsCheck::Live, CheckInAllocMsg::InboundsTest)?;
- // Two in-bounds pointers, we can compare across allocations.
- left == right
+ if self.pointer_inbounds(left).is_ok() && self.pointer_inbounds(right).is_ok() {
+ // Two in-bounds pointers in different allocations are different.
+ false
+ } else {
+ return err!(InvalidPointerMath);
+ }
}
}
// Comparing ptr and integer.
// alignment 32 or higher, hence the limit of 32.
// FIXME: Once we support intptrcast, we could try to fix these holes.
if bits < 32 {
- // Test if the ptr is in-bounds. Then it cannot be NULL.
- // Even dangling pointers cannot be NULL.
- if self.memory().check_bounds_ptr(ptr, InboundsCheck::MaybeDead, CheckInAllocMsg::NullPointerTest).is_ok() {
+ // Test if the pointer can be different from NULL or not.
+ // We assume that pointers that are not NULL are also not "small".
+ if !self.memory().ptr_may_be_null(ptr) {
return Ok(false);
}
}
let (alloc_size, alloc_align) = self.memory()
- .get_size_and_align(ptr.alloc_id, InboundsCheck::MaybeDead)
- .expect("determining size+align of dead ptr cannot fail");
+ .get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)
+ .expect("alloc info with MaybeDead cannot fail");
// Case II: Alignment gives it away
if ptr.offset.bytes() % alloc_align.bytes() == 0 {
if let Scalar::Ptr(ptr) = ptr {
// Both old and new pointer must be in-bounds of a *live* allocation.
// (Of the same allocation, but that part is trivial with our representation.)
- self.memory().check_bounds_ptr(ptr, InboundsCheck::Live, CheckInAllocMsg::InboundsTest)?;
+ self.pointer_inbounds(ptr)?;
let ptr = ptr.signed_offset(offset, self)?;
- self.memory().check_bounds_ptr(ptr, InboundsCheck::Live, CheckInAllocMsg::InboundsTest)?;
+ self.pointer_inbounds(ptr)?;
Ok(Scalar::Ptr(ptr))
} else {
// An integer pointer. They can only be offset by 0, and we pretend there
use crate::{
InterpResult, InterpError, MiriEvalContext, HelpersEvalContextExt, Evaluator, MutValueVisitor,
- MemoryKind, MiriMemoryKind, RangeMap, AllocId, CheckInAllocMsg, Pointer, Immediate, ImmTy,
- PlaceTy, MPlaceTy,
+ MemoryKind, MiriMemoryKind, RangeMap, AllocId, Pointer, Immediate, ImmTy, PlaceTy, MPlaceTy,
};
pub type PtrId = NonZeroU64;
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let protector = if protect { Some(this.frame().extra) } else { None };
- let ptr = place.ptr.to_ptr()?;
+ let ptr = this.memory().check_ptr_access(place.ptr, size, place.align)
+ .expect("validity checks should have excluded dangling/unaligned pointer")
+ .expect("we shouldn't get here for ZST");
trace!("reborrow: {} reference {:?} derived from {:?} (pointee {}): {:?}, size {}",
kind, new_tag, ptr.tag, place.layout.ty, ptr.erase_tag(), size.bytes());
// Get the allocation. It might not be mutable, so we cannot use `get_mut`.
let alloc = this.memory().get(ptr.alloc_id)?;
- alloc.check_bounds(this, ptr, size, CheckInAllocMsg::InboundsTest)?;
// Update the stacks.
// Make sure that raw pointers and mutable shared references are reborrowed "weak":
// There could be existing unique pointers reborrowed from them that should remain valid!
--- /dev/null
+#![feature(generators, generator_trait)]
+
+use std::{
+ ops::{Generator, GeneratorState},
+ pin::Pin,
+};
+
+fn firstn() -> impl Generator<Yield = u64, Return = ()> {
+ static move || {
+ let mut num = 0;
+ let num = &mut num;
+
+ yield *num;
+ *num += 1; //~ ERROR dangling pointer was dereferenced
+ }
+}
+
+struct GeneratorIteratorAdapter<G>(G);
+
+impl<G> Iterator for GeneratorIteratorAdapter<G>
+where
+ G: Generator<Return = ()>,
+{
+ type Item = G::Yield;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let me = unsafe { Pin::new_unchecked(&mut self.0) };
+ match me.resume() {
+ GeneratorState::Yielded(x) => Some(x),
+ GeneratorState::Complete(_) => None,
+ }
+ }
+}
+
+fn main() {
+ let mut generator_iterator_2 = {
+ let mut generator_iterator = GeneratorIteratorAdapter(firstn());
+ generator_iterator.next(); // pin it
+
+ generator_iterator // move it
+ }; // *deallocate* generator_iterator
+
+ generator_iterator_2.next(); // and use moved value
+}
let y = &*b as *const i32; // different allocation
// We cannot compare these even though both are inbounds -- they *could* be
// equal if memory was reused.
- assert!(x != y); //~ ERROR dangling pointer
+ assert!(x != y); //~ ERROR invalid arithmetic on pointers
}
let y = &*b as *const i32; // different allocation
// We cannot compare these even though both allocations are live -- they *could* be
// equal (with the right base addresses).
- assert!(x != y); //~ ERROR outside bounds
+ assert!(x != y); //~ ERROR invalid arithmetic on pointers
}
+// This should fail even without validation
+// compile-flags: -Zmiri-disable-validation
#![feature(weak_into_raw)]
use std::rc::{Rc, Weak};
--- /dev/null
+// This should fail even without validation
+// compile-flags: -Zmiri-disable-validation
+
+fn main() {
+ let x = [2u16, 3, 4]; // Make it big enough so we don't get an out-of-bounds error.
+ let x = &x[0] as *const _ as *const u32;
+ // This must fail because alignment is violated: the allocation's base is not sufficiently aligned.
+ let _x = unsafe { *x }; //~ ERROR tried to access memory with alignment 2, but alignment 4 is required
+}
--- /dev/null
+// This should fail even without validation.
+// compile-flags: -Zmiri-disable-validation
+
+fn main() {
+ let x = [2u32, 3]; // Make it big enough so we don't get an out-of-bounds error.
+ let x = (x.as_ptr() as *const u8).wrapping_offset(3) as *const u32;
+ // This must fail because alignment is violated: the offset is not sufficiently aligned.
+ // Also make the offset not a power of 2, that used to ICE.
+ let _x = unsafe { *x }; //~ ERROR tried to access memory with alignment 1, but alignment 4 is required
+}
--- /dev/null
+// This should fail even without validation.
+// compile-flags: -Zmiri-disable-validation
+
+fn main() {
+ let x = [2u16, 3, 4, 5]; // Make it big enough so we don't get an out-of-bounds error.
+ let x = &x[0] as *const _ as *const *const u8; // cast to ptr-to-ptr, so that we load a ptr
+ // This must fail because alignment is violated. Test specifically for loading pointers,
+ // which have special code in miri's memory.
+ let _x = unsafe { *x };
+ //~^ ERROR tried to access memory with alignment 2, but alignment
+}
+++ /dev/null
-// This should fail even without validation
-// compile-flags: -Zmiri-disable-validation
-
-fn main() {
- let x = &2u16;
- let x = x as *const _ as *const u32;
- // This must fail because alignment is violated
- let _x = unsafe { *x }; //~ ERROR tried to access memory with alignment 2, but alignment 4 is required
-}
+++ /dev/null
-// This should fail even without validation.
-// compile-flags: -Zmiri-disable-validation
-
-fn main() {
- let x = &2u16;
- let x = x as *const _ as *const *const u8;
- // This must fail because alignment is violated. Test specifically for loading pointers,
- // which have special code in miri's memory.
- let _x = unsafe { *x };
- //~^ ERROR tried to access memory with alignment 2, but alignment
-}
+++ /dev/null
-fn main() {
- let x = &2u16;
- let x = x as *const _ as *const [u32; 0];
- // This must fail because alignment is violated. Test specifically for loading ZST.
- let _x = unsafe { *x };
- //~^ ERROR tried to access memory with alignment 2, but alignment 4 is required
-}
--- /dev/null
+// This should fail even without validation
+// compile-flags: -Zmiri-disable-validation
+
+fn main() {
+ let x = &2u16;
+ let x = x as *const _ as *const [u32; 0];
+ // This must fail because alignment is violated. Test specifically for loading ZST.
+ let _x = unsafe { *x };
+ //~^ ERROR tried to access memory with alignment 2, but alignment 4 is required
+}
use std::mem;
fn main() {
- let _x: &i32 = unsafe { mem::transmute(16usize) }; //~ ERROR tried to interpret some bytes as a pointer
+ let _x: &i32 = unsafe { mem::transmute(16usize) }; //~ ERROR integer pointer in non-ZST reference
}
fn main() {
let val = 14;
let ptr = (&val as *const i32).wrapping_offset(1);
- let _x: &i32 = unsafe { mem::transmute(ptr) }; //~ ERROR outside bounds of allocation
+ let _x: &i32 = unsafe { mem::transmute(ptr) }; //~ ERROR encountered dangling (not entirely in bounds) reference
}
+++ /dev/null
-fn main() {
- let x = &() as *const () as *const i32;
- let _val = unsafe { *x }; //~ ERROR access memory with alignment 1, but alignment 4 is required
-}
--- /dev/null
+fn main() {
+ // make sure ZST locals cannot be accessed
+ let x = &() as *const () as *const i8;
+ let _val = unsafe { *x }; //~ ERROR pointer must be in-bounds
+}
--- /dev/null
+fn main() {
+ // Not using the () type here, as writes of that type do not even have MIR generated.
+ // Also not assigning directly as that's array initialization, not assignment.
+ let zst_val = [1u8; 0];
+
+ // make sure ZST accesses are checked against being "truly" dangling pointers
+ // (into deallocated allocations).
+ let mut x_box = Box::new(1u8);
+ let x = &mut *x_box as *mut _ as *mut [u8; 0];
+ drop(x_box);
+ unsafe { *x = zst_val; } //~ ERROR dangling pointer was dereferenced
+}
--- /dev/null
+fn main() {
+ // Not using the () type here, as writes of that type do not even have MIR generated.
+ // Also not assigning directly as that's array initialization, not assignment.
+ let zst_val = [1u8; 0];
+
+ // make sure ZST accesses are checked against being "truly" dangling pointers
+ // (that are out-of-bounds).
+ let mut x_box = Box::new(1u8);
+ let x = (&mut *x_box as *mut u8).wrapping_offset(1);
+ // This one is just "at the edge", but still okay
+ unsafe { *(x as *mut [u8; 0]) = zst_val; }
+ // One byte further is OOB.
+ let x = x.wrapping_offset(1);
+ unsafe { *(x as *mut [u8; 0]) = zst_val; } //~ ERROR pointer must be in-bounds
+}
fn main() {
let x = &42 as *const i32 as usize;
let y = x * 2;
+ assert_eq!(y, x + x);
let z = y as u8 as usize;
assert_eq!(z, y % 256);
}
// Reading and writing is ok.
unsafe { *x = zst_val; }
unsafe { let _y = *x; }
-
- // We should even be able to use "true" pointers for ZST when the allocation has been
- // removed already. The box is for a non-ZST to make sure there actually is an allocation.
- let mut x_box = Box::new(((), 1u8));
- let x = &mut x_box.0 as *mut _ as *mut [u8; 0];
- drop(x_box);
- // Reading and writing is ok.
- unsafe { *x = zst_val; }
- unsafe { let _y = *x; }
}