use super::{
Pointer, AllocId, Allocation, GlobalId, AllocationExtra,
InterpResult, Scalar, InterpError, GlobalAlloc, PointerArithmetic,
- Machine, AllocMap, MayLeak, ErrorHandled, CheckInAllocMsg, InboundsCheck,
- InterpError::ValidationFailure,
+ Machine, AllocMap, MayLeak, ErrorHandled, CheckInAllocMsg,
};
#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
}
}
+/// Used by `get_size_and_align` to indicate whether the allocation needs to be live.
+#[derive(Debug, Copy, Clone)]
+pub enum AllocCheck {
+ /// Allocation must be live and not a function pointer.
+ Dereferencable,
+ /// Allocation needs to be live, but may be a function pointer.
+ Live,
+ /// Allocation may be dead.
+ MaybeDead,
+}
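+
+// For illustration (not exhaustive): in this file, `check_ptr_access` passes
+// `Dereferencable` because the pointer is about to be used for an actual access,
+// while `ptr_may_be_null` passes `MaybeDead` because it only needs (possibly
+// stale) size information.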
+
+/// The value of a function pointer.
+#[derive(Debug, Copy, Clone)]
+pub enum FnVal<'tcx, Other> {
+ Instance(Instance<'tcx>),
+ Other(Other),
+}
+
+impl<'tcx, Other> FnVal<'tcx, Other> {
+ pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
+ match self {
+ FnVal::Instance(instance) =>
+ Ok(instance),
+ FnVal::Other(_) =>
+ err!(MachineError(
+ format!("Expected instance function pointer, got 'other' pointer")
+ )),
+ }
+ }
+}
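+
+// A sketch of the consumer side (`call_extra_fn` is a hypothetical machine
+// hook, not defined in this file):
+//
+//     match fn_val {
+//         FnVal::Instance(instance) => { /* call the MIR body of `instance` */ }
+//         FnVal::Other(extra) => { /* defer to the machine, e.g. `call_extra_fn` */ }
+//     }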
+
// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
// FIXME: this should not be public, but interning currently needs access to it
pub(super) alloc_map: M::MemoryMap,
+ /// Map for "extra" function pointers.
+ extra_fn_ptr_map: FxHashMap<AllocId, M::ExtraFnVal>,
+
/// To be able to compare pointers with NULL, and to check alignment for accesses
/// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
/// that do not exist any more.
+ // FIXME: this should not be public, but interning currently needs access to it
pub(super) dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,
/// Extra data added by the machine.
pub extra: M::MemoryExtra,
/// Lets us implement `HasDataLayout`, which is awfully convenient.
- pub(super) tcx: TyCtxtAt<'tcx>,
+ pub tcx: TyCtxtAt<'tcx>,
}
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for Memory<'mir, 'tcx, M> {
fn clone(&self) -> Self {
Memory {
alloc_map: self.alloc_map.clone(),
+ extra_fn_ptr_map: self.extra_fn_ptr_map.clone(),
dead_alloc_map: self.dead_alloc_map.clone(),
extra: (),
tcx: self.tcx,
}
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
- pub fn new(tcx: TyCtxtAt<'tcx>) -> Self {
+ pub fn new(tcx: TyCtxtAt<'tcx>, extra: M::MemoryExtra) -> Self {
Memory {
alloc_map: M::MemoryMap::default(),
+ extra_fn_ptr_map: FxHashMap::default(),
dead_alloc_map: FxHashMap::default(),
- extra: M::MemoryExtra::default(),
+ extra,
tcx,
}
}
#[inline]
pub fn tag_static_base_pointer(&self, ptr: Pointer) -> Pointer<M::PointerTag> {
- ptr.with_tag(M::tag_static_base_pointer(ptr.alloc_id, &self.extra))
+ ptr.with_tag(M::tag_static_base_pointer(ptr.alloc_id, &self))
}
- pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> Pointer<M::PointerTag> {
- let id = self.tcx.alloc_map.lock().create_fn_alloc(instance);
+ pub fn create_fn_alloc(
+ &mut self,
+ fn_val: FnVal<'tcx, M::ExtraFnVal>,
+ ) -> Pointer<M::PointerTag>
+ {
+ let id = match fn_val {
+ FnVal::Instance(instance) => self.tcx.alloc_map.lock().create_fn_alloc(instance),
+ FnVal::Other(extra) => {
+ // FIXME(RalfJung): Should we have a cache here?
+ let id = self.tcx.alloc_map.lock().reserve();
+ let old = self.extra_fn_ptr_map.insert(id, extra);
+ assert!(old.is_none());
+ id
+ }
+ };
self.tag_static_base_pointer(Pointer::from(id))
}
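+ // For example (sketch): turning an `Instance` into a callable pointer.
+ //
+ //     let fn_ptr = memory.create_fn_alloc(FnVal::Instance(instance));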
kind: MemoryKind<M::MemoryKinds>,
) -> Pointer<M::PointerTag> {
let id = self.tcx.alloc_map.lock().reserve();
- let (alloc, tag) = M::tag_allocation(id, Cow::Owned(alloc), Some(kind), &self.extra);
+ let (alloc, tag) = M::tag_allocation(id, Cow::Owned(alloc), Some(kind), &self);
self.alloc_map.insert(id, (kind, alloc.into_owned()));
Pointer::from(id).with_tag(tag)
}
pub fn reallocate(
&mut self,
ptr: Pointer<M::PointerTag>,
- old_size: Size,
- old_align: Align,
+ old_size_and_align: Option<(Size, Align)>,
new_size: Size,
new_align: Align,
kind: MemoryKind<M::MemoryKinds>,
// For simplicity's sake, we implement `reallocate` as "alloc, copy, dealloc".
// This happens so rarely, the perf advantage is outweighed by the maintenance cost.
let new_ptr = self.allocate(new_size, new_align, kind);
+ let old_size = match old_size_and_align {
+ Some((size, _align)) => size,
+ None => Size::from_bytes(self.get(ptr.alloc_id)?.bytes.len() as u64),
+ };
self.copy(
ptr.into(),
- old_align,
+ Align::from_bytes(1).unwrap(), // `old_align` is checked below by `deallocate` anyway
new_ptr.into(),
new_align,
old_size.min(new_size),
/*nonoverlapping*/ true,
)?;
- self.deallocate(ptr, Some((old_size, old_align)), kind)?;
+ self.deallocate(ptr, old_size_and_align, kind)?;
Ok(new_ptr)
}
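+ // Note: passing `None` for `old_size_and_align` skips the size/alignment
+ // check in `deallocate` below and reads the old size out of the allocation
+ // instead, e.g. (sketch):
+ //
+ //     let new_ptr = memory.reallocate(ptr, None, new_size, new_align, kind)?;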
pub fn deallocate(
&mut self,
ptr: Pointer<M::PointerTag>,
- size_and_align: Option<(Size, Align)>,
+ old_size_and_align: Option<(Size, Align)>,
kind: MemoryKind<M::MemoryKinds>,
) -> InterpResult<'tcx> {
trace!("deallocating: {}", ptr.alloc_id);
format!("{:?}", kind),
));
}
- if let Some((size, align)) = size_and_align {
+ if let Some((size, align)) = old_size_and_align {
if size.bytes() != alloc.bytes.len() as u64 || align != alloc.align {
let bytes = Size::from_bytes(alloc.bytes.len() as u64);
return err!(IncorrectAllocationInformation(size,
Ok(())
}
- /// Checks that the pointer is aligned AND non-NULL. This supports ZSTs in two ways:
- /// You can pass a scalar, and a `Pointer` does not have to actually still be allocated.
- pub fn check_align(
+ /// Checks if the given scalar is allowed to do a memory access of given `size`
+ /// and `align`. On success, returns `None` for zero-sized accesses (where
+ /// nothing else is left to do) and a `Pointer` to use for the actual access otherwise.
+ /// Crucially, if the input is a `Pointer`, we will test it for liveness
+ /// *even if* the size is 0.
+ ///
+ /// Everyone accessing memory based on a `Scalar` should use this method to get the
+ /// `Pointer` they need. And even if you already have a `Pointer`, call this method
+ /// to make sure it is sufficiently aligned and not dangling. Not doing that may
+ /// cause ICEs.
+ pub fn check_ptr_access(
&self,
- ptr: Scalar<M::PointerTag>,
- required_align: Align
- ) -> InterpResult<'tcx> {
- // Check non-NULL/Undef, extract offset
- let (offset, alloc_align) = match ptr.to_bits_or_ptr(self.pointer_size(), self) {
- Err(ptr) => {
- // check this is not NULL -- which we can ensure only if this is in-bounds
- // of some (potentially dead) allocation.
- let align = self.check_bounds_ptr(ptr, InboundsCheck::MaybeDead,
- CheckInAllocMsg::NullPointerTest)?;
- (ptr.offset.bytes(), align)
+ sptr: Scalar<M::PointerTag>,
+ size: Size,
+ align: Align,
+ ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
+ fn check_offset_align(offset: u64, align: Align) -> InterpResult<'static> {
+ if offset % align.bytes() == 0 {
+ Ok(())
+ } else {
+ // The biggest power of two by which `offset` is divisible.
+ let offset_pow2 = 1 << offset.trailing_zeros();
+ err!(AlignmentCheckFailed {
+ has: Align::from_bytes(offset_pow2).unwrap(),
+ required: align,
+ })
}
- Ok(data) => {
- // check this is not NULL
- if data == 0 {
+ }
+
+ // Normalize to a `Pointer` if we definitely need one.
+ let normalized = if size.bytes() == 0 {
+ // Can be an integer, just take what we got. We do NOT `force_bits` here;
+ // if this is already a `Pointer` we want to do the bounds checks!
+ sptr
+ } else {
+ // A "real" access, we must get a pointer.
+ Scalar::Ptr(self.force_ptr(sptr)?)
+ };
+ Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) {
+ Ok(bits) => {
+ let bits = bits as u64; // it's ptr-sized
+ assert!(size.bytes() == 0);
+ // Must be non-NULL and aligned.
+ if bits == 0 {
return err!(InvalidNullPointerUsage);
}
- // the "base address" is 0 and hence always aligned
- (data as u64, required_align)
+ check_offset_align(bits, align)?;
+ None
}
- };
- // Check alignment
- if alloc_align.bytes() < required_align.bytes() {
- return err!(AlignmentCheckFailed {
- has: alloc_align,
- required: required_align,
- });
- }
- if offset % required_align.bytes() == 0 {
- Ok(())
- } else {
- let has = offset % required_align.bytes();
- err!(AlignmentCheckFailed {
- has: Align::from_bytes(has).unwrap(),
- required: required_align,
- })
- }
+ Err(ptr) => {
+ let (allocation_size, alloc_align) =
+ self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferencable)?;
+ // Test bounds. This also ensures non-NULL.
+ // It is sufficient to check this for the end pointer. The addition
+ // checks for overflow.
+ let end_ptr = ptr.offset(size, self)?;
+ end_ptr.check_in_alloc(allocation_size, CheckInAllocMsg::MemoryAccessTest)?;
+ // Test align. Check this last; if both bounds and alignment are violated
+ // we want the error to be about the bounds.
+ if alloc_align.bytes() < align.bytes() {
+ // The allocation itself is not aligned enough.
+ // FIXME: Alignment check is too strict, depending on the base address that
+ // got picked we might be aligned even if this check fails.
+ // We instead have to fall back to converting to an integer and checking
+ // the "real" alignment.
+ return err!(AlignmentCheckFailed {
+ has: alloc_align,
+ required: align,
+ });
+ }
+ check_offset_align(ptr.offset.bytes(), align)?;
+
+ // We can still be zero-sized in this branch, in which case we have to
+ // return `None`.
+ if size.bytes() == 0 { None } else { Some(ptr) }
+ }
+ })
}
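+ // Typical call-site shape (sketch; `read_bytes` below is a real example):
+ //
+ //     let ptr = match self.check_ptr_access(sptr, size, align)? {
+ //         Some(ptr) => ptr, // non-empty access; `ptr` is live and aligned
+ //         None => return Ok(..), // zero-sized access; nothing left to do
+ //     };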
- /// Checks if the pointer is "in-bounds". Notice that a pointer pointing at the end
- /// of an allocation (i.e., at the first *inaccessible* location) *is* considered
- /// in-bounds! This follows C's/LLVM's rules.
- /// If you want to check bounds before doing a memory access, better first obtain
- /// an `Allocation` and call `check_bounds`.
- pub fn check_bounds_ptr(
+ /// Tests whether the pointer might be NULL.
+ pub fn ptr_may_be_null(
&self,
ptr: Pointer<M::PointerTag>,
- liveness: InboundsCheck,
- msg: CheckInAllocMsg,
- ) -> InterpResult<'tcx, Align> {
- let (allocation_size, align) = self.get_size_and_align(ptr.alloc_id, liveness)?;
- ptr.check_in_alloc(allocation_size, msg)?;
- Ok(align)
+ ) -> bool {
+ let (size, _align) = self.get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)
+ .expect("alloc info with MaybeDead cannot fail");
+ ptr.check_in_alloc(size, CheckInAllocMsg::NullPointerTest).is_err()
}
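+
+ // Rationale (sketch): address 0 is never part of any allocation, so a pointer
+ // that is in-bounds of its (possibly dead) allocation cannot be NULL; only the
+ // out-of-bounds case leaves room for a NULL value.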
}
fn get_static_alloc(
id: AllocId,
tcx: TyCtxtAt<'tcx>,
- memory_extra: &M::MemoryExtra,
+ memory: &Memory<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
let alloc = tcx.alloc_map.lock().get(id);
let alloc = match alloc {
id, // always use the ID we got as input, not the "hidden" one.
alloc,
M::STATIC_KIND.map(MemoryKind::Machine),
- memory_extra
+ memory
).0)
}
// `get_static_alloc` that we can actually use directly without inserting anything anywhere.
// So the error type is `InterpResult<'tcx, &Allocation<M::PointerTag>>`.
let a = self.alloc_map.get_or(id, || {
- let alloc = Self::get_static_alloc(id, self.tcx, &self.extra).map_err(Err)?;
+ let alloc = Self::get_static_alloc(id, self.tcx, &self).map_err(Err)?;
match alloc {
Cow::Borrowed(alloc) => {
// We got a ref, cheaply return that as an "error" so that the
id: AllocId,
) -> InterpResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
let tcx = self.tcx;
- let memory_extra = &self.extra;
+ let alloc = Self::get_static_alloc(id, tcx, &self);
let a = self.alloc_map.get_mut_or(id, || {
// Need to make a copy, even if `get_static_alloc` is able
// to give us a cheap reference.
- let alloc = Self::get_static_alloc(id, tcx, memory_extra)?;
+ let alloc = alloc?;
if alloc.mutability == Mutability::Immutable {
return err!(ModifiedConstantMemory);
}
}
}
- /// Obtain the size and alignment of an allocation, even if that allocation has been deallocated
+ /// Obtain the size and alignment of an allocation, even if that allocation has
+ /// been deallocated.
///
- /// If `liveness` is `InboundsCheck::MaybeDead`, this function always returns `Ok`
+ /// If `liveness` is `AllocCheck::MaybeDead`, this function always returns `Ok`.
pub fn get_size_and_align(
&self,
id: AllocId,
- liveness: InboundsCheck,
+ liveness: AllocCheck,
) -> InterpResult<'static, (Size, Align)> {
+ // Regular allocations.
if let Ok(alloc) = self.get(id) {
return Ok((Size::from_bytes(alloc.bytes.len() as u64), alloc.align));
}
- // can't do this in the match argument, we may get cycle errors since the lock would get
- // dropped after the match.
+ // Function pointers.
+ if let Ok(_) = self.get_fn_alloc(id) {
+ return if let AllocCheck::Dereferencable = liveness {
+ // The caller requested no function pointers.
+ err!(DerefFunctionPointer)
+ } else {
+ Ok((Size::ZERO, Align::from_bytes(1).unwrap()))
+ };
+ }
+ // Foreign statics.
+ // Can't do this in the match argument, we may get cycle errors since the lock would
+ // be held throughout the match.
let alloc = self.tcx.alloc_map.lock().get(id);
- // Could also be a fn ptr or extern static
match alloc {
- Some(GlobalAlloc::Function(..)) => Ok((Size::ZERO, Align::from_bytes(1).unwrap())),
- // `self.get` would also work, but can cause cycles if a static refers to itself
Some(GlobalAlloc::Static(did)) => {
- // The only way `get` couldn't have worked here is if this is an extern static
assert!(self.tcx.is_foreign_item(did));
// Use size and align of the type
let ty = self.tcx.type_of(did);
let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
- Ok((layout.size, layout.align.abi))
+ return Ok((layout.size, layout.align.abi));
+ }
+ _ => {}
+ }
+ // The rest must be dead.
+ if let AllocCheck::MaybeDead = liveness {
+ // Deallocated pointers are allowed; we should be able to find
+ // them in the map.
+ Ok(*self.dead_alloc_map.get(&id)
+ .expect("deallocated pointers should all be recorded in `dead_alloc_map`"))
+ } else {
+ err!(DanglingPointerDeref)
+ }
+ }
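+
+ // For example (sketch): a function pointer allocation has size 0 and alignment 1
+ // under `AllocCheck::Live` or `AllocCheck::MaybeDead`, but produces
+ // `DerefFunctionPointer` under `AllocCheck::Dereferencable`.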
+
+ fn get_fn_alloc(&self, id: AllocId) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
+ trace!("reading fn ptr: {}", id);
+ if let Some(extra) = self.extra_fn_ptr_map.get(&id) {
+ Ok(FnVal::Other(*extra))
+ } else {
+ match self.tcx.alloc_map.lock().get(id) {
+ Some(GlobalAlloc::Function(instance)) => Ok(FnVal::Instance(instance)),
+ _ => Err(InterpError::ExecuteMemory.into()),
}
- _ => {
- if let Ok(alloc) = self.get(id) {
- return Ok((Size::from_bytes(alloc.bytes.len() as u64), alloc.align));
- }
- match liveness {
- InboundsCheck::MaybeDead => {
- // Must be a deallocated pointer
- self.dead_alloc_map.get(&id).cloned().ok_or_else(||
- ValidationFailure("allocation missing in dead_alloc_map".to_string())
- .into()
- )
- },
- InboundsCheck::Live => err!(DanglingPointerDeref),
- }
- },
}
}
- pub fn get_fn(&self, ptr: Pointer<M::PointerTag>) -> InterpResult<'tcx, Instance<'tcx>> {
+ pub fn get_fn(
+ &self,
+ ptr: Scalar<M::PointerTag>,
+ ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
+ let ptr = self.force_ptr(ptr)?; // We definitely need a pointer value.
if ptr.offset.bytes() != 0 {
return err!(InvalidFunctionPointer);
}
- trace!("reading fn ptr: {}", ptr.alloc_id);
- match self.tcx.alloc_map.lock().get(ptr.alloc_id) {
- Some(GlobalAlloc::Function(instance)) => Ok(instance),
- _ => Err(InterpError::ExecuteMemory.into()),
- }
+ self.get_fn_alloc(ptr.alloc_id)
}
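+ // E.g. (sketch): a caller that only supports real Rust functions can collapse
+ // the result immediately:
+ //
+ //     let instance = self.memory.get_fn(func)?.as_instance()?;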
pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
}
}
-/// Byte Accessors
+/// Reading and writing.
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
+ /// Reads the given number of bytes from memory. Returns them as a slice.
+ ///
+ /// Performs appropriate bounds checks.
pub fn read_bytes(
&self,
ptr: Scalar<M::PointerTag>,
size: Size,
) -> InterpResult<'tcx, &[u8]> {
- if size.bytes() == 0 {
- Ok(&[])
- } else {
- let ptr = self.force_ptr(ptr)?;
- self.get(ptr.alloc_id)?.get_bytes(self, ptr, size)
- }
+ let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
+ Some(ptr) => ptr,
+ None => return Ok(&[]), // zero-sized access
+ };
+ self.get(ptr.alloc_id)?.get_bytes(self, ptr, size)
}
-}
-/// Reading and writing.
-impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
+ /// Reads a 0-terminated sequence of bytes from memory. Returns them as a slice.
+ ///
+ /// Performs appropriate bounds checks.
+ pub fn read_c_str(&self, ptr: Scalar<M::PointerTag>) -> InterpResult<'tcx, &[u8]> {
+ let ptr = self.force_ptr(ptr)?; // We need to read at least 1 byte, so we *need* a ptr.
+ self.get(ptr.alloc_id)?.read_c_str(self, ptr)
+ }
+
+ /// Performs appropriate bounds checks.
pub fn copy(
&mut self,
src: Scalar<M::PointerTag>,
self.copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
}
+ /// Performs appropriate bounds checks.
pub fn copy_repeatedly(
&mut self,
src: Scalar<M::PointerTag>,
length: u64,
nonoverlapping: bool,
) -> InterpResult<'tcx> {
- self.check_align(src, src_align)?;
- self.check_align(dest, dest_align)?;
- if size.bytes() == 0 {
- // Nothing to do for ZST, other than checking alignment and
- // non-NULLness which already happened.
- return Ok(());
- }
- let src = self.force_ptr(src)?;
- let dest = self.force_ptr(dest)?;
+ // We need to check *both* before early-aborting due to the size being 0.
+ let (src, dest) = match (self.check_ptr_access(src, size, src_align)?,
+ self.check_ptr_access(dest, size * length, dest_align)?)
+ {
+ (Some(src), Some(dest)) => (src, dest),
+ // One of the two sizes is 0.
+ _ => return Ok(()),
+ };
// first copy the relocations to a temporary buffer, because
// `get_bytes_mut` will clear the relocations, which is correct,
) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
match scalar {
Scalar::Ptr(ptr) => Ok(ptr),
- _ => M::int_to_ptr(scalar.to_usize(self)?, &self.extra)
+ _ => M::int_to_ptr(scalar.to_usize(self)?, self)
}
}
) -> InterpResult<'tcx, u128> {
match scalar.to_bits_or_ptr(size, self) {
Ok(bits) => Ok(bits),
- Err(ptr) => Ok(M::ptr_to_int(ptr, &self.extra)? as u128)
+ Err(ptr) => Ok(M::ptr_to_int(ptr, self)? as u128)
}
}
}