//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the empty case!
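+//! For example, `check_ptr_access` takes the zero-sized `None` shortcut only after
+//! the requested alignment has been verified, so even an empty access through a
+//! misaligned pointer is reported as an error.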
+use std::borrow::Cow;
use std::collections::VecDeque;
use std::ptr;
-use std::borrow::Cow;
-use rustc::ty::{self, Instance, ParamEnv, query::TyCtxtAt};
-use rustc::ty::layout::{Align, TargetDataLayout, Size, HasDataLayout};
-use rustc_data_structures::fx::{FxHashSet, FxHashMap};
+use rustc::ty::layout::{Align, HasDataLayout, Size, TargetDataLayout};
+use rustc::ty::{self, query::TyCtxtAt, Instance, ParamEnv};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use syntax::ast::Mutability;
use super::{
- Pointer, AllocId, Allocation, GlobalId, AllocationExtra,
- InterpResult, Scalar, GlobalAlloc, PointerArithmetic,
- Machine, AllocMap, MayLeak, ErrorHandled, CheckInAllocMsg,
+ AllocId, AllocMap, Allocation, AllocationExtra, CheckInAllocMsg, ErrorHandled, GlobalAlloc,
+ GlobalId, InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Scalar,
};
#[derive(Debug, PartialEq, Copy, Clone)]
MemoryKind::Stack => false,
MemoryKind::Vtable => true,
MemoryKind::CallerLocation => true,
- MemoryKind::Machine(k) => k.may_leak()
+ MemoryKind::Machine(k) => k.may_leak(),
}
}
}
impl<'tcx, Other> FnVal<'tcx, Other> {
pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
match self {
- FnVal::Instance(instance) =>
- Ok(instance),
- FnVal::Other(_) => throw_unsup_format!(
- "'foreign' function pointers are not supported in this context"
- ),
+ FnVal::Instance(instance) => Ok(instance),
+ FnVal::Other(_) => {
+ throw_unsup_format!("'foreign' function pointers are not supported in this context")
+ }
}
}
}
// carefully copy only the reachable parts.
impl<'mir, 'tcx, M> Clone for Memory<'mir, 'tcx, M>
where
- M: Machine<'mir, 'tcx, PointerTag = (), AllocExtra = (), MemoryExtra = ()>,
+ M: Machine<'mir, 'tcx, PointerTag = (), AllocExtra = ()>,
+ M::MemoryExtra: Copy,
M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKinds>, Allocation)>,
{
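+ // Cloning duplicates the machine's memory state bitwise, hence the
+ // `M::MemoryExtra: Copy` bound above.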
fn clone(&self) -> Self {
alloc_map: self.alloc_map.clone(),
extra_fn_ptr_map: self.extra_fn_ptr_map.clone(),
dead_alloc_map: self.dead_alloc_map.clone(),
- extra: (),
+ extra: self.extra,
tcx: self.tcx,
}
}
pub fn create_fn_alloc(
&mut self,
fn_val: FnVal<'tcx, M::ExtraFnVal>,
- ) -> Pointer<M::PointerTag>
- {
+ ) -> Pointer<M::PointerTag> {
let id = match fn_val {
FnVal::Instance(instance) => self.tcx.alloc_map.lock().create_fn_alloc(instance),
FnVal::Other(extra) => {
kind: MemoryKind<M::MemoryKinds>,
) -> Pointer<M::PointerTag> {
let id = self.tcx.alloc_map.lock().reserve();
- debug_assert_ne!(Some(kind), M::STATIC_KIND.map(MemoryKind::Machine),
- "dynamically allocating static memory");
+ debug_assert_ne!(
+ Some(kind),
+ M::STATIC_KIND.map(MemoryKind::Machine),
+ "dynamically allocating static memory"
+ );
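+ // `init_allocation_extra` lets the machine wrap the fresh allocation in its own
+ // extra state and pick the tag of the returned pointer (this is how e.g. Miri's
+ // Stacked Borrows hooks into new allocations).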
let (alloc, tag) = M::init_allocation_extra(&self.extra, id, Cow::Owned(alloc), Some(kind));
self.alloc_map.insert(id, (kind, alloc.into_owned()));
Pointer::from(id).with_tag(tag)
Some((size, _align)) => size,
None => self.get_raw(ptr.alloc_id)?.size,
};
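+ // `realloc` is copy-then-free: copy the prefix that fits into the fresh
+ // allocation (which cannot overlap the old one), then deallocate the old.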
- self.copy(
- ptr,
- new_ptr,
- old_size.min(new_size),
- /*nonoverlapping*/ true,
- )?;
+ self.copy(ptr, new_ptr, old_size.min(new_size), /*nonoverlapping*/ true)?;
self.deallocate(ptr, old_size_and_align, kind)?;
Ok(new_ptr)
AllocationExtra::memory_deallocated(&mut alloc, ptr, size)?;
// Don't forget to remember size and align of this now-dead allocation
- let old = self.dead_alloc_map.insert(
- ptr.alloc_id,
- (alloc.size, alloc.align)
- );
+ let old = self.dead_alloc_map.insert(ptr.alloc_id, (alloc.size, alloc.align));
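+ // (`get_size_and_align` with `AllocCheck::MaybeDead` relies on this entry to
+ // answer queries about already-dead allocations.)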
if old.is_some() {
bug!("Nothing can be deallocated twice");
}
// got picked we might be aligned even if this check fails.
// We instead have to fall back to converting to an integer and checking
// the "real" alignment.
- throw_unsup!(AlignmentCheckFailed {
- has: alloc_align,
- required: align,
- });
+ throw_unsup!(AlignmentCheckFailed { has: alloc_align, required: align });
}
check_offset_align(ptr.offset.bytes(), align)?;
}
}
/// Test if the pointer might be NULL.
- pub fn ptr_may_be_null(
- &self,
- ptr: Pointer<M::PointerTag>,
- ) -> bool {
- let (size, _align) = self.get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)
+ pub fn ptr_may_be_null(&self, ptr: Pointer<M::PointerTag>) -> bool {
+ let (size, _align) = self
+ .get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)
.expect("alloc info with MaybeDead cannot fail");
ptr.check_inbounds_alloc(size, CheckInAllocMsg::NullPointerTest).is_err()
}
) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
let alloc = tcx.alloc_map.lock().get(id);
let alloc = match alloc {
- Some(GlobalAlloc::Memory(mem)) =>
- Cow::Borrowed(mem),
- Some(GlobalAlloc::Function(..)) =>
- throw_unsup!(DerefFunctionPointer),
- None =>
- throw_unsup!(DanglingPointerDeref),
+ Some(GlobalAlloc::Memory(mem)) => Cow::Borrowed(mem),
+ Some(GlobalAlloc::Function(..)) => throw_unsup!(DerefFunctionPointer),
+ None => throw_unsup!(DanglingPointerDeref),
Some(GlobalAlloc::Static(def_id)) => {
// We got a "lazy" static that has not been computed yet.
if tcx.is_foreign_item(def_id) {
} else {
trace!("static_alloc: Need to compute {:?}", def_id);
let instance = Instance::mono(tcx.tcx, def_id);
- let gid = GlobalId {
- instance,
- promoted: None,
- };
+ let gid = GlobalId { instance, promoted: None };
// use the raw query here to break validation cycles. Later uses of the static
// will call the full query anyway
- let raw_const = tcx.const_eval_raw(ty::ParamEnv::reveal_all().and(gid))
- .map_err(|err| {
+ let raw_const =
+ tcx.const_eval_raw(ty::ParamEnv::reveal_all().and(gid)).map_err(|err| {
// no need to report anything, the const_eval call takes care of that
// for statics
assert!(tcx.is_static(def_id));
match err {
- ErrorHandled::Reported =>
- err_inval!(ReferencedConstant),
- ErrorHandled::TooGeneric =>
- err_inval!(TooGeneric),
+ ErrorHandled::Reported => err_inval!(ReferencedConstant),
+ ErrorHandled::TooGeneric => err_inval!(TooGeneric),
}
})?;
// Make sure we use the ID of the resolved memory, not the lazy one!
let id = raw_const.alloc_id;
let allocation = tcx.alloc_map.lock().unwrap_memory(id);
- M::before_access_static(allocation)?;
+ M::before_access_static(memory_extra, allocation)?;
Cow::Borrowed(allocation)
}
}
// Need to put it into the map and return a ref to that
let kind = M::STATIC_KIND.expect(
"I got an owned allocation that I have to copy but the machine does \
- not expect that to happen"
+ not expect that to happen",
);
Ok((MemoryKind::Machine(kind), alloc))
}
// Now unpack that funny error type
match a {
Ok(a) => Ok(&a.1),
- Err(a) => a
+ Err(a) => a,
}
}
// Need to make a copy, even if `get_static_alloc` is able
// to give us a cheap reference.
let alloc = Self::get_static_alloc(memory_extra, tcx, id)?;
- if alloc.mutability == Mutability::Immutable {
+ if alloc.mutability == Mutability::Not {
throw_unsup!(ModifiedConstantMemory)
}
match M::STATIC_KIND {
Err(e) => Err(e),
Ok(a) => {
let a = &mut a.1;
- if a.mutability == Mutability::Immutable {
+ if a.mutability == Mutability::Not {
throw_unsup!(ModifiedConstantMemory)
}
Ok(a)
let ty = self.tcx.type_of(did);
let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
Ok((layout.size, layout.align.abi))
- },
+ }
Some(GlobalAlloc::Memory(alloc)) =>
+ {
// Need to duplicate the logic here, because the global allocations have
// different associated types than the interpreter-local ones.
- Ok((alloc.size, alloc.align)),
- Some(GlobalAlloc::Function(_)) =>
- bug!("We already checked function pointers above"),
+ Ok((alloc.size, alloc.align))
+ }
+ Some(GlobalAlloc::Function(_)) => bug!("We already checked function pointers above"),
// The rest must be dead.
- None => if let AllocCheck::MaybeDead = liveness {
- // Deallocated pointers are allowed, we should be able to find
- // them in the map.
- Ok(*self.dead_alloc_map.get(&id)
- .expect("deallocated pointers should all be recorded in \
- `dead_alloc_map`"))
- } else {
- throw_unsup!(DanglingPointerDeref)
- },
+ None => {
+ if let AllocCheck::MaybeDead = liveness {
+ // Deallocated pointers are allowed, we should be able to find
+ // them in the map.
+ Ok(*self.dead_alloc_map.get(&id).expect(
+ "deallocated pointers should all be recorded in \
+ `dead_alloc_map`",
+ ))
+ } else {
+ throw_unsup!(DanglingPointerDeref)
+ }
+ }
}
}
}
pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
- self.get_raw_mut(id)?.mutability = Mutability::Immutable;
+ self.get_raw_mut(id)?.mutability = Mutability::Not;
Ok(())
}
// Checked definedness (and thus range) and relocations. This access also doesn't
// influence interpreter execution but is only for debugging.
- let bytes = alloc.inspect_with_undef_and_ptr_outside_interpreter(i..i+1);
+ let bytes = alloc.inspect_with_undef_and_ptr_outside_interpreter(i..i + 1);
write!(msg, "{:02x} ", bytes[0]).unwrap();
} else {
msg.push_str("__ ");
MemoryKind::Machine(m) => format!(" ({:?})", m),
};
self.dump_alloc_helper(
- &mut allocs_seen, &mut allocs_to_print,
- msg, alloc, extra
+ &mut allocs_seen,
+ &mut allocs_to_print,
+ msg,
+ alloc,
+ extra,
);
- },
+ }
Err(()) => {
// static alloc?
match self.tcx.alloc_map.lock().get(id) {
Some(GlobalAlloc::Memory(alloc)) => {
self.dump_alloc_helper(
- &mut allocs_seen, &mut allocs_to_print,
- msg, alloc, " (immutable)".to_owned()
+ &mut allocs_seen,
+ &mut allocs_to_print,
+ msg,
+ alloc,
+ " (immutable)".to_owned(),
);
}
Some(GlobalAlloc::Function(func)) => {
eprintln!("{} (deallocated)", msg);
}
}
- },
+ }
};
-
}
}
pub fn leak_report(&self) -> usize {
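+ // Collect all allocations whose kind does not opt into leaking: per `may_leak`
+ // above, stack allocations must be freed, while vtables and caller locations
+ // are allowed to stay.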
- let leaks: Vec<_> = self.alloc_map.filter_map_collect(|&id, &(kind, _)| {
- if kind.may_leak() { None } else { Some(id) }
- });
+ let leaks: Vec<_> = self
+ .alloc_map
+ .filter_map_collect(|&id, &(kind, _)| if kind.may_leak() { None } else { Some(id) });
let n = leaks.len();
if n > 0 {
eprintln!("### LEAK REPORT ###");
/// Reads the given number of bytes from memory. Returns them as a slice.
///
/// Performs appropriate bounds checks.
- pub fn read_bytes(
- &self,
- ptr: Scalar<M::PointerTag>,
- size: Size,
- ) -> InterpResult<'tcx, &[u8]> {
+ pub fn read_bytes(&self, ptr: Scalar<M::PointerTag>, size: Size) -> InterpResult<'tcx, &[u8]> {
let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
Some(ptr) => ptr,
None => return Ok(&[]), // zero-sized access
pub fn write_bytes(
&mut self,
ptr: Scalar<M::PointerTag>,
- src: impl IntoIterator<Item=u8>,
- ) -> InterpResult<'tcx>
- {
+ src: impl IntoIterator<Item = u8>,
+ ) -> InterpResult<'tcx> {
let src = src.into_iter();
let size = Size::from_bytes(src.size_hint().0 as u64);
- // `write_bytes` checks that this lower bound matches the upper bound matches reality.
+ // `write_bytes` below checks that this lower bound matches the upper bound, and that
+ // both match the actual number of bytes in `src`.
// since we don't want to keep any relocations at the target.
// (`get_bytes_with_undef_and_ptr` below checks that there are no
// relocations overlapping the edges; those would not be handled correctly).
- let relocations = self.get_raw(src.alloc_id)?
- .prepare_relocation_copy(self, src, size, dest, length);
+ let relocations =
+ self.get_raw(src.alloc_id)?.prepare_relocation_copy(self, src, size, dest, length);
let tcx = self.tcx.tcx;
+ // The undef-mask bits have to be saved locally (see `compress_undef_range` below)
+ // before writing to `dest`, in case `src` and `dest` overlap.
+ assert_eq!(size.bytes() as usize as u64, size.bytes());
+
// This checks relocation edges on the src.
- let src_bytes = self.get_raw(src.alloc_id)?
- .get_bytes_with_undef_and_ptr(&tcx, src, size)?
- .as_ptr();
- let dest_bytes = self.get_raw_mut(dest.alloc_id)?
- .get_bytes_mut(&tcx, dest, size * length)?
- .as_mut_ptr();
+ let src_bytes =
+ self.get_raw(src.alloc_id)?.get_bytes_with_undef_and_ptr(&tcx, src, size)?.as_ptr();
+ let dest_bytes =
+ self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, size * length)?;
+
+ // If `dest_bytes` is empty, skip the rest entirely: there is nothing to copy
+ // for zero-sized types (ZSTs). See #67539.
+ if dest_bytes.is_empty() {
+ return Ok(());
+ }
+
+ let dest_bytes = dest_bytes.as_mut_ptr();
+
+ // Prepare a copy of the undef mask.
+ let compressed = self.get_raw(src.alloc_id)?.compress_undef_range(src, size);
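+ // (`compress_undef_range` produces a run-length encoding of the mask, so
+ // replaying it `length` times onto the destination below stays cheap.)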
+
+ if compressed.all_bytes_undef() {
+ // Fast path: If all bytes are `undef` then there is nothing to copy. The target range
+ // is marked as undef but we otherwise omit changing the byte representation which may
+ // be arbitrary for undef bytes.
+ // This also avoids writing to the target bytes so that the backing allocation is never
+ // touched if the bytes stay undef for the whole interpreter execution. On contemporary
+ // operating systems this can avoid physically allocating the page.
+ let dest_alloc = self.get_raw_mut(dest.alloc_id)?;
+ dest_alloc.mark_definedness(dest, size * length, false);
+ dest_alloc.mark_relocation_range(relocations);
+ return Ok(());
+ }
// SAFE: The above indexing would have panicked if there weren't at least `size` bytes
// behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
assert_eq!(size.bytes() as usize as u64, size.bytes());
if src.alloc_id == dest.alloc_id {
if nonoverlapping {
- if (src.offset <= dest.offset && src.offset + size > dest.offset) ||
- (dest.offset <= src.offset && dest.offset + size > src.offset)
+ if (src.offset <= dest.offset && src.offset + size > dest.offset)
+ || (dest.offset <= src.offset && dest.offset + size > src.offset)
{
- throw_ub_format!(
- "copy_nonoverlapping called on overlapping ranges"
- )
+ throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
}
}
for i in 0..length {
- ptr::copy(src_bytes,
- dest_bytes.offset((size.bytes() * i) as isize),
- size.bytes() as usize);
+ ptr::copy(
+ src_bytes,
+ dest_bytes.offset((size.bytes() * i) as isize),
+ size.bytes() as usize,
+ );
}
} else {
for i in 0..length {
- ptr::copy_nonoverlapping(src_bytes,
- dest_bytes.offset((size.bytes() * i) as isize),
- size.bytes() as usize);
+ ptr::copy_nonoverlapping(
+ src_bytes,
+ dest_bytes.offset((size.bytes() * i) as isize),
+ size.bytes() as usize,
+ );
}
}
}
- // copy definedness to the destination
- self.copy_undef_mask(src, dest, size, length)?;
+ // Now fill in the definedness (undef mask) of the destination range.
+ self.get_raw_mut(dest.alloc_id)?.mark_compressed_undef_range(
+ &compressed,
+ dest,
+ size,
+ length,
+ );
+
// copy the relocations to the destination
self.get_raw_mut(dest.alloc_id)?.mark_relocation_range(relocations);
}
}
-/// Undefined bytes
+/// Machine pointer introspection.
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
- // FIXME: Add a fast version for the common, nonoverlapping case
- fn copy_undef_mask(
- &mut self,
- src: Pointer<M::PointerTag>,
- dest: Pointer<M::PointerTag>,
- size: Size,
- repeat: u64,
- ) -> InterpResult<'tcx> {
- // The bits have to be saved locally before writing to dest in case src and dest overlap.
- assert_eq!(size.bytes() as usize as u64, size.bytes());
-
- let src_alloc = self.get_raw(src.alloc_id)?;
- let compressed = src_alloc.compress_undef_range(src, size);
-
- // now fill in all the data
- let dest_allocation = self.get_raw_mut(dest.alloc_id)?;
- dest_allocation.mark_compressed_undef_range(&compressed, dest, size, repeat);
-
- Ok(())
- }
-
pub fn force_ptr(
&self,
scalar: Scalar<M::PointerTag>,
) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
match scalar {
Scalar::Ptr(ptr) => Ok(ptr),
- _ => M::int_to_ptr(&self, scalar.to_machine_usize(self)?)
+ _ => M::int_to_ptr(&self, scalar.to_machine_usize(self)?),
}
}
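+ /// The inverse of `force_ptr`: raw bits pass through unchanged, while pointers
+ /// are converted to their integer address via `M::ptr_to_int`.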
pub fn force_bits(
&self,
scalar: Scalar<M::PointerTag>,
- size: Size
+ size: Size,
) -> InterpResult<'tcx, u128> {
match scalar.to_bits_or_ptr(size, self) {
Ok(bits) => Ok(bits),
- Err(ptr) => Ok(M::ptr_to_int(&self, ptr)? as u128)
+ Err(ptr) => Ok(M::ptr_to_int(&self, ptr)? as u128),
}
}
}