None => self.tcx.item_name(def_id).as_str(),
};
+ let tcx = &{self.tcx.tcx};
+
// All these functions take raw pointers, so if we access memory directly
// (as opposed to through a place), we have to remember to erase any tag
// that might still hang around!
MiriMemoryKind::Rust.into()
)?
.with_default_tag();
- self.memory_mut().write_repeat(ptr.into(), 0, Size::from_bytes(size))?;
+ self.memory_mut()
+ .get_mut(ptr.alloc_id)?
+ .write_repeat(tcx, ptr, 0, Size::from_bytes(size))?;
self.write_scalar(Scalar::Ptr(ptr), dest)?;
}
"__rust_dealloc" => {
"dlsym" => {
let _handle = self.read_scalar(args[0])?;
let symbol = self.read_scalar(args[1])?.to_ptr()?;
- let symbol_name = self.memory().read_c_str(symbol)?;
+ let symbol_name = self.memory().get(symbol.alloc_id)?.read_c_str(tcx, symbol)?;
let err = format!("bad c unicode symbol: {:?}", symbol_name);
let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err);
return err!(Unimplemented(format!(
"getenv" => {
let result = {
let name_ptr = self.read_scalar(args[0])?.to_ptr()?;
- let name = self.memory().read_c_str(name_ptr)?;
+ let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?;
match self.machine.env_vars.get(name) {
Some(&var) => Scalar::Ptr(var),
None => Scalar::ptr_null(&*self.tcx),
{
let name_ptr = self.read_scalar(args[0])?.not_undef()?;
if !name_ptr.is_null_ptr(self) {
- let name = self.memory().read_c_str(name_ptr.to_ptr()?
- )?.to_owned();
+ let name_ptr = name_ptr.to_ptr()?;
+ let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?.to_owned();
if !name.is_empty() && !name.contains(&b'=') {
success = Some(self.machine.env_vars.remove(&name));
}
{
let name_ptr = self.read_scalar(args[0])?.not_undef()?;
let value_ptr = self.read_scalar(args[1])?.to_ptr()?;
- let value = self.memory().read_c_str(value_ptr)?;
+ let value = self.memory().get(value_ptr.alloc_id)?.read_c_str(tcx, value_ptr)?;
if !name_ptr.is_null_ptr(self) {
- let name = self.memory().read_c_str(name_ptr.to_ptr()?)?;
+ let name_ptr = name_ptr.to_ptr()?;
+ let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?;
if !name.is_empty() && !name.contains(&b'=') {
new = Some((name.to_owned(), value.to_owned()));
}
Align::from_bytes(1).unwrap(),
MiriMemoryKind::Env.into(),
)?.with_default_tag();
- self.memory_mut().write_bytes(value_copy.into(), &value)?;
- let trailing_zero_ptr = value_copy.offset(Size::from_bytes(value.len() as u64), self)?.into();
- self.memory_mut().write_bytes(trailing_zero_ptr, &[0])?;
+ {
+ let alloc = self.memory_mut().get_mut(value_copy.alloc_id)?;
+ alloc.write_bytes(tcx, value_copy, &value)?;
+ let trailing_zero_ptr = value_copy.offset(Size::from_bytes(value.len() as u64), tcx)?;
+ alloc.write_bytes(tcx, trailing_zero_ptr, &[0])?;
+ }
if let Some(var) = self.machine.env_vars.insert(
name.to_owned(),
value_copy,
"strlen" => {
let ptr = self.read_scalar(args[0])?.to_ptr()?;
- let n = self.memory().read_c_str(ptr)?.len();
+ let n = self.memory().get(ptr.alloc_id)?.read_c_str(tcx, ptr)?.len();
self.write_scalar(Scalar::from_uint(n as u64, dest.layout.size), dest)?;
}
let key_layout = self.layout_of(key_type)?;
// Create key and write it into the memory where key_ptr wants it
- let key = self.machine.tls.create_tls_key(dtor, &*self.tcx) as u128;
+ let key = self.machine.tls.create_tls_key(dtor, tcx) as u128;
if key_layout.size.bits() < 128 && key >= (1u128 << key_layout.size.bits() as u128) {
return err!(OutOfTls);
}
- self.memory_mut().write_scalar(
+
+ self.memory().check_align(key_ptr.into(), key_layout.align.abi)?;
+ self.memory_mut().get_mut(key_ptr.alloc_id)?.write_scalar(
+ tcx,
key_ptr,
- key_layout.align.abi,
Scalar::from_uint(key, key_layout.size).into(),
key_layout.size,
)?;
// This just creates a key; Windows does not natively support TLS dtors.
// Create key and return it
- let key = self.machine.tls.create_tls_key(None, &*self.tcx) as u128;
+ let key = self.machine.tls.create_tls_key(None, tcx) as u128;
// Figure out how large a TLS key actually is. This is c::DWORD.
if dest.layout.size.bits() < 128 && key >= (1u128 << dest.layout.size.bits() as u128) {
if self.emulate_intrinsic(instance, args, dest)? {
return Ok(());
}
-
+ let tcx = &{self.tcx.tcx};
let substs = instance.substs;
// All these intrinsics take raw pointers, so if we access memory directly
// FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
// However, this only affects direct calls of the intrinsic; calls to the stable
// functions wrapping them do get their validation.
+ // FIXME: should we check that the destination pointer is aligned even for ZSTs?
if !dest.layout.is_zst() { // nothing to do for ZST
match dest.layout.abi {
layout::Abi::Scalar(ref s) => {
// Do it in memory
let mplace = self.force_allocation(dest)?;
assert!(mplace.meta.is_none());
- self.memory_mut().write_repeat(mplace.ptr, 0, dest.layout.size)?;
+ // not a zst, must be valid pointer
+ let ptr = mplace.ptr.to_ptr()?;
+ self.memory_mut().get_mut(ptr.alloc_id)?.write_repeat(tcx, ptr, 0, dest.layout.size)?;
}
}
}
// FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
// However, this only affects direct calls of the intrinsic; calls to the stable
// functions wrapping them do get their validation.
+ // FIXME: should we check alignment for ZSTs?
if !dest.layout.is_zst() { // nothing to do for ZST
match dest.layout.abi {
layout::Abi::Scalar(..) => {
// Do it in memory
let mplace = self.force_allocation(dest)?;
assert!(mplace.meta.is_none());
- self.memory_mut().mark_definedness(mplace.ptr.to_ptr()?, dest.layout.size, false)?;
+ let ptr = mplace.ptr.to_ptr()?;
+ self.memory_mut()
+ .get_mut(ptr.alloc_id)?
+ .mark_definedness(ptr, dest.layout.size, false)?;
}
}
}
let ptr = self.read_scalar(args[0])?.not_undef()?;
let count = self.read_scalar(args[2])?.to_usize(self)?;
self.memory().check_align(ptr, ty_layout.align.abi)?;
- self.memory_mut().write_repeat(ptr, val_byte, ty_layout.size * count)?;
+ let byte_count = ty_layout.size * count;
+ if byte_count.bytes() != 0 {
+ let ptr = ptr.to_ptr()?;
+ self.memory_mut()
+ .get_mut(ptr.alloc_id)?
+ .write_repeat(tcx, ptr, val_byte, byte_count)?;
+ }
}
name => return err!(Unimplemented(format!("unimplemented intrinsic: {}", name))),
// allocations sit right next to each other. The C/C++ standards are
// somewhat fuzzy about this case, so I think for now this check is
// "good enough".
- // We require liveness, as dead allocations can of course overlap.
- self.memory().check_bounds_ptr(left, InboundsCheck::Live)?;
- self.memory().check_bounds_ptr(right, InboundsCheck::Live)?;
- // Two live in-bounds pointers, we can compare across allocations
+ // Dead allocations in miri cannot overlap with live allocations, but
+ // on real hardware this can easily happen. Thus for comparisons we require
+ // both pointers to be live.
+ self.memory().get(left.alloc_id)?.check_bounds_ptr(left)?;
+ self.memory().get(right.alloc_id)?.check_bounds_ptr(right)?;
+ // Two in-bounds pointers, we can compare across allocations
left == right
}
}
// Case I: Comparing with NULL
if bits == 0 {
// Test if the ptr is in-bounds. Then it cannot be NULL.
- if self.memory().check_bounds_ptr(ptr, InboundsCheck::MaybeDead).is_ok() {
+ // Even dangling pointers cannot be NULL.
+ if self.memory().check_bounds_ptr_maybe_dead(ptr).is_ok() {
return Ok(false);
}
}
if let Scalar::Ptr(ptr) = ptr {
// Both old and new pointer must be in-bounds of a *live* allocation.
// (Of the same allocation, but that part is trivial with our representation.)
- self.memory().check_bounds_ptr(ptr, InboundsCheck::Live)?;
+ let alloc = self.memory().get(ptr.alloc_id)?;
+ alloc.check_bounds_ptr(ptr)?;
let ptr = ptr.signed_offset(offset, self)?;
- self.memory().check_bounds_ptr(ptr, InboundsCheck::Live)?;
+ alloc.check_bounds_ptr(ptr)?;
Ok(Scalar::Ptr(ptr))
} else {
// An integer pointer. They can only be offset by 0, and we pretend there
use crate::{
EvalResult, EvalErrorKind, MiriEvalContext, HelpersEvalContextExt, Evaluator, MutValueVisitor,
- MemoryKind, MiriMemoryKind, RangeMap, AllocId, Allocation, AllocationExtra, InboundsCheck,
+ MemoryKind, MiriMemoryKind, RangeMap, AllocId, Allocation, AllocationExtra,
Pointer, MemPlace, Scalar, Immediate, ImmTy, PlaceTy, MPlaceTy,
};
}
// Get the allocation
- self.memory().check_bounds(ptr, size, InboundsCheck::Live)?;
- let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!");
+ let alloc = self.memory().get(ptr.alloc_id)?;
+ alloc.check_bounds(self, ptr, size)?;
// If we got here, we do some checking, *but* we leave the tag unchanged.
if let Borrow::Shr(Some(_)) = ptr.tag {
assert_eq!(mutability, Some(MutImmutable));
ptr, place.layout.ty, new_bor);
// Get the allocation. It might not be mutable, so we cannot use `get_mut`.
- self.memory().check_bounds(ptr, size, InboundsCheck::Live)?;
- let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!");
+ let alloc = self.memory().get(ptr.alloc_id)?;
+ alloc.check_bounds(self, ptr, size)?;
// Update the stacks.
if let Borrow::Shr(Some(_)) = new_bor {
// Reference that cares about freezing. We need a frozen-sensitive reborrow.