use rustc::ty::{self, Ty};
-use rustc::ty::layout::LayoutOf;
+use rustc::ty::layout::{Align, LayoutOf};
use rustc::hir::def_id::{DefId, CRATE_DEF_INDEX};
use rustc::mir;
use syntax::attr;
if size == 0 {
self.write_null(dest, dest_ty)?;
} else {
- let align = self.memory.pointer_size();
+ let align = self.tcx.data_layout.pointer_align;
let ptr = self.memory.allocate(size, align, Some(MemoryKind::C.into()))?;
self.write_primval(dest, PrimVal::Ptr(ptr), dest_ty)?;
}
// +1 for the null terminator
let value_copy = self.memory.allocate(
(value.len() + 1) as u64,
- 1,
+ Align::from_bytes(1, 1).unwrap(),
Some(MemoryKind::Env.into()),
)?;
self.memory.write_bytes(value_copy.into(), &value)?;
"sysconf" => {
let name = self.value_to_primval(args[0])?.to_u64()?;
+ let name_align = self.layout_of(args[0].ty)?.align;
+
trace!("sysconf() called with name {}", name);
// cache the sysconf integers via miri's global cache
let paths = &[
Some(ptr) => ptr,
None => eval_body(self.tcx, instance, ty::ParamEnv::empty(traits::Reveal::All))?.0,
};
- let val = self.value_to_primval(ValTy { value: Value::ByRef(val), ty: args[0].ty })?.to_u64()?;
+ let val = self.value_to_primval(ValTy { value: Value::ByRef(val, name_align),
+ ty: args[0].ty })?.to_u64()?;
if val == name {
result = Some(path_value);
break;
// Hook pthread calls that go to the thread-local storage memory subsystem
"pthread_key_create" => {
let key_ptr = self.into_ptr(args[0].value)?;
+ let key_align = self.layout_of(args[0].ty)?.align;
// Extract the function type out of the signature (that seems easier than constructing it ourselves...)
let dtor = match self.into_ptr(args[1].value)?.into_inner_primval() {
}
self.memory.write_primval(
key_ptr.to_ptr()?,
+ key_align,
PrimVal::Bytes(key),
key_size.bytes(),
false,
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
- let ptr = self.memory.allocate(size, align, Some(MemoryKind::Rust.into()))?;
+ let ptr = self.memory.allocate(size,
+ Align::from_bytes(align, align).unwrap(),
+ Some(MemoryKind::Rust.into()))?;
self.write_primval(dest, PrimVal::Ptr(ptr), dest_ty)?;
}
"alloc::heap::::__rust_alloc_zeroed" => {
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
- let ptr = self.memory.allocate(size, align, Some(MemoryKind::Rust.into()))?;
+ let ptr = self.memory.allocate(size,
+ Align::from_bytes(align, align).unwrap(),
+ Some(MemoryKind::Rust.into()))?;
self.memory.write_repeat(ptr.into(), 0, size)?;
self.write_primval(dest, PrimVal::Ptr(ptr), dest_ty)?;
}
}
self.memory.deallocate(
ptr,
- Some((old_size, align)),
+ Some((old_size, Align::from_bytes(align, align).unwrap())),
MemoryKind::Rust.into(),
)?;
}
let new_ptr = self.memory.reallocate(
ptr,
old_size,
- old_align,
+ Align::from_bytes(old_align, old_align).unwrap(),
new_size,
- new_align,
+ Align::from_bytes(new_align, new_align).unwrap(),
MemoryKind::Rust.into(),
)?;
self.write_primval(dest, PrimVal::Ptr(new_ptr), dest_ty)?;
use rustc::ty::layout::{TyLayout, LayoutOf};
use rustc::ty;
-use rustc::mir::interpret::{EvalResult, PrimVal, PrimValKind, Value, Pointer, AccessKind, PtrAndAlign};
+use rustc::mir::interpret::{EvalResult, PrimVal, PrimValKind, Value, Pointer};
use rustc_mir::interpret::{Place, PlaceExtra, HasMemory, EvalContext, ValTy};
use helpers::EvalContextExt as HelperEvalContextExt;
"atomic_load_acq" |
"volatile_load" => {
let ptr = self.into_ptr(args[0].value)?;
+ let align = self.layout_of(args[0].ty)?.align;
+
let valty = ValTy {
- value: Value::by_ref(ptr),
+ value: Value::ByRef(ptr, align),
ty: substs.type_at(0),
};
self.write_value(valty, dest)?;
"atomic_store_rel" |
"volatile_store" => {
let ty = substs.type_at(0);
+ let align = self.layout_of(ty)?.align;
let dest = self.into_ptr(args[0].value)?;
- self.write_value_to_ptr(args[1].value, dest, ty)?;
+ self.write_value_to_ptr(args[1].value, dest, align, ty)?;
}
"atomic_fence_acq" => {
_ if intrinsic_name.starts_with("atomic_xchg") => {
let ty = substs.type_at(0);
+ let align = self.layout_of(ty)?.align;
let ptr = self.into_ptr(args[0].value)?;
let change = self.value_to_primval(args[1])?;
- let old = self.read_value(ptr, ty)?;
+ let old = self.read_value(ptr, align, ty)?;
let old = match old {
Value::ByVal(val) => val,
Value::ByRef { .. } => bug!("just read the value, can't be byref"),
};
self.write_primval(dest, old, ty)?;
self.write_primval(
- Place::from_primval_ptr(ptr),
+ Place::from_primval_ptr(ptr, align),
change,
ty,
)?;
_ if intrinsic_name.starts_with("atomic_cxchg") => {
let ty = substs.type_at(0);
+ let align = self.layout_of(ty)?.align;
let ptr = self.into_ptr(args[0].value)?;
let expect_old = self.value_to_primval(args[1])?;
let change = self.value_to_primval(args[2])?;
- let old = self.read_value(ptr, ty)?;
+ let old = self.read_value(ptr, align, ty)?;
let old = match old {
Value::ByVal(val) => val,
Value::ByRef { .. } => bug!("just read the value, can't be byref"),
};
self.write_value(valty, dest)?;
self.write_primval(
- Place::from_primval_ptr(ptr),
+ Place::from_primval_ptr(ptr, dest_layout.align),
change,
ty,
)?;
"atomic_xsub_acqrel" |
"atomic_xsub_relaxed" => {
let ty = substs.type_at(0);
+ let align = self.layout_of(ty)?.align;
let ptr = self.into_ptr(args[0].value)?;
let change = self.value_to_primval(args[1])?;
- let old = self.read_value(ptr, ty)?;
+ let old = self.read_value(ptr, align, ty)?;
let old = match old {
Value::ByVal(val) => val,
Value::ByRef { .. } => bug!("just read the value, can't be byref"),
};
// FIXME: what do atomics do on overflow?
let (val, _) = self.binary_op(op, old, ty, change, ty)?;
- self.write_primval(Place::from_primval_ptr(ptr), val, ty)?;
+ self.write_primval(Place::from_primval_ptr(ptr, dest_layout.align), val, ty)?;
}
"breakpoint" => unimplemented!(), // halt miri
if count * elem_size != 0 {
// TODO: We do not even validate alignment for the 0-bytes case. libstd relies on this in vec::IntoIter::next.
// Also see the write_bytes intrinsic.
- let elem_align = elem_layout.align.abi();
+ let elem_align = elem_layout.align;
let src = self.into_ptr(args[0].value)?;
+ let src_align = self.layout_of(args[0].ty)?.align;
let dest = self.into_ptr(args[1].value)?;
self.memory.copy(
src,
+ src_align,
dest,
- count * elem_size,
elem_align,
+ count * elem_size,
intrinsic_name.ends_with("_nonoverlapping"),
)?;
}
"discriminant_value" => {
let ty = substs.type_at(0);
let adt_ptr = self.into_ptr(args[0].value)?;
- let place = Place::from_primval_ptr(adt_ptr);
+ let adt_align = self.layout_of(args[0].ty)?.align;
+ let place = Place::from_primval_ptr(adt_ptr, adt_align);
let discr_val = self.read_discriminant_value(place, ty)?;
self.write_primval(dest, PrimVal::Bytes(discr_val), dest_layout.ty)?;
}
let size = dest_layout.size.bytes();
let init = |this: &mut Self, val: Value| {
let zero_val = match val {
- Value::ByRef(PtrAndAlign { ptr, .. }) => {
+ Value::ByRef(ptr, _) => {
// These writes have no alignment restriction anyway.
this.memory.write_repeat(ptr, 0, size)?;
val
let ptr = this.alloc_ptr(dest_layout.ty)?;
let ptr = Pointer::from(PrimVal::Ptr(ptr));
this.memory.write_repeat(ptr, 0, size)?;
- Value::by_ref(ptr)
+ Value::ByRef(ptr, dest_layout.align)
}
}
}
match dest {
Place::Local { frame, local } => self.modify_local(frame, local, init)?,
Place::Ptr {
- ptr: PtrAndAlign { ptr, aligned: true },
+ ptr,
+ align: _,
extra: PlaceExtra::None,
} => self.memory.write_repeat(ptr, 0, size)?,
- Place::Ptr { .. } => {
- bug!("init intrinsic tried to write to fat or unaligned ptr target")
- }
+ _ => bug!("init intrinsic tried to write to fat ptr target"),
}
}
"move_val_init" => {
let ty = substs.type_at(0);
let ptr = self.into_ptr(args[0].value)?;
- self.write_value_to_ptr(args[1].value, ptr, ty)?;
+ let align = self.layout_of(args[0].ty)?.align;
+ self.write_value_to_ptr(args[1].value, ptr, align, ty)?;
}
"needs_drop" => {
"transmute" => {
let src_ty = substs.type_at(0);
+ let src_align = self.layout_of(src_ty)?.align;
let ptr = self.force_allocation(dest)?.to_ptr()?;
- self.write_maybe_aligned_mut(
- /*aligned*/
- false,
- |ectx| {
- ectx.write_value_to_ptr(args[0].value, ptr.into(), src_ty)
- },
- )?;
+ let dest_align = self.layout_of(substs.type_at(1))?.align;
+ self.write_value_to_ptr(args[0].value, ptr.into(), dest_align, src_ty)?;
}
"unchecked_shl" => {
"uninit" => {
let size = dest_layout.size.bytes();
let uninit = |this: &mut Self, val: Value| match val {
- Value::ByRef(PtrAndAlign { ptr, .. }) => {
+ Value::ByRef(ptr, _) => {
this.memory.mark_definedness(ptr, size, false)?;
Ok(val)
}
match dest {
Place::Local { frame, local } => self.modify_local(frame, local, uninit)?,
Place::Ptr {
- ptr: PtrAndAlign { ptr, aligned: true },
+ ptr,
+ align: _,
extra: PlaceExtra::None,
} => self.memory.mark_definedness(ptr, size, false)?,
- Place::Ptr { .. } => {
- bug!("uninit intrinsic tried to write to fat or unaligned ptr target")
- }
+ _ => bug!("uninit intrinsic tried to write to fat ptr target"),
}
}
if count > 0 {
// HashMap relies on write_bytes on a NULL ptr with count == 0 to work
// TODO: Should we, at least, validate the alignment? (Also see the copy intrinsic)
- self.memory.check_align(ptr, ty_layout.align.abi(), Some(AccessKind::Write))?;
+ self.memory.check_align(ptr, ty_layout.align)?;
self.memory.write_repeat(ptr, val_byte, ty_layout.size.bytes() * count)?;
}
}
use rustc::ty::{self, TyCtxt};
use rustc::ty::layout::{TyLayout, LayoutOf};
use rustc::hir::def_id::DefId;
-use rustc::ty::subst::Substs;
use rustc::mir;
use rustc::traits;
// Return value
let size = ecx.tcx.data_layout.pointer_size.bytes();
- let align = ecx.tcx.data_layout.pointer_align.abi();
+ let align = ecx.tcx.data_layout.pointer_align;
let ret_ptr = ecx.memory_mut().allocate(size, align, Some(MemoryKind::Stack))?;
cleanup_ptr = Some(ret_ptr);
start_instance,
start_mir.span,
start_mir,
- Place::from_ptr(ret_ptr),
+ Place::from_ptr(ret_ptr, align),
StackPopCleanup::None,
)?;
let ty = ecx.tcx.mk_imm_ptr(ecx.tcx.mk_imm_ptr(ecx.tcx.types.u8));
let foo = ecx.memory.allocate_cached(b"foo\0");
let ptr_size = ecx.memory.pointer_size();
- let foo_ptr = ecx.memory.allocate(ptr_size * 1, ptr_size, None)?;
- ecx.memory.write_primval(foo_ptr.into(), PrimVal::Ptr(foo.into()), ptr_size, false)?;
+ let ptr_align = ecx.tcx.data_layout.pointer_align;
+ let foo_ptr = ecx.memory.allocate(ptr_size, ptr_align, None)?;
+ ecx.memory.write_primval(foo_ptr, ptr_align, PrimVal::Ptr(foo.into()), ptr_size, false)?;
ecx.memory.mark_static_initalized(foo_ptr.alloc_id, Mutability::Immutable)?;
ecx.write_ptr(dest, foo_ptr.into(), ty)?;
Ok(())
}
- let mut ecx = EvalContext::new(tcx, ty::ParamEnv::empty(traits::Reveal::All), limits, Default::default(), Default::default(), Substs::empty());
+ let mut ecx = EvalContext::new(tcx, ty::ParamEnv::empty(traits::Reveal::All), limits, Default::default(), Default::default());
match run_main(&mut ecx, main_id, start_wrapper) {
Ok(()) => {
let leaks = ecx.memory().leak_report();
// FIXME: check that it's `#[linkage = "extern_weak"]`
trace!("Initializing an extern global with NULL");
let ptr_size = ecx.memory.pointer_size();
+ let ptr_align = ecx.tcx.data_layout.pointer_align;
let ptr = ecx.memory.allocate(
ptr_size,
- ptr_size,
+ ptr_align,
None,
)?;
- ecx.memory.write_ptr_sized_unsigned(ptr, PrimVal::Bytes(0))?;
+ ecx.memory.write_ptr_sized_unsigned(ptr, ptr_align, PrimVal::Bytes(0))?;
ecx.memory.mark_static_initalized(ptr.alloc_id, mutability)?;
ecx.tcx.interpret_interner.borrow_mut().cache(
GlobalId {
instance,
promoted: None,
},
- PtrAndAlign {
- ptr: ptr.into(),
- aligned: true,
- },
+ ptr.into(),
);
Ok(())
}