// FIXME(solson)
let dest_ptr = self.force_allocation(dest)?.to_ptr()?;
- let discr_dest = dest_ptr.offset(discr_offset, self.memory.layout)?;
+ let discr_dest = dest_ptr.offset(discr_offset, &self)?;
self.memory.write_uint(discr_dest, discr_val, discr_size)?;
let dest = Lvalue::Ptr {
// FIXME(solson)
let dest = self.force_allocation(dest)?.to_ptr()?;
- let dest = dest.offset(offset.bytes(), self.memory.layout)?;
+ let dest = dest.offset(offset.bytes(), &self)?;
let dest_size = self.type_size(ty)?
.expect("bad StructWrappedNullablePointer discrfield");
self.memory.write_int(dest, 0, dest_size)?;
let dest = Pointer::from(self.force_allocation(dest)?.to_ptr()?);
for i in 0..length {
- let elem_dest = dest.offset(i * elem_size, self.memory.layout)?;
+ let elem_dest = dest.offset(i * elem_size, &self)?;
self.write_value_to_ptr(value, elem_dest, elem_ty)?;
}
}
// FIXME: assuming here that type size is < i64::max_value()
let pointee_size = self.type_size(pointee_ty)?.expect("cannot offset a pointer to an unsized type") as i64;
let offset = offset.overflowing_mul(pointee_size).0;
- ptr.wrapping_signed_offset(offset, self.memory.layout)
+ ptr.wrapping_signed_offset(offset, self)
}
pub(super) fn pointer_offset(&self, ptr: Pointer, pointee_ty: Ty<'tcx>, offset: i64) -> EvalResult<'tcx, Pointer> {
// FIXME: assuming here that type size is < i64::max_value()
let pointee_size = self.type_size(pointee_ty)?.expect("cannot offset a pointer to an unsized type") as i64;
return if let Some(offset) = offset.checked_mul(pointee_size) {
- let ptr = ptr.signed_offset(offset, self.memory.layout)?;
+ let ptr = ptr.signed_offset(offset, self)?;
// Do not do bounds-checking for integers; they can never alias a normal pointer anyway.
if let PrimVal::Ptr(ptr) = ptr.into_inner_primval() {
self.memory.check_bounds(ptr, false)?;
let field_1_ty = self.get_field_ty(ty, 1)?;
let field_0_size = self.type_size(field_0_ty)?.expect("pair element type must be sized");
let field_1_size = self.type_size(field_1_ty)?.expect("pair element type must be sized");
- let layout = self.memory.layout;
- self.memory.write_primval(ptr.offset(field_0, layout)?.into(), a, field_0_size)?;
- self.memory.write_primval(ptr.offset(field_1, layout)?.into(), b, field_1_size)?;
+ let field_0_ptr = ptr.offset(field_0, &self)?.into();
+ let field_1_ptr = ptr.offset(field_1, &self)?.into();
+ self.memory.write_primval(field_0_ptr, a, field_0_size)?;
+ self.memory.write_primval(field_1_ptr, b, field_1_size)?;
Ok(())
}
Ok(p.to_value())
} else {
trace!("reading fat pointer extra of type {}", pointee_ty);
- let extra = ptr.offset(self.memory.pointer_size(), self.memory.layout)?;
+ let extra = ptr.offset(self.memory.pointer_size(), self)?;
match self.tcx.struct_tail(pointee_ty).sty {
ty::TyDynamic(..) => Ok(p.to_value_with_vtable(self.memory.read_ptr(extra)?.to_ptr()?)),
ty::TySlice(..) |
}
let src_field_offset = self.get_field_offset(src_ty, i)?.bytes();
let dst_field_offset = self.get_field_offset(dest_ty, i)?.bytes();
- let src_f_ptr = src_ptr.offset(src_field_offset, self.memory.layout)?;
- let dst_f_ptr = dest.offset(dst_field_offset, self.memory.layout)?;
+ let src_f_ptr = src_ptr.offset(src_field_offset, &self)?;
+ let dst_f_ptr = dest.offset(dst_field_offset, &self)?;
if src_fty == dst_fty {
self.copy(src_f_ptr, dst_f_ptr.into(), src_fty)?;
} else {
_ => offset.bytes(),
};
- let ptr = base_ptr.offset(offset, self.memory.layout)?;
+ let ptr = base_ptr.offset(offset, &self)?;
let field_ty = self.monomorphize(field_ty, self.substs());
let usize = self.tcx.types.usize;
let n = self.value_to_primval(n_ptr, usize)?.to_u64()?;
assert!(n < len, "Tried to access element {} of array/slice with length {}", n, len);
- let ptr = base_ptr.offset(n * elem_size, self.memory.layout)?;
+ let ptr = base_ptr.offset(n * elem_size, &self)?;
(ptr, LvalueExtra::None, aligned)
}
u64::from(offset)
};
- let ptr = base_ptr.offset(index * elem_size, self.memory.layout)?;
+ let ptr = base_ptr.offset(index * elem_size, &self)?;
(ptr, LvalueExtra::None, aligned)
}
let (elem_ty, n) = base.elem_ty_and_len(base_ty);
let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized");
assert!(u64::from(from) <= n - u64::from(to));
- let ptr = base_ptr.offset(u64::from(from) * elem_size, self.memory.layout)?;
+ let ptr = base_ptr.offset(u64::from(from) * elem_size, &self)?;
let extra = LvalueExtra::Length(n - u64::from(to) - u64::from(from));
(ptr, extra, aligned)
}
use std::{fmt, iter, ptr, mem, io};
use rustc::ty;
-use rustc::ty::layout::{self, TargetDataLayout};
+use rustc::ty::layout::{self, TargetDataLayout, HasDataLayout};
use syntax::ast::Mutability;
use error::{EvalError, EvalResult};
MemoryPointer { alloc_id, offset }
}
- pub(crate) fn wrapping_signed_offset<L: PointerArithmetic>(self, i: i64, l: L) -> Self {
- MemoryPointer::new(self.alloc_id, l.wrapping_signed_offset(self.offset, i))
+ /// Signed pointer arithmetic that wraps around the target's address space on overflow.
+ /// Arithmetic is delegated to the `TargetDataLayout` obtained from the context.
+ pub(crate) fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
+ MemoryPointer::new(self.alloc_id, cx.data_layout().wrapping_signed_offset(self.offset, i))
}
- pub(crate) fn overflowing_signed_offset<L: PointerArithmetic>(self, i: i128, l: L) -> (Self, bool) {
- let (res, over) = l.overflowing_signed_offset(self.offset, i);
+ /// Signed pointer arithmetic; the returned bool reports whether the offset overflowed.
+ pub(crate) fn overflowing_signed_offset<C: HasDataLayout>(self, i: i128, cx: C) -> (Self, bool) {
+ let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset, i);
(MemoryPointer::new(self.alloc_id, res), over)
}
- pub(crate) fn signed_offset<L: PointerArithmetic>(self, i: i64, l: L) -> EvalResult<'tcx, Self> {
- Ok(MemoryPointer::new(self.alloc_id, l.signed_offset(self.offset, i)?))
+ /// Checked signed pointer arithmetic; errors instead of wrapping on overflow.
+ pub(crate) fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
+ Ok(MemoryPointer::new(self.alloc_id, cx.data_layout().signed_offset(self.offset, i)?))
}
- pub(crate) fn overflowing_offset<L: PointerArithmetic>(self, i: u64, l: L) -> (Self, bool) {
- let (res, over) = l.overflowing_offset(self.offset, i);
+ /// Unsigned pointer arithmetic; the returned bool reports whether the offset overflowed.
+ pub(crate) fn overflowing_offset<C: HasDataLayout>(self, i: u64, cx: C) -> (Self, bool) {
+ let (res, over) = cx.data_layout().overflowing_offset(self.offset, i);
(MemoryPointer::new(self.alloc_id, res), over)
}
- pub(crate) fn offset<L: PointerArithmetic>(self, i: u64, l: L) -> EvalResult<'tcx, Self> {
- Ok(MemoryPointer::new(self.alloc_id, l.offset(self.offset, i)?))
+ /// Checked unsigned pointer arithmetic; errors instead of wrapping on overflow.
+ pub(crate) fn offset<C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
+ Ok(MemoryPointer::new(self.alloc_id, cx.data_layout().offset(self.offset, i)?))
}
}
}
self.memory().layout
}
}
+
+// NOTE(review): this impl targets a *doubly*-referenced context (`&&mut EvalContext`)
+// so that `&self` works as a `HasDataLayout` argument inside `&mut self` methods,
+// where `self` is already `&mut EvalContext` (see the `ptr.offset(.., &self)?` call
+// sites elsewhere in this patch). It simply forwards to the memory's data layout.
+impl<'c, 'b, 'a, 'tcx> layout::HasDataLayout for &'c &'b mut EvalContext<'a, 'tcx> {
+ #[inline]
+ fn data_layout(&self) -> &TargetDataLayout {
+ self.memory().layout
+ }
+}
Ok(match bin_op {
Sub =>
// The only way this can overflow is by underflowing, so signdeness of the right operands does not matter
- map_to_primval(left.overflowing_signed_offset(-right, self.memory.layout)),
+ map_to_primval(left.overflowing_signed_offset(-right, self)),
Add if signed =>
- map_to_primval(left.overflowing_signed_offset(right, self.memory.layout)),
+ map_to_primval(left.overflowing_signed_offset(right, self)),
Add if !signed =>
- map_to_primval(left.overflowing_offset(right as u64, self.memory.layout)),
+ map_to_primval(left.overflowing_offset(right as u64, self)),
BitAnd if !signed => {
let base_mask : u64 = !(self.memory.get(left.alloc_id)?.align - 1);
Layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
if variant_index as u64 != nndiscr {
let (offset, ty) = self.nonnull_offset_and_ty(dest_ty, nndiscr, discrfield)?;
- let nonnull = self.force_allocation(dest)?.to_ptr()?.offset(offset.bytes(), self.memory.layout)?;
+ let nonnull = self.force_allocation(dest)?.to_ptr()?.offset(offset.bytes(), &self)?;
trace!("struct wrapped nullable pointer type: {}", ty);
// only the pointer part of a fat pointer is used for this space optimization
let discr_size = self.type_size(ty)?.expect("bad StructWrappedNullablePointer discrfield");
Value::ByRef(ptr, aligned) => {
assert!(aligned, "Unaligned ByRef-values cannot occur as function arguments");
for ((offset, ty), arg_local) in offsets.zip(fields).zip(arg_locals) {
- let arg = Value::ByRef(ptr.offset(offset, self.memory.layout)?, true);
+ let arg = Value::ByRef(ptr.offset(offset, &self)?, true);
let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
trace!("writing arg {:?} to {:?} (type: {})", arg, dest, ty);
self.write_value(arg, dest, ty)?;
ty::InstanceDef::Virtual(_, idx) => {
let ptr_size = self.memory.pointer_size();
let (_, vtable) = self.eval_operand(&arg_operands[0])?.into_ptr_vtable_pair(&mut self.memory)?;
- let fn_ptr = self.memory.read_ptr(vtable.offset(ptr_size * (idx as u64 + 3), self.memory.layout)?)?;
+ let fn_ptr = self.memory.read_ptr(vtable.offset(ptr_size * (idx as u64 + 3), &self)?)?;
let instance = self.memory.get_fn(fn_ptr.to_ptr()?)?;
let mut arg_operands = arg_operands.to_vec();
let ty = self.operand_ty(&arg_operands[0]);
StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
let (offset, ty) = self.nonnull_offset_and_ty(adt_ty, nndiscr, discrfield)?;
- let nonnull = adt_ptr.offset(offset.bytes(), self.memory.layout)?;
+ let nonnull = adt_ptr.offset(offset.bytes(), self)?;
trace!("struct wrapped nullable pointer type: {}", ty);
// only the pointer part of a fat pointer is used for this space optimization
let discr_size = self.type_size(ty)?.expect("bad StructWrappedNullablePointer discrfield");
let val = self.value_to_primval(args[1], usize)?.to_u64()? as u8;
let num = self.value_to_primval(args[2], usize)?.to_u64()?;
if let Some(idx) = self.memory.read_bytes(ptr, num)?.iter().rev().position(|&c| c == val) {
- let new_ptr = ptr.offset(num - idx as u64 - 1, self.memory.layout)?;
+ let new_ptr = ptr.offset(num - idx as u64 - 1, &self)?;
self.write_ptr(dest, new_ptr, dest_ty)?;
} else {
self.write_null(dest, dest_ty)?;
let val = self.value_to_primval(args[1], usize)?.to_u64()? as u8;
let num = self.value_to_primval(args[2], usize)?.to_u64()?;
if let Some(idx) = self.memory.read_bytes(ptr, num)?.iter().position(|&c| c == val) {
- let new_ptr = ptr.offset(idx as u64, self.memory.layout)?;
+ let new_ptr = ptr.offset(idx as u64, &self)?;
self.write_ptr(dest, new_ptr, dest_ty)?;
} else {
self.write_null(dest, dest_ty)?;
if let Some((name, value)) = new {
// +1 for the null terminator
let value_copy = self.memory.allocate((value.len() + 1) as u64, 1, Kind::Env)?;
- let layout = self.memory.layout;
self.memory.write_bytes(value_copy.into(), &value)?;
- self.memory.write_bytes(value_copy.offset(value.len() as u64, layout)?.into(), &[0])?;
+ let trailing_null = value_copy.offset(value.len() as u64, &self)?.into();
+ self.memory.write_bytes(trailing_null, &[0])?;
if let Some(var) = self.env_vars.insert(name.to_owned(), value_copy) {
self.memory.deallocate(var, None, Kind::Env)?;
}
let drop = self.memory.create_fn_alloc(drop);
self.memory.write_ptr(vtable, drop)?;
- let layout = self.memory.layout;
- self.memory.write_usize(vtable.offset(ptr_size, layout)?, size)?;
- self.memory.write_usize(vtable.offset(ptr_size * 2, layout)?, align)?;
+ let size_ptr = vtable.offset(ptr_size, &self)?;
+ self.memory.write_usize(size_ptr, size)?;
+ let align_ptr = vtable.offset(ptr_size * 2, &self)?;
+ self.memory.write_usize(align_ptr, align)?;
for (i, method) in ::rustc::traits::get_vtable_methods(self.tcx, trait_ref).enumerate() {
if let Some((def_id, substs)) = method {
let instance = eval_context::resolve(self.tcx, def_id, substs);
let fn_ptr = self.memory.create_fn_alloc(instance);
- self.memory.write_ptr(vtable.offset(ptr_size * (3 + i as u64), layout)?, fn_ptr)?;
+ let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?;
+ self.memory.write_ptr(method_ptr, fn_ptr)?;
}
}
pub fn read_size_and_align_from_vtable(&self, vtable: MemoryPointer) -> EvalResult<'tcx, (u64, u64)> {
let pointer_size = self.memory.pointer_size();
- let size = self.memory.read_usize(vtable.offset(pointer_size, self.memory.layout)?)?;
- let align = self.memory.read_usize(vtable.offset(pointer_size * 2, self.memory.layout)?)?;
+ let size = self.memory.read_usize(vtable.offset(pointer_size, self)?)?;
+ let align = self.memory.read_usize(vtable.offset(pointer_size * 2, self)?)?;
Ok((size, align))
}
use error::{EvalError, EvalResult};
use memory::{Memory, MemoryPointer, HasMemory, PointerArithmetic};
+use rustc::ty::layout::HasDataLayout;
pub(super) fn bytes_to_f32(bytes: u128) -> f32 {
f32::from_bits(bytes as u32)
self.primval
}
- pub(crate) fn signed_offset<L: PointerArithmetic>(self, i: i64, layout: L) -> EvalResult<'tcx, Self> {
+ pub(crate) fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
+ let layout = cx.data_layout();
match self.primval {
PrimVal::Bytes(b) => {
assert_eq!(b as u64 as u128, b);
}
}
- pub(crate) fn offset<L: PointerArithmetic>(self, i: u64, layout: L) -> EvalResult<'tcx, Self> {
+ pub(crate) fn offset<C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
+ let layout = cx.data_layout();
match self.primval {
PrimVal::Bytes(b) => {
assert_eq!(b as u64 as u128, b);
}
}
- pub(crate) fn wrapping_signed_offset<L: PointerArithmetic>(self, i: i64, layout: L) -> EvalResult<'tcx, Self> {
+ pub(crate) fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
+ let layout = cx.data_layout();
match self.primval {
PrimVal::Bytes(b) => {
assert_eq!(b as u64 as u128, b);