}
}
+// Stable-hash impl for the new `ScalarMaybeUndef` wrapper: hashes the variant
+// discriminant, plus the wrapped payload for the `Scalar(v)` variant.
+impl_stable_hash_for!(enum mir::interpret::ScalarMaybeUndef {
+    Scalar(v),
+    Undef
+});
+
impl_stable_hash_for!(enum mir::interpret::Value {
Scalar(v),
ScalarPair(a, b),
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
- Bits { bits, defined } => {
+ Bits { bits, size } => {
bits.hash_stable(hcx, hasher);
- defined.hash_stable(hcx, hasher);
+ size.hash_stable(hcx, hasher);
},
Ptr(ptr) => ptr.hash_stable(hcx, hasher),
}
FrameInfo, ConstEvalResult,
};
-pub use self::value::{Scalar, Value, ConstValue};
+pub use self::value::{Scalar, Value, ConstValue, ScalarMaybeUndef};
use std::fmt;
use mir;
/// to allow HIR creation to happen for everything before needing to be able to run constant
/// evaluation
Unevaluated(DefId, &'tcx Substs<'tcx>),
- /// Used only for types with layout::abi::Scalar ABI and ZSTs which use Scalar::undef()
+ /// Used only for types with layout::abi::Scalar ABI and ZSTs
Scalar(Scalar),
/// Used only for types with layout::abi::ScalarPair
ScalarPair(Scalar, Scalar),
impl<'tcx> ConstValue<'tcx> {
#[inline]
- pub fn from_byval_value(val: Value) -> Self {
- match val {
+ pub fn from_byval_value(val: Value) -> EvalResult<'static, Self> {
+ Ok(match val {
Value::ByRef(..) => bug!(),
- Value::ScalarPair(a, b) => ConstValue::ScalarPair(a, b),
- Value::Scalar(val) => ConstValue::Scalar(val),
- }
+ Value::ScalarPair(a, b) => ConstValue::ScalarPair(a.read()?, b.read()?),
+ Value::Scalar(val) => ConstValue::Scalar(val.read()?),
+ })
}
#[inline]
match *self {
ConstValue::Unevaluated(..) |
ConstValue::ByRef(..) => None,
- ConstValue::ScalarPair(a, b) => Some(Value::ScalarPair(a, b)),
- ConstValue::Scalar(val) => Some(Value::Scalar(val)),
+ ConstValue::ScalarPair(a, b) => Some(Value::ScalarPair(a.into(), b.into())),
+ ConstValue::Scalar(val) => Some(Value::Scalar(val.into())),
}
}
#[inline]
- pub fn from_scalar(val: Scalar) -> Self {
- ConstValue::Scalar(val)
- }
-
- #[inline]
- pub fn to_scalar(&self) -> Option<Scalar> {
+ pub fn try_to_scalar(&self) -> Option<Scalar> {
match *self {
ConstValue::Unevaluated(..) |
ConstValue::ByRef(..) |
#[inline]
pub fn to_bits(&self, size: Size) -> Option<u128> {
- self.to_scalar()?.to_bits(size).ok()
+ self.try_to_scalar()?.to_bits(size).ok()
}
#[inline]
pub fn to_ptr(&self) -> Option<Pointer> {
- self.to_scalar()?.to_ptr().ok()
+ self.try_to_scalar()?.to_ptr().ok()
}
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)]
pub enum Value {
ByRef(Scalar, Align),
- Scalar(Scalar),
- ScalarPair(Scalar, Scalar),
+ Scalar(ScalarMaybeUndef),
+ ScalarPair(ScalarMaybeUndef, ScalarMaybeUndef),
}
impl<'tcx> ty::TypeFoldable<'tcx> for Value {
pub fn ptr_null<C: HasDataLayout>(cx: C) -> Self {
Scalar::Bits {
bits: 0,
- defined: cx.data_layout().pointer_size.bits() as u8,
+ size: cx.data_layout().pointer_size.bytes() as u8,
}
}
+    /// Convenience wrapper: lifts this (always-defined) `Scalar` into a
+    /// `ScalarMaybeUndef` and delegates to its `to_value_with_len` to build a
+    /// length-carrying fat-pointer `Value`.
+    pub fn to_value_with_len<C: HasDataLayout>(self, len: u64, cx: C) -> Value {
+        ScalarMaybeUndef::Scalar(self).to_value_with_len(len, cx)
+    }
+
pub fn ptr_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
let layout = cx.data_layout();
match self {
- Scalar::Bits { bits, defined } => {
- let pointer_size = layout.pointer_size.bits() as u8;
- if defined < pointer_size {
- err!(ReadUndefBytes)
- } else {
- Ok(Scalar::Bits {
- bits: layout.signed_offset(bits as u64, i)? as u128,
- defined: pointer_size,
- })
- }
+ Scalar::Bits { bits, size } => {
+ assert_eq!(size as u64, layout.pointer_size.bytes());
+ Ok(Scalar::Bits {
+ bits: layout.signed_offset(bits as u64, i)? as u128,
+ size,
+ })
}
Scalar::Ptr(ptr) => ptr.signed_offset(i, layout).map(Scalar::Ptr),
}
pub fn ptr_offset<C: HasDataLayout>(self, i: Size, cx: C) -> EvalResult<'tcx, Self> {
let layout = cx.data_layout();
match self {
- Scalar::Bits { bits, defined } => {
- let pointer_size = layout.pointer_size.bits() as u8;
- if defined < pointer_size {
- err!(ReadUndefBytes)
- } else {
- Ok(Scalar::Bits {
- bits: layout.offset(bits as u64, i.bytes())? as u128,
- defined: pointer_size,
- })
- }
+ Scalar::Bits { bits, size } => {
+ assert_eq!(size as u64, layout.pointer_size.bytes());
+ Ok(Scalar::Bits {
+ bits: layout.offset(bits as u64, i.bytes())? as u128,
+ size,
+ })
}
Scalar::Ptr(ptr) => ptr.offset(i, layout).map(Scalar::Ptr),
}
}
- pub fn ptr_wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
+ pub fn ptr_wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
let layout = cx.data_layout();
match self {
- Scalar::Bits { bits, defined } => {
- let pointer_size = layout.pointer_size.bits() as u8;
- if defined < pointer_size {
- err!(ReadUndefBytes)
- } else {
- Ok(Scalar::Bits {
- bits: layout.wrapping_signed_offset(bits as u64, i) as u128,
- defined: pointer_size,
- })
+ Scalar::Bits { bits, size } => {
+ assert_eq!(size as u64, layout.pointer_size.bytes());
+ Scalar::Bits {
+ bits: layout.wrapping_signed_offset(bits as u64, i) as u128,
+ size,
+ }
}
- }
- Scalar::Ptr(ptr) => Ok(Scalar::Ptr(ptr.wrapping_signed_offset(i, layout))),
+ Scalar::Ptr(ptr) => Scalar::Ptr(ptr.wrapping_signed_offset(i, layout)),
}
}
- pub fn is_null_ptr<C: HasDataLayout>(self, cx: C) -> EvalResult<'tcx, bool> {
+ pub fn is_null_ptr<C: HasDataLayout>(self, cx: C) -> bool {
match self {
- Scalar::Bits {
- bits, defined,
- } => if defined < cx.data_layout().pointer_size.bits() as u8 {
- err!(ReadUndefBytes)
- } else {
- Ok(bits == 0)
+ Scalar::Bits { bits, size } => {
+ assert_eq!(size as u64, cx.data_layout().pointer_size.bytes());
+ bits == 0
},
- Scalar::Ptr(_) => Ok(false),
+ Scalar::Ptr(_) => false,
}
}
- pub fn to_value_with_len<C: HasDataLayout>(self, len: u64, cx: C) -> Value {
- Value::ScalarPair(self, Scalar::Bits {
- bits: len as u128,
- defined: cx.data_layout().pointer_size.bits() as u8,
- })
- }
-
- pub fn to_value_with_vtable(self, vtable: Pointer) -> Value {
- Value::ScalarPair(self, Scalar::Ptr(vtable))
- }
-
pub fn to_value(self) -> Value {
- Value::Scalar(self)
+ Value::Scalar(ScalarMaybeUndef::Scalar(self))
}
}
pub enum Scalar {
/// The raw bytes of a simple value.
Bits {
- /// The first `defined` number of bits are valid
- defined: u8,
+ /// The first `size` bytes are the value.
+ /// Do not try to read less or more bytes that that
+ size: u8,
bits: u128,
},
Ptr(Pointer),
}
-impl<'tcx> Scalar {
- pub fn undef() -> Self {
- Scalar::Bits { bits: 0, defined: 0 }
+/// A `Scalar` that may instead be completely undefined (uninitialized) bytes.
+/// Replaces the old encoding of undef as `Scalar::Bits { defined: 0, .. }`
+/// (the removed `Scalar::undef()` constructor).
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)]
+pub enum ScalarMaybeUndef {
+    /// A fully defined scalar value.
+    Scalar(Scalar),
+    /// Entirely undefined/uninitialized bytes.
+    Undef,
+}
+
+// Infallible promotion of a defined `Scalar`, enabling the `.into()` calls
+// used when converting `ConstValue` back into a `Value`.
+impl From<Scalar> for ScalarMaybeUndef {
+    fn from(s: Scalar) -> Self {
+        ScalarMaybeUndef::Scalar(s)
+    }
+}
+
+// NOTE(review): `ptr_signed_offset` and `ptr_offset` below return
+// `EvalResult<'tcx, Self>`, so the impl must declare the `'tcx` lifetime
+// parameter. Without it this fails to compile with E0261 ("use of undeclared
+// lifetime name `'tcx`"); compare the sibling `impl<'tcx> Scalar` below.
+impl<'tcx> ScalarMaybeUndef {
+    /// Returns the wrapped `Scalar`, or a `ReadUndefBytes` error for `Undef`.
+    pub fn read(self) -> EvalResult<'static, Scalar> {
+        match self {
+            ScalarMaybeUndef::Scalar(scalar) => Ok(scalar),
+            ScalarMaybeUndef::Undef => err!(ReadUndefBytes),
+        }
+    }
+
+    /// Pairs `self` with a pointer-sized length scalar to form a slice-like
+    /// fat-pointer `Value`.
+    pub fn to_value_with_len<C: HasDataLayout>(self, len: u64, cx: C) -> Value {
+        Value::ScalarPair(self.into(), Scalar::Bits {
+            bits: len as u128,
+            size: cx.data_layout().pointer_size.bytes() as u8,
+        }.into())
+    }
+
+    /// Pairs `self` with a vtable pointer to form a trait-object fat-pointer
+    /// `Value`.
+    pub fn to_value_with_vtable(self, vtable: Pointer) -> Value {
+        Value::ScalarPair(self.into(), Scalar::Ptr(vtable).into())
+    }
+
+    /// Checked signed pointer offset; `Undef` is propagated instead of erroring.
+    pub fn ptr_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
+        match self {
+            ScalarMaybeUndef::Scalar(scalar) => {
+                scalar.ptr_signed_offset(i, cx).map(ScalarMaybeUndef::Scalar)
+            },
+            ScalarMaybeUndef::Undef => Ok(ScalarMaybeUndef::Undef)
+        }
    }
+
+    /// Checked unsigned pointer offset; `Undef` is propagated instead of erroring.
+    pub fn ptr_offset<C: HasDataLayout>(self, i: Size, cx: C) -> EvalResult<'tcx, Self> {
+        match self {
+            ScalarMaybeUndef::Scalar(scalar) => {
+                scalar.ptr_offset(i, cx).map(ScalarMaybeUndef::Scalar)
+            },
+            ScalarMaybeUndef::Undef => Ok(ScalarMaybeUndef::Undef)
+        }
+    }
+
+    /// Wrapping signed pointer offset (infallible); `Undef` is propagated.
+    pub fn ptr_wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
+        match self {
+            ScalarMaybeUndef::Scalar(scalar) => {
+                ScalarMaybeUndef::Scalar(scalar.ptr_wrapping_signed_offset(i, cx))
+            },
+            ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef
+        }
+    }
+}
+
+impl<'tcx> Scalar {
pub fn from_bool(b: bool) -> Self {
- // FIXME: can we make defined `1`?
- Scalar::Bits { bits: b as u128, defined: 8 }
+ Scalar::Bits { bits: b as u128, size: 1 }
}
pub fn from_char(c: char) -> Self {
- Scalar::Bits { bits: c as u128, defined: 32 }
+ Scalar::Bits { bits: c as u128, size: 4 }
}
- pub fn to_bits(self, size: Size) -> EvalResult<'tcx, u128> {
+ pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> {
match self {
- Scalar::Bits { .. } if size.bits() == 0 => bug!("to_bits cannot be used with zsts"),
- Scalar::Bits { bits, defined } if size.bits() <= defined as u64 => Ok(bits),
- Scalar::Bits { .. } => err!(ReadUndefBytes),
+ Scalar::Bits { bits, size } => {
+ assert_eq!(target_size.bytes(), size as u64);
+ assert_ne!(size, 0, "to_bits cannot be used with zsts");
+ Ok(bits)
+ }
Scalar::Ptr(_) => err!(ReadPointerAsBytes),
}
}
pub fn to_bool(self) -> EvalResult<'tcx, bool> {
match self {
- Scalar::Bits { bits: 0, defined: 8 } => Ok(false),
- Scalar::Bits { bits: 1, defined: 8 } => Ok(true),
+ Scalar::Bits { bits: 0, size: 1 } => Ok(false),
+ Scalar::Bits { bits: 1, size: 1 } => Ok(true),
_ => err!(InvalidBool),
}
}
use hir::def_id::DefId;
use hir::{self, HirId, InlineAsm};
use middle::region;
-use mir::interpret::{EvalErrorKind, Scalar, Value};
+use mir::interpret::{EvalErrorKind, Scalar, Value, ScalarMaybeUndef};
use mir::visit::MirVisitable;
use rustc_apfloat::ieee::{Double, Single};
use rustc_apfloat::Float;
.map(|&u| {
let mut s = String::new();
print_miri_value(
- Value::Scalar(Scalar::Bits {
+ Scalar::Bits {
bits: u,
- defined: size.bits() as u8,
- }),
+ size: size.bytes() as u8,
+ }.to_value(),
switch_ty,
&mut s,
).unwrap();
pub fn print_miri_value<W: Write>(value: Value, ty: Ty, f: &mut W) -> fmt::Result {
use ty::TypeVariants::*;
- match (value, &ty.sty) {
- (Value::Scalar(Scalar::Bits { bits: 0, .. }), &TyBool) => write!(f, "false"),
- (Value::Scalar(Scalar::Bits { bits: 1, .. }), &TyBool) => write!(f, "true"),
- (Value::Scalar(Scalar::Bits { bits, .. }), &TyFloat(ast::FloatTy::F32)) => {
- write!(f, "{}f32", Single::from_bits(bits))
- }
- (Value::Scalar(Scalar::Bits { bits, .. }), &TyFloat(ast::FloatTy::F64)) => {
- write!(f, "{}f64", Double::from_bits(bits))
- }
- (Value::Scalar(Scalar::Bits { bits, .. }), &TyUint(ui)) => write!(f, "{:?}{}", bits, ui),
- (Value::Scalar(Scalar::Bits { bits, .. }), &TyInt(i)) => {
- let bit_width = ty::tls::with(|tcx| {
- let ty = tcx.lift_to_global(&ty).unwrap();
- tcx.layout_of(ty::ParamEnv::empty().and(ty))
- .unwrap()
- .size
- .bits()
- });
- let shift = 128 - bit_width;
- write!(f, "{:?}{}", ((bits as i128) << shift) >> shift, i)
- }
- (Value::Scalar(Scalar::Bits { bits, .. }), &TyChar) => {
- write!(f, "{:?}", ::std::char::from_u32(bits as u32).unwrap())
+ // print some primitives
+ if let Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits, .. })) = value {
+ match ty.sty {
+ TyBool if bits == 0 => return write!(f, "false"),
+ TyBool if bits == 1 => return write!(f, "true"),
+ TyFloat(ast::FloatTy::F32) => return write!(f, "{}f32", Single::from_bits(bits)),
+ TyFloat(ast::FloatTy::F64) => return write!(f, "{}f64", Double::from_bits(bits)),
+ TyUint(ui) => return write!(f, "{:?}{}", bits, ui),
+ TyInt(i) => {
+ let bit_width = ty::tls::with(|tcx| {
+ let ty = tcx.lift_to_global(&ty).unwrap();
+ tcx.layout_of(ty::ParamEnv::empty().and(ty))
+ .unwrap()
+ .size
+ .bits()
+ });
+ let shift = 128 - bit_width;
+ return write!(f, "{:?}{}", ((bits as i128) << shift) >> shift, i);
+ }
+ TyChar => return write!(f, "{:?}", ::std::char::from_u32(bits as u32).unwrap()),
+ _ => {},
}
- (_, &TyFnDef(did, _)) => write!(f, "{}", item_path_str(did)),
- (
- Value::ScalarPair(Scalar::Ptr(ptr), Scalar::Bits { bits: len, .. }),
- &TyRef(_, &ty::TyS { sty: TyStr, .. }, _),
- ) => ty::tls::with(|tcx| match tcx.alloc_map.lock().get(ptr.alloc_id) {
- Some(interpret::AllocType::Memory(alloc)) => {
- assert_eq!(len as usize as u128, len);
- let slice = &alloc.bytes[(ptr.offset.bytes() as usize)..][..(len as usize)];
- let s = ::std::str::from_utf8(slice).expect("non utf8 str from miri");
- write!(f, "{:?}", s)
+ }
+ // print function definitons
+ if let TyFnDef(did, _) = ty.sty {
+ return write!(f, "{}", item_path_str(did));
+ }
+ // print string literals
+ if let Value::ScalarPair(ptr, len) = value {
+ if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = ptr {
+ if let ScalarMaybeUndef::Scalar(Scalar::Bits { bits: len, .. }) = len {
+ if let TyRef(_, &ty::TyS { sty: TyStr, .. }, _) = ty.sty {
+ return ty::tls::with(|tcx| {
+ let alloc = tcx.alloc_map.lock().get(ptr.alloc_id);
+ if let Some(interpret::AllocType::Memory(alloc)) = alloc {
+ assert_eq!(len as usize as u128, len);
+ let slice = &alloc
+ .bytes
+ [(ptr.offset.bytes() as usize)..]
+ [..(len as usize)];
+ let s = ::std::str::from_utf8(slice).expect("non utf8 str from miri");
+ write!(f, "{:?}", s)
+ } else {
+ write!(f, "pointer to erroneous constant {:?}, {:?}", ptr, len)
+ }
+ });
+ }
}
- _ => write!(f, "pointer to erroneous constant {:?}, {:?}", ptr, len),
- }),
- _ => write!(f, "{:?}:{}", value, ty),
+ }
}
+ // just raw dump everything else
+ write!(f, "{:?}:{}", value, ty)
}
fn item_path_str(def_id: DefId) -> String {
})
}
- #[inline]
- pub fn from_byval_value(
- tcx: TyCtxt<'_, '_, 'tcx>,
- val: Value,
- ty: Ty<'tcx>,
- ) -> &'tcx Self {
- Self::from_const_value(tcx, ConstValue::from_byval_value(val), ty)
- }
-
#[inline]
pub fn from_scalar(
tcx: TyCtxt<'_, '_, 'tcx>,
val: Scalar,
ty: Ty<'tcx>,
) -> &'tcx Self {
- Self::from_const_value(tcx, ConstValue::from_scalar(val), ty)
+ Self::from_const_value(tcx, ConstValue::Scalar(val), ty)
}
#[inline]
let shift = 128 - size.bits();
let truncated = (bits << shift) >> shift;
assert_eq!(truncated, bits, "from_bits called with untruncated value");
- Self::from_scalar(tcx, Scalar::Bits { bits, defined: size.bits() as u8 }, ty.value)
+ Self::from_scalar(tcx, Scalar::Bits { bits, size: size.bytes() as u8 }, ty.value)
}
#[inline]
pub fn zero_sized(tcx: TyCtxt<'_, '_, 'tcx>, ty: Ty<'tcx>) -> &'tcx Self {
- Self::from_scalar(tcx, Scalar::undef(), ty)
+ Self::from_scalar(tcx, Scalar::Bits { bits: 0, size: 0 }, ty)
}
#[inline]
self.val.to_byval_value()
}
- #[inline]
- pub fn to_scalar(&self) -> Option<Scalar> {
- self.val.to_scalar()
- }
-
#[inline]
pub fn assert_bits(
&self,
) -> &'ll Value {
let bitsize = if layout.is_bool() { 1 } else { layout.value.size(cx).bits() };
match cv {
- Scalar::Bits { defined, .. } if (defined as u64) < bitsize || defined == 0 => {
- C_undef(Type::ix(cx, bitsize))
+ Scalar::Bits { size: 0, .. } => {
+ assert_eq!(0, layout.value.size(cx).bytes());
+ C_undef(Type::ix(cx, 0))
},
- Scalar::Bits { bits, .. } => {
+ Scalar::Bits { bits, size } => {
+ assert_eq!(size as u64, layout.value.size(cx).bytes());
let llval = C_uint_big(Type::ix(cx, bitsize), bits);
if layout.value == layout::Pointer {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
mir::Field::new(field as usize),
c,
)?;
- if let Some(prim) = field.to_scalar() {
+ if let Some(prim) = field.val.try_to_scalar() {
let layout = bx.cx.layout_of(field_ty);
let scalar = match layout.abi {
layout::Abi::Scalar(ref x) => x,
let trunc = |n| {
let param_ty = self.param_env.and(self.tcx.lift_to_global(&ty).unwrap());
- let bit_width = self.tcx.layout_of(param_ty).unwrap().size.bits();
- trace!("trunc {} with size {} and shift {}", n, bit_width, 128 - bit_width);
- let shift = 128 - bit_width;
+ let width = self.tcx.layout_of(param_ty).unwrap().size;
+ trace!("trunc {} with size {} and shift {}", n, width.bits(), 128 - width.bits());
+ let shift = 128 - width.bits();
let result = (n << shift) >> shift;
trace!("trunc result: {}", result);
ConstValue::Scalar(Scalar::Bits {
bits: result,
- defined: bit_width as u8,
+ size: width.bytes() as u8,
})
};
let s = s.as_str();
let id = self.tcx.allocate_bytes(s.as_bytes());
let value = Scalar::Ptr(id.into()).to_value_with_len(s.len() as u64, self.tcx);
- ConstValue::from_byval_value(value)
+ ConstValue::from_byval_value(value).unwrap()
},
LitKind::ByteStr(ref data) => {
let id = self.tcx.allocate_bytes(data);
},
LitKind::Byte(n) => ConstValue::Scalar(Scalar::Bits {
bits: n as u128,
- defined: 8,
+ size: 1,
}),
LitKind::Int(n, _) if neg => {
let n = n as i128;
};
parse_float(n, fty)
}
- LitKind::Bool(b) => ConstValue::Scalar(Scalar::Bits {
- bits: b as u128,
- defined: 8,
- }),
- LitKind::Char(c) => ConstValue::Scalar(Scalar::Bits {
- bits: c as u128,
- defined: 32,
- }),
+ LitKind::Bool(b) => ConstValue::Scalar(Scalar::from_bool(b)),
+ LitKind::Char(c) => ConstValue::Scalar(Scalar::from_char(c)),
};
ty::Const::from_const_value(self.tcx, lit, ty)
}
use interpret::{const_val_field, const_variant_index, self};
use rustc::mir::{fmt_const_val, Field, BorrowKind, Mutability};
-use rustc::mir::interpret::{Scalar, GlobalId, ConstValue, Value};
+use rustc::mir::interpret::{Scalar, GlobalId, ConstValue};
use rustc::ty::{self, TyCtxt, AdtDef, Ty, Region};
use rustc::ty::subst::{Substs, Kind};
use rustc::hir::{self, PatKind, RangeEnd};
l.partial_cmp(&r)
},
ty::TyInt(_) => {
- let a = interpret::sign_extend(tcx, a, ty.value).expect("layout error for TyInt");
- let b = interpret::sign_extend(tcx, b, ty.value).expect("layout error for TyInt");
+ let layout = tcx.layout_of(ty).ok()?;
+ let a = interpret::sign_extend(a, layout);
+ let b = interpret::sign_extend(b, layout);
Some((a as i128).cmp(&(b as i128)))
},
_ => Some(a.cmp(&b)),
if let ty::TyRef(_, rty, _) = ty.value.sty {
if let ty::TyStr = rty.sty {
- match (a.to_byval_value(), b.to_byval_value()) {
+ match (a.val, b.val) {
(
- Some(Value::ScalarPair(
+ ConstValue::ScalarPair(
Scalar::Ptr(ptr_a),
len_a,
- )),
- Some(Value::ScalarPair(
+ ),
+ ConstValue::ScalarPair(
Scalar::Ptr(ptr_b),
len_b,
- ))
+ ),
) if ptr_a.offset.bytes() == 0 && ptr_b.offset.bytes() == 0 => {
if let Ok(len_a) = len_a.to_bits(tcx.data_layout.pointer_size) {
if let Ok(len_b) = len_b.to_bits(tcx.data_layout.pointer_size) {
let s = s.as_str();
let id = tcx.allocate_bytes(s.as_bytes());
let value = Scalar::Ptr(id.into()).to_value_with_len(s.len() as u64, tcx);
- ConstValue::from_byval_value(value)
+ ConstValue::from_byval_value(value).unwrap()
},
LitKind::ByteStr(ref data) => {
let id = tcx.allocate_bytes(data);
},
LitKind::Byte(n) => ConstValue::Scalar(Scalar::Bits {
bits: n as u128,
- defined: 8,
+ size: 1,
}),
LitKind::Int(n, _) => {
enum Int {
Int::Signed(IntTy::I128)| Int::Unsigned(UintTy::U128) => n,
_ => bug!(),
};
- let defined = tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap().size.bits() as u8;
+ let size = tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap().size.bytes() as u8;
ConstValue::Scalar(Scalar::Bits {
bits: n,
- defined,
+ size,
})
},
LitKind::Float(n, fty) => {
};
parse_float(n, fty, neg).map_err(|_| LitToConstError::UnparseableFloat)?
}
- LitKind::Bool(b) => ConstValue::Scalar(Scalar::Bits {
- bits: b as u128,
- defined: 8,
- }),
- LitKind::Char(c) => ConstValue::Scalar(Scalar::Bits {
- bits: c as u128,
- defined: 32,
- }),
+ LitKind::Bool(b) => ConstValue::Scalar(Scalar::from_bool(b)),
+ LitKind::Char(c) => ConstValue::Scalar(Scalar::from_char(c)),
};
Ok(ty::Const::from_const_value(tcx, lit, ty))
}
let num = num.as_str();
use rustc_apfloat::ieee::{Single, Double};
use rustc_apfloat::Float;
- let (bits, defined) = match fty {
+ let (bits, size) = match fty {
ast::FloatTy::F32 => {
num.parse::<f32>().map_err(|_| ())?;
let mut f = num.parse::<Single>().unwrap_or_else(|e| {
if neg {
f = -f;
}
- (f.to_bits(), 32)
+ (f.to_bits(), 4)
}
ast::FloatTy::F64 => {
num.parse::<f64>().map_err(|_| ())?;
if neg {
f = -f;
}
- (f.to_bits(), 64)
+ (f.to_bits(), 8)
}
};
- Ok(ConstValue::Scalar(Scalar::Bits { bits, defined }))
+ Ok(ConstValue::Scalar(Scalar::Bits { bits, size }))
}
use rustc::ty::{self, Ty};
-use rustc::ty::layout::{self, LayoutOf};
+use rustc::ty::layout::{self, LayoutOf, TyLayout};
use syntax::ast::{FloatTy, IntTy, UintTy};
use rustc_apfloat::ieee::{Single, Double};
dest_ty: Ty<'tcx>,
dest: Place,
) -> EvalResult<'tcx> {
+ let src_layout = self.layout_of(src.ty)?;
+ let dst_layout = self.layout_of(dest_ty)?;
use rustc::mir::CastKind::*;
match kind {
Unsize => {
- let src_layout = self.layout_of(src.ty)?;
- let dst_layout = self.layout_of(dest_ty)?;
self.unsize_into(src.value, src_layout, dest, dst_layout)?;
}
let discr_val = def
.discriminant_for_variant(*self.tcx, index)
.val;
- let defined = self
- .layout_of(dest_ty)
- .unwrap()
- .size
- .bits() as u8;
return self.write_scalar(
dest,
Scalar::Bits {
bits: discr_val,
- defined,
+ size: dst_layout.size.bytes() as u8,
},
dest_ty);
}
}
let src_val = self.value_to_scalar(src)?;
- let dest_val = self.cast_scalar(src_val, src.ty, dest_ty)?;
+ let dest_val = self.cast_scalar(src_val, src_layout, dst_layout)?;
let valty = ValTy {
- value: Value::Scalar(dest_val),
+ value: Value::Scalar(dest_val.into()),
ty: dest_ty,
};
self.write_value(valty, dest)?;
).ok_or_else(|| EvalErrorKind::TooGeneric.into());
let fn_ptr = self.memory.create_fn_alloc(instance?);
let valty = ValTy {
- value: Value::Scalar(fn_ptr.into()),
+ value: Value::Scalar(Scalar::Ptr(fn_ptr.into()).into()),
ty: dest_ty,
};
self.write_value(valty, dest)?;
);
let fn_ptr = self.memory.create_fn_alloc(instance);
let valty = ValTy {
- value: Value::Scalar(fn_ptr.into()),
+ value: Value::Scalar(Scalar::Ptr(fn_ptr.into()).into()),
ty: dest_ty,
};
self.write_value(valty, dest)?;
pub(super) fn cast_scalar(
&self,
val: Scalar,
- src_ty: Ty<'tcx>,
- dest_ty: Ty<'tcx>,
+ src_layout: TyLayout<'tcx>,
+ dest_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Scalar> {
use rustc::ty::TypeVariants::*;
- trace!("Casting {:?}: {:?} to {:?}", val, src_ty, dest_ty);
+ trace!("Casting {:?}: {:?} to {:?}", val, src_layout.ty, dest_layout.ty);
match val {
- Scalar::Bits { defined: 0, .. } => Ok(val),
- Scalar::Ptr(ptr) => self.cast_from_ptr(ptr, dest_ty),
- Scalar::Bits { bits, .. } => {
- // TODO(oli-obk): check defined bits here
- match src_ty.sty {
- TyFloat(fty) => self.cast_from_float(bits, fty, dest_ty),
- _ => self.cast_from_int(bits, src_ty, dest_ty),
+ Scalar::Ptr(ptr) => self.cast_from_ptr(ptr, dest_layout.ty),
+ Scalar::Bits { bits, size } => {
+ assert_eq!(size as u64, src_layout.size.bytes());
+ match src_layout.ty.sty {
+ TyFloat(fty) => self.cast_from_float(bits, fty, dest_layout.ty),
+ _ => self.cast_from_int(bits, src_layout, dest_layout),
}
}
}
fn cast_from_int(
&self,
v: u128,
- src_ty: Ty<'tcx>,
- dest_ty: Ty<'tcx>,
+ src_layout: TyLayout<'tcx>,
+ dest_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Scalar> {
- let signed = self.layout_of(src_ty)?.abi.is_signed();
+ let signed = src_layout.abi.is_signed();
let v = if signed {
- self.sign_extend(v, src_ty)?
+ self.sign_extend(v, src_layout)
} else {
v
};
- trace!("cast_from_int: {}, {}, {}", v, src_ty, dest_ty);
+ trace!("cast_from_int: {}, {}, {}", v, src_layout.ty, dest_layout.ty);
use rustc::ty::TypeVariants::*;
- match dest_ty.sty {
+ match dest_layout.ty.sty {
TyInt(_) | TyUint(_) => {
- let v = self.truncate(v, dest_ty)?;
+ let v = self.truncate(v, dest_layout);
Ok(Scalar::Bits {
bits: v,
- defined: self.layout_of(dest_ty).unwrap().size.bits() as u8,
+ size: dest_layout.size.bytes() as u8,
})
}
TyFloat(FloatTy::F32) if signed => Ok(Scalar::Bits {
bits: Single::from_i128(v as i128).value.to_bits(),
- defined: 32,
+ size: 4,
}),
TyFloat(FloatTy::F64) if signed => Ok(Scalar::Bits {
bits: Double::from_i128(v as i128).value.to_bits(),
- defined: 64,
+ size: 8,
}),
TyFloat(FloatTy::F32) => Ok(Scalar::Bits {
bits: Single::from_u128(v).value.to_bits(),
- defined: 32,
+ size: 4,
}),
TyFloat(FloatTy::F64) => Ok(Scalar::Bits {
bits: Double::from_u128(v).value.to_bits(),
- defined: 64,
+ size: 8,
}),
- TyChar if v as u8 as u128 == v => Ok(Scalar::Bits { bits: v, defined: 32 }),
- TyChar => err!(InvalidChar(v)),
+ TyChar => {
+ assert_eq!(v as u8 as u128, v);
+ Ok(Scalar::Bits { bits: v, size: 4 })
+ },
// No alignment check needed for raw pointers. But we have to truncate to target ptr size.
TyRawPtr(_) => {
Ok(Scalar::Bits {
bits: self.memory.truncate_to_ptr(v).0 as u128,
- defined: self.memory.pointer_size().bits() as u8,
+ size: self.memory.pointer_size().bytes() as u8,
})
},
// Casts to bool are not permitted by rustc, no need to handle them here.
- _ => err!(Unimplemented(format!("int to {:?} cast", dest_ty))),
+ _ => err!(Unimplemented(format!("int to {:?} cast", dest_layout.ty))),
}
}
match fty {
FloatTy::F32 => Ok(Scalar::Bits {
bits: Single::from_bits(bits).to_u128(width).value,
- defined: width as u8,
+ size: (width / 8) as u8,
}),
FloatTy::F64 => Ok(Scalar::Bits {
bits: Double::from_bits(bits).to_u128(width).value,
- defined: width as u8,
+ size: (width / 8) as u8,
}),
}
},
match fty {
FloatTy::F32 => Ok(Scalar::Bits {
bits: Single::from_bits(bits).to_i128(width).value as u128,
- defined: width as u8,
+ size: (width / 8) as u8,
}),
FloatTy::F64 => Ok(Scalar::Bits {
bits: Double::from_bits(bits).to_i128(width).value as u128,
- defined: width as u8,
+ size: (width / 8) as u8,
}),
}
},
TyFloat(FloatTy::F32) if fty == FloatTy::F64 => {
Ok(Scalar::Bits {
bits: Single::to_bits(Double::from_bits(bits).convert(&mut false).value),
- defined: 32,
+ size: 4,
})
},
// f32 -> f64
TyFloat(FloatTy::F64) if fty == FloatTy::F32 => {
Ok(Scalar::Bits {
bits: Double::to_bits(Single::from_bits(bits).convert(&mut false).value),
- defined: 64,
+ size: 8,
})
},
// identity cast
TyFloat(FloatTy:: F64) => Ok(Scalar::Bits {
bits,
- defined: 64,
+ size: 8,
}),
TyFloat(FloatTy:: F32) => Ok(Scalar::Bits {
bits,
- defined: 32,
+ size: 4,
}),
_ => err!(Unimplemented(format!("float to {:?} cast", dest_ty))),
}
use std::error::Error;
use rustc::hir;
-use rustc::mir::interpret::{ConstEvalErr};
+use rustc::mir::interpret::{ConstEvalErr, ScalarMaybeUndef};
use rustc::mir;
use rustc::ty::{self, TyCtxt, Ty, Instance};
use rustc::ty::layout::{self, LayoutOf, Primitive, TyLayout};
use rustc::ty::subst::Subst;
+use rustc_data_structures::indexed_vec::IndexVec;
use syntax::ast::Mutability;
use syntax::codemap::Span;
let param_env = tcx.param_env(instance.def_id());
let mut ecx = EvalContext::new(tcx.at(span), param_env, CompileTimeEvaluator, ());
// insert a stack frame so any queries have the correct substs
- ecx.push_stack_frame(
+ ecx.stack.push(super::eval_context::Frame {
+ block: mir::START_BLOCK,
+ locals: IndexVec::new(),
instance,
span,
mir,
- Place::undef(),
- StackPopCleanup::None,
- )?;
+ return_place: Place::undef(),
+ return_to_block: StackPopCleanup::None,
+ stmt: 0,
+ });
Ok(ecx)
}
) -> &'tcx ty::Const<'tcx> {
let layout = ecx.layout_of(ty).unwrap();
match (val, &layout.abi) {
- (Value::Scalar(Scalar::Bits { defined: 0, ..}), _) if layout.is_zst() => {},
+ (Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { size: 0, ..})), _) if layout.is_zst() => {},
(Value::ByRef(..), _) |
(Value::Scalar(_), &layout::Abi::Scalar(_)) |
(Value::ScalarPair(..), &layout::Abi::ScalarPair(..)) => {},
}
let val = (|| {
match val {
- Value::Scalar(val) => Ok(ConstValue::Scalar(val)),
- Value::ScalarPair(a, b) => Ok(ConstValue::ScalarPair(a, b)),
+ Value::Scalar(val) => Ok(ConstValue::Scalar(val.read()?)),
+ Value::ScalarPair(a, b) => Ok(ConstValue::ScalarPair(a.read()?, b.read()?)),
Value::ByRef(ptr, align) => {
let ptr = ptr.to_ptr().unwrap();
let alloc = ecx.memory.get(ptr.alloc_id)?;
let elem_align = ecx.layout_of(elem_ty)?.align.abi();
let align_val = Scalar::Bits {
bits: elem_align as u128,
- defined: dest_layout.size.bits() as u8,
+ size: dest_layout.size.bytes() as u8,
};
ecx.write_scalar(dest, align_val, dest_layout.ty)?;
}
let size = ecx.layout_of(ty)?.size.bytes() as u128;
let size_val = Scalar::Bits {
bits: size,
- defined: dest_layout.size.bits() as u8,
+ size: dest_layout.size.bytes() as u8,
};
ecx.write_scalar(dest, size_val, dest_layout.ty)?;
}
let type_id = ecx.tcx.type_id_hash(ty) as u128;
let id_val = Scalar::Bits {
bits: type_id,
- defined: dest_layout.size.bits() as u8,
+ size: dest_layout.size.bytes() as u8,
};
ecx.write_scalar(dest, id_val, dest_layout.ty)?;
}
let place = ecx.allocate_place_for_value(value, layout, variant)?;
let (place, layout) = ecx.place_field(place, field, layout)?;
let (ptr, align) = place.to_ptr_align();
- let mut new_value = Value::ByRef(ptr, align);
+ let mut new_value = Value::ByRef(ptr.read()?, align);
new_value = ecx.try_read_by_ref(new_value, layout.ty)?;
use rustc_data_structures::indexed_vec::Idx;
match (value, new_value) {
};
if tcx.is_static(def_id).is_some() {
err.report_as_error(ecx.tcx, "could not evaluate static initializer");
+ if tcx.sess.err_count() == 0 {
+ span_bug!(span, "static eval failure didn't emit an error: {:#?}", err);
+ }
}
err.into()
})
bits: u128,
kind: Primitive,
) -> EvalResult<'tcx, Scalar> {
- let defined = match kind {
- Primitive::Int(integer, _) => integer.size().bits() as u8,
+ let size = match kind {
+ Primitive::Int(integer, _) => integer.size(),
_ => bug!("invalid `{}` argument: {:?}", name, bits),
};
- let extra = 128 - defined as u128;
+ let extra = 128 - size.bits() as u128;
let bits_out = match name {
"ctpop" => bits.count_ones() as u128,
"ctlz" => bits.leading_zeros() as u128 - extra,
"bswap" => (bits << extra).swap_bytes(),
_ => bug!("not a numeric intrinsic: {}", name),
};
- Ok(Scalar::Bits { bits: bits_out, defined })
+ Ok(Scalar::Bits { bits: bits_out, size: size.bytes() as u8 })
}
use rustc::mir::interpret::{
GlobalId, Value, Scalar, FrameInfo, AllocType,
EvalResult, EvalErrorKind, Pointer, ConstValue,
+ ScalarMaybeUndef,
};
use syntax::codemap::{self, Span};
/// `[return_ptr, arguments..., variables..., temporaries...]`. The locals are stored as `Option<Value>`s.
/// `None` represents a local that is currently dead, while a live local
/// can either directly contain `Scalar` or refer to some part of an `Allocation`.
- ///
- /// Before being initialized, arguments are `Value::Scalar(Scalar::undef())` and other locals are `None`.
- pub locals: IndexVec<mir::Local, Option<Value>>,
+ pub locals: IndexVec<mir::Local, LocalValue>,
////////////////////////////////////////////////////////////////////////////////
// Current position within the function
pub stmt: usize,
}
+/// The state of a stack-frame local: `Dead` while its storage is not live,
+/// otherwise `Live` with the local's current value.
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+pub enum LocalValue {
+    /// Storage is not live (locals mentioned in `StorageLive`/`StorageDead`
+    /// statements start out dead when the frame is pushed).
+    Dead,
+    /// The local is live and holds this value.
+    Live(Value),
+}
+
+impl LocalValue {
+    /// Returns the value of a live local, or a `DeadLocal` error.
+    pub fn access(self) -> EvalResult<'static, Value> {
+        match self {
+            LocalValue::Dead => err!(DeadLocal),
+            LocalValue::Live(val) => Ok(val),
+        }
+    }
+}
+
impl<'mir, 'tcx: 'mir> Eq for Frame<'mir, 'tcx> {}
impl<'mir, 'tcx: 'mir> PartialEq for Frame<'mir, 'tcx> {
let id = self.memory.allocate_value(alloc.clone(), MemoryKind::Stack)?;
Ok(Value::ByRef(Pointer::new(id, offset).into(), alloc.align))
},
- ConstValue::ScalarPair(a, b) => Ok(Value::ScalarPair(a, b)),
- ConstValue::Scalar(val) => Ok(Value::Scalar(val)),
+ ConstValue::ScalarPair(a, b) => Ok(Value::ScalarPair(a.into(), b.into())),
+ ConstValue::Scalar(val) => Ok(Value::Scalar(val.into())),
}
}
) -> EvalResult<'tcx> {
::log_settings::settings().indentation += 1;
- let locals = if mir.local_decls.len() > 1 {
- let mut locals = IndexVec::from_elem(Some(Value::Scalar(Scalar::undef())), &mir.local_decls);
+ // first push a stack frame so we have access to the local substs
+ self.stack.push(Frame {
+ mir,
+ block: mir::START_BLOCK,
+ return_to_block,
+ return_place,
+ // empty local array, we fill it in below, after we are inside the stack frame and
+ // all methods actually know about the frame
+ locals: IndexVec::new(),
+ span,
+ instance,
+ stmt: 0,
+ });
+
+ // don't allocate at all for trivial constants
+ if mir.local_decls.len() > 1 {
+ let mut locals = IndexVec::from_elem(LocalValue::Dead, &mir.local_decls);
+ for (local, decl) in locals.iter_mut().zip(mir.local_decls.iter()) {
+ *local = LocalValue::Live(self.init_value(decl.ty)?);
+ }
match self.tcx.describe_def(instance.def_id()) {
// statics and constants don't have `Storage*` statements, no need to look for them
Some(Def::Static(..)) | Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) => {},
use rustc::mir::StatementKind::{StorageDead, StorageLive};
match stmt.kind {
StorageLive(local) |
- StorageDead(local) => locals[local] = None,
+ StorageDead(local) => locals[local] = LocalValue::Dead,
_ => {}
}
}
}
},
}
- locals
- } else {
- // don't allocate at all for trivial constants
- IndexVec::new()
- };
-
- self.stack.push(Frame {
- mir,
- block: mir::START_BLOCK,
- return_to_block,
- return_place,
- locals,
- span,
- instance,
- stmt: 0,
- });
+ self.frame_mut().locals = locals;
+ }
self.memory.cur_frame = self.cur_frame();
if let Place::Ptr { ptr, .. } = frame.return_place {
// FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions
self.memory.mark_static_initialized(
- ptr.to_ptr()?.alloc_id,
+ ptr.read()?.to_ptr()?.alloc_id,
mutable,
)?
} else {
Ok(())
}
- pub fn deallocate_local(&mut self, local: Option<Value>) -> EvalResult<'tcx> {
- if let Some(Value::ByRef(ptr, _align)) = local {
+ pub fn deallocate_local(&mut self, local: LocalValue) -> EvalResult<'tcx> {
+ // FIXME: should we tell the user that there was a local which was never written to?
+ if let LocalValue::Live(Value::ByRef(ptr, _align)) = local {
trace!("deallocating local");
let ptr = ptr.to_ptr()?;
self.memory.dump_alloc(ptr.alloc_id);
) -> EvalResult<'tcx> {
let dest = self.eval_place(place)?;
let dest_ty = self.place_ty(place);
+ let dest_layout = self.layout_of(dest_ty)?;
use rustc::mir::Rvalue::*;
match *rvalue {
UnaryOp(un_op, ref operand) => {
let val = self.eval_operand_to_scalar(operand)?;
- let val = self.unary_op(un_op, val, dest_ty)?;
+ let val = self.unary_op(un_op, val, dest_layout)?;
self.write_scalar(
dest,
val,
let (dest, dest_align) = self.force_allocation(dest)?.to_ptr_align();
if length > 0 {
+ let dest = dest.read()?;
//write the first value
self.write_value_to_ptr(value, dest, dest_align, elem_ty)?;
let src = self.eval_place(place)?;
let ty = self.place_ty(place);
let (_, len) = src.elem_ty_and_len(ty, self.tcx.tcx);
- let defined = self.memory.pointer_size().bits() as u8;
+ let size = self.memory.pointer_size().bytes() as u8;
self.write_scalar(
dest,
Scalar::Bits {
bits: len as u128,
- defined,
+ size,
},
dest_ty,
)?;
let (ptr, _align, extra) = self.force_allocation(src)?.to_ptr_align_extra();
let val = match extra {
- PlaceExtra::None => ptr.to_value(),
+ PlaceExtra::None => Value::Scalar(ptr),
PlaceExtra::Length(len) => ptr.to_value_with_len(len, self.tcx.tcx),
PlaceExtra::Vtable(vtable) => ptr.to_value_with_vtable(vtable),
PlaceExtra::DowncastVariant(..) => {
let layout = self.layout_of(ty)?;
assert!(!layout.is_unsized(),
"SizeOf nullary MIR operator called for unsized type");
- let defined = self.memory.pointer_size().bits() as u8;
+ let size = self.memory.pointer_size().bytes() as u8;
self.write_scalar(
dest,
Scalar::Bits {
bits: layout.size.bytes() as u128,
- defined,
+ size,
},
dest_ty,
)?;
let layout = self.layout_of(ty)?;
let place = self.eval_place(place)?;
let discr_val = self.read_discriminant_value(place, layout)?;
- let defined = self.layout_of(dest_ty).unwrap().size.bits() as u8;
+ let size = self.layout_of(dest_ty).unwrap().size.bytes() as u8;
self.write_scalar(dest, Scalar::Bits {
bits: discr_val,
- defined,
+ size,
}, dest_ty)?;
}
}
assert!(variants_start == variants_end);
dataful_variant as u128
},
- Scalar::Bits { bits: raw_discr, defined } => {
- if defined < discr.size.bits() as u8 {
- return err!(ReadUndefBytes);
- }
+ Scalar::Bits { bits: raw_discr, size } => {
+ assert_eq!(size as u64, discr.size.bytes());
let discr = raw_discr.wrapping_sub(niche_start)
.wrapping_add(variants_start);
if variants_start <= discr && discr <= variants_end {
// raw discriminants for enums are isize or bigger during
// their computation, but the in-memory tag is the smallest possible
// representation
- let size = tag.value.size(self.tcx.tcx).bits();
- let shift = 128 - size;
+ let size = tag.value.size(self.tcx.tcx);
+ let shift = 128 - size.bits();
let discr_val = (discr_val << shift) >> shift;
let (discr_dest, tag) = self.place_field(dest, mir::Field::new(0), layout)?;
self.write_scalar(discr_dest, Scalar::Bits {
bits: discr_val,
- defined: size as u8,
+ size: size.bytes() as u8,
}, tag.ty)?;
}
layout::Variants::NicheFilling {
.wrapping_add(niche_start);
self.write_scalar(niche_dest, Scalar::Bits {
bits: niche_value,
- defined: niche.size.bits() as u8,
+ size: niche.size.bytes() as u8,
}, niche.ty)?;
}
}
pub fn force_allocation(&mut self, place: Place) -> EvalResult<'tcx, Place> {
let new_place = match place {
Place::Local { frame, local } => {
- match self.stack[frame].locals[local] {
- None => return err!(DeadLocal),
- Some(Value::ByRef(ptr, align)) => {
+ match self.stack[frame].locals[local].access()? {
+ Value::ByRef(ptr, align) => {
Place::Ptr {
- ptr,
+ ptr: ptr.into(),
align,
extra: PlaceExtra::None,
}
}
- Some(val) => {
+ val => {
let ty = self.stack[frame].mir.local_decls[local].ty;
let ty = self.monomorphize(ty, self.stack[frame].instance.substs);
let layout = self.layout_of(ty)?;
let ptr = self.alloc_ptr(layout)?;
self.stack[frame].locals[local] =
- Some(Value::ByRef(ptr.into(), layout.align)); // it stays live
+ LocalValue::Live(Value::ByRef(ptr.into(), layout.align)); // it stays live
+
let place = Place::from_ptr(ptr, layout.align);
self.write_value(ValTy { value: val, ty }, place)?;
place
pub fn write_scalar(
&mut self,
dest: Place,
- val: Scalar,
+ val: impl Into<ScalarMaybeUndef>,
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
let valty = ValTy {
- value: Value::Scalar(val),
+ value: Value::Scalar(val.into()),
ty: dest_ty,
};
self.write_value(valty, dest)
match dest {
Place::Ptr { ptr, align, extra } => {
assert_eq!(extra, PlaceExtra::None);
- self.write_value_to_ptr(src_val, ptr, align, dest_ty)
+ self.write_value_to_ptr(src_val, ptr.read()?, align, dest_ty)
}
Place::Local { frame, local } => {
- let dest = self.stack[frame].get_local(local)?;
+ let old_val = self.stack[frame].locals[local].access()?;
self.write_value_possibly_by_val(
src_val,
|this, val| this.stack[frame].set_local(local, val),
- dest,
+ old_val,
dest_ty,
)
}
old_dest_val: Value,
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
+ // FIXME: this should check the layout, not the representation of the old value
if let Value::ByRef(dest_ptr, align) = old_dest_val {
// If the value is already `ByRef` (that is, backed by an `Allocation`),
// then we must write the new value into this allocation, because there may be
layout::Primitive::Int(_, signed) => signed,
_ => false,
},
- _ => match scalar {
- Scalar::Bits { defined: 0, .. } => false,
- _ => bug!("write_value_to_ptr: invalid ByVal layout: {:#?}", layout),
- }
+ _ => false,
};
self.memory.write_scalar(dest, dest_align, scalar, layout.size, signed)
}
pointee_ty: Ty<'tcx>,
) -> EvalResult<'tcx, Value> {
let ptr_size = self.memory.pointer_size();
- let p: Scalar = self.memory.read_ptr_sized(ptr, ptr_align)?.into();
+ let p: ScalarMaybeUndef = self.memory.read_ptr_sized(ptr, ptr_align)?;
if self.type_is_sized(pointee_ty) {
- Ok(p.to_value())
+ Ok(Value::Scalar(p))
} else {
trace!("reading fat pointer extra of type {}", pointee_ty);
let extra = ptr.offset(ptr_size, self)?;
match self.tcx.struct_tail(pointee_ty).sty {
- ty::TyDynamic(..) => Ok(p.to_value_with_vtable(
- self.memory.read_ptr_sized(extra, ptr_align)?.to_ptr()?,
+ ty::TyDynamic(..) => Ok(Value::ScalarPair(
+ p,
+ self.memory.read_ptr_sized(extra, ptr_align)?,
)),
ty::TySlice(..) | ty::TyStr => {
let len = self
.memory
.read_ptr_sized(extra, ptr_align)?
+ .read()?
.to_bits(ptr_size)?;
Ok(p.to_value_with_len(len as u64, self.tcx.tcx))
},
match ty.sty {
ty::TyChar => {
assert_eq!(size.bytes(), 4);
- if ::std::char::from_u32(bits as u32).is_none() {
- return err!(InvalidChar(bits));
+ let c = self.memory.read_scalar(ptr, ptr_align, Size::from_bytes(4))?.read()?.to_bits(Size::from_bytes(4))? as u32;
+ match ::std::char::from_u32(c) {
+ Some(..) => (),
+ None => return err!(InvalidChar(c as u128)),
}
}
_ => {},
self.memory.check_align(ptr, ptr_align)?;
if layout.size.bytes() == 0 {
- return Ok(Some(Value::Scalar(Scalar::undef())));
+ return Ok(Some(Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits: 0, size: 0 }))));
}
let ptr = ptr.to_ptr()?;
}
let (src_f_value, src_field) = match src {
Value::ByRef(ptr, align) => {
- let src_place = Place::from_scalar_ptr(ptr, align);
+ let src_place = Place::from_scalar_ptr(ptr.into(), align);
let (src_f_place, src_field) =
self.place_field(src_place, mir::Field::new(i), src_layout)?;
(self.read_place(src_f_place)?, src_field)
}
write!(msg, ":").unwrap();
- match self.stack[frame].get_local(local) {
+ match self.stack[frame].locals[local].access() {
Err(err) => {
if let EvalErrorKind::DeadLocal = err.kind {
write!(msg, " is dead").unwrap();
}
Ok(Value::Scalar(val)) => {
write!(msg, " {:?}", val).unwrap();
- if let Scalar::Ptr(ptr) = val {
+ if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val {
allocs.push(ptr.alloc_id);
}
}
Ok(Value::ScalarPair(val1, val2)) => {
write!(msg, " ({:?}, {:?})", val1, val2).unwrap();
- if let Scalar::Ptr(ptr) = val1 {
+ if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val1 {
allocs.push(ptr.alloc_id);
}
- if let Scalar::Ptr(ptr) = val2 {
+ if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val2 {
allocs.push(ptr.alloc_id);
}
}
}
Place::Ptr { ptr, align, .. } => {
match ptr {
- Scalar::Ptr(ptr) => {
+ ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) => {
trace!("by align({}) ref:", align.abi());
self.memory.dump_alloc(ptr.alloc_id);
}
}
}
- /// Convenience function to ensure correct usage of locals
- pub fn modify_local<F>(&mut self, frame: usize, local: mir::Local, f: F) -> EvalResult<'tcx>
- where
- F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>,
- {
- let val = self.stack[frame].get_local(local)?;
- let new_val = f(self, val)?;
- self.stack[frame].set_local(local, new_val)?;
- // FIXME(solson): Run this when setting to Undef? (See previous version of this code.)
- // if let Value::ByRef(ptr) = self.stack[frame].get_local(local) {
- // self.memory.deallocate(ptr)?;
- // }
- Ok(())
- }
-
pub fn generate_stacktrace(&self, explicit_span: Option<Span>) -> (Vec<FrameInfo>, Span) {
let mut last_span = None;
let mut frames = Vec::new();
(frames, self.tcx.span)
}
- pub fn sign_extend(&self, value: u128, ty: Ty<'tcx>) -> EvalResult<'tcx, u128> {
- super::sign_extend(self.tcx.tcx, value, ty)
+ pub fn sign_extend(&self, value: u128, ty: TyLayout<'_>) -> u128 {
+ super::sign_extend(value, ty)
}
- pub fn truncate(&self, value: u128, ty: Ty<'tcx>) -> EvalResult<'tcx, u128> {
- super::truncate(self.tcx.tcx, value, ty)
+ pub fn truncate(&self, value: u128, ty: TyLayout<'_>) -> u128 {
+ super::truncate(value, ty)
}
fn write_field_name(&self, s: &mut String, ty: Ty<'tcx>, i: usize, variant: usize) -> ::std::fmt::Result {
}
}
}
-}
-impl<'mir, 'tcx> Frame<'mir, 'tcx> {
- pub fn get_local(&self, local: mir::Local) -> EvalResult<'tcx, Value> {
- self.locals[local].ok_or_else(|| EvalErrorKind::DeadLocal.into())
+ pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, LocalValue> {
+ trace!("{:?} is now live", local);
+
+ let ty = self.frame().mir.local_decls[local].ty;
+ let init = self.init_value(ty)?;
+ // StorageLive *always* kills the value that's currently stored
+ Ok(mem::replace(&mut self.frame_mut().locals[local], LocalValue::Live(init)))
}
+ fn init_value(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
+ let ty = self.monomorphize(ty, self.substs());
+ let layout = self.layout_of(ty)?;
+ Ok(match layout.abi {
+ layout::Abi::Scalar(..) => Value::Scalar(ScalarMaybeUndef::Undef),
+ layout::Abi::ScalarPair(..) => Value::ScalarPair(
+ ScalarMaybeUndef::Undef,
+ ScalarMaybeUndef::Undef,
+ ),
+ _ => Value::ByRef(self.alloc_ptr(ty)?.into(), layout.align),
+ })
+ }
+}
+
+impl<'mir, 'tcx> Frame<'mir, 'tcx> {
fn set_local(&mut self, local: mir::Local, value: Value) -> EvalResult<'tcx> {
match self.locals[local] {
- None => err!(DeadLocal),
- Some(ref mut local) => {
+ LocalValue::Dead => err!(DeadLocal),
+ LocalValue::Live(ref mut local) => {
*local = value;
Ok(())
}
}
}
- pub fn storage_live(&mut self, local: mir::Local) -> Option<Value> {
- trace!("{:?} is now live", local);
-
- // StorageLive *always* kills the value that's currently stored
- mem::replace(&mut self.locals[local], Some(Value::Scalar(Scalar::undef())))
- }
-
/// Returns the old value of the local
- pub fn storage_dead(&mut self, local: mir::Local) -> Option<Value> {
+ pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue {
trace!("{:?} is now dead", local);
- self.locals[local].take()
+ mem::replace(&mut self.locals[local], LocalValue::Dead)
}
}
use rustc::ty::ParamEnv;
use rustc::ty::query::TyCtxtAt;
use rustc::ty::layout::{self, Align, TargetDataLayout, Size};
-use rustc::mir::interpret::{Pointer, AllocId, Allocation, AccessKind, Value,
+use rustc::mir::interpret::{Pointer, AllocId, Allocation, AccessKind, Value, ScalarMaybeUndef,
EvalResult, Scalar, EvalErrorKind, GlobalId, AllocType};
pub use rustc::mir::interpret::{write_target_uint, write_target_int, read_target_uint};
use rustc_data_structures::fx::{FxHashSet, FxHashMap, FxHasher};
let alloc = self.get(ptr.alloc_id)?;
(ptr.offset.bytes(), alloc.align)
}
- Scalar::Bits { bits, defined } => {
- if (defined as u64) < self.pointer_size().bits() {
- return err!(ReadUndefBytes);
- }
+ Scalar::Bits { bits, size } => {
+ assert_eq!(size as u64, self.pointer_size().bytes());
// FIXME: what on earth does this line do? docs or fix needed!
let v = ((bits as u128) % (1 << self.pointer_size().bytes())) as u64;
if v == 0 {
Ok(())
}
- pub fn read_scalar(&self, ptr: Pointer, ptr_align: Align, size: Size) -> EvalResult<'tcx, Scalar> {
+ pub fn read_scalar(&self, ptr: Pointer, ptr_align: Align, size: Size) -> EvalResult<'tcx, ScalarMaybeUndef> {
self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer
let endianness = self.endianness();
let bytes = self.get_bytes_unchecked(ptr, size, ptr_align.min(self.int_align(size)))?;
// We must not return Ok() for unaligned pointers!
if self.check_defined(ptr, size).is_err() {
// this inflates undefined bytes to the entire scalar, even if only a few bytes are undefined
- return Ok(Scalar::undef().into());
+ return Ok(ScalarMaybeUndef::Undef);
}
// Now we do the actual reading
let bits = read_target_uint(endianness, bytes).unwrap();
} else {
let alloc = self.get(ptr.alloc_id)?;
match alloc.relocations.get(&ptr.offset) {
- Some(&alloc_id) => return Ok(Pointer::new(alloc_id, Size::from_bytes(bits as u64)).into()),
+ Some(&alloc_id) => return Ok(ScalarMaybeUndef::Scalar(Pointer::new(alloc_id, Size::from_bytes(bits as u64)).into())),
None => {},
}
}
// We don't. Just return the bits.
- Ok(Scalar::Bits {
+ Ok(ScalarMaybeUndef::Scalar(Scalar::Bits {
bits,
- defined: size.bits() as u8,
- })
+ size: size.bytes() as u8,
+ }))
}
- pub fn read_ptr_sized(&self, ptr: Pointer, ptr_align: Align) -> EvalResult<'tcx, Scalar> {
+ pub fn read_ptr_sized(&self, ptr: Pointer, ptr_align: Align) -> EvalResult<'tcx, ScalarMaybeUndef> {
self.read_scalar(ptr, ptr_align, self.pointer_size())
}
- pub fn write_scalar(&mut self, ptr: Scalar, ptr_align: Align, val: Scalar, size: Size, signed: bool) -> EvalResult<'tcx> {
+ pub fn write_scalar(&mut self, ptr: Scalar, ptr_align: Align, val: ScalarMaybeUndef, type_size: Size, signed: bool) -> EvalResult<'tcx> {
let endianness = self.endianness();
+ let val = match val {
+ ScalarMaybeUndef::Scalar(scalar) => scalar,
+ ScalarMaybeUndef::Undef => return self.mark_definedness(ptr, type_size, false),
+ };
+
let bytes = match val {
Scalar::Ptr(val) => {
- assert_eq!(size, self.pointer_size());
+ assert_eq!(type_size, self.pointer_size());
val.offset.bytes() as u128
}
- Scalar::Bits { bits, defined } if defined as u64 >= size.bits() && size.bits() != 0 => bits,
-
- Scalar::Bits { .. } => {
- self.check_align(ptr.into(), ptr_align)?;
- self.mark_definedness(ptr, size, false)?;
+ Scalar::Bits { size: 0, .. } => {
+ // nothing to do for ZSTs
+ assert_eq!(type_size.bytes(), 0);
return Ok(());
}
+
+ Scalar::Bits { bits, size } => {
+ assert_eq!(size as u64, type_size.bytes());
+ bits
+ },
};
let ptr = ptr.to_ptr()?;
{
- let align = self.int_align(size);
- let dst = self.get_bytes_mut(ptr, size, ptr_align.min(align))?;
+ let align = self.int_align(type_size);
+ let dst = self.get_bytes_mut(ptr, type_size, ptr_align.min(align))?;
if signed {
write_target_int(endianness, dst, bytes as i128).unwrap();
} else {
Ok(())
}
- pub fn write_ptr_sized_unsigned(&mut self, ptr: Pointer, ptr_align: Align, val: Scalar) -> EvalResult<'tcx> {
+ pub fn write_ptr_sized_unsigned(&mut self, ptr: Pointer, ptr_align: Align, val: ScalarMaybeUndef) -> EvalResult<'tcx> {
let ptr_size = self.pointer_size();
self.write_scalar(ptr.into(), ptr_align, val, ptr_size, false)
}
fn into_ptr(
&self,
value: Value,
- ) -> EvalResult<'tcx, Scalar> {
+ ) -> EvalResult<'tcx, ScalarMaybeUndef> {
Ok(match value {
Value::ByRef(ptr, align) => {
self.memory().read_ptr_sized(ptr.to_ptr()?, align)?
fn into_ptr_vtable_pair(
&self,
value: Value,
- ) -> EvalResult<'tcx, (Scalar, Pointer)> {
+ ) -> EvalResult<'tcx, (ScalarMaybeUndef, Pointer)> {
match value {
Value::ByRef(ref_ptr, align) => {
let mem = self.memory();
let vtable = mem.read_ptr_sized(
ref_ptr.ptr_offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?,
align
- )?.to_ptr()?;
+ )?.read()?.to_ptr()?;
Ok((ptr, vtable))
}
- Value::ScalarPair(ptr, vtable) => Ok((ptr.into(), vtable.to_ptr()?)),
+ Value::ScalarPair(ptr, vtable) => Ok((ptr, vtable.read()?.to_ptr()?)),
_ => bug!("expected ptr and vtable, got {:?}", value),
}
}
fn into_slice(
&self,
value: Value,
- ) -> EvalResult<'tcx, (Scalar, u64)> {
+ ) -> EvalResult<'tcx, (ScalarMaybeUndef, u64)> {
match value {
Value::ByRef(ref_ptr, align) => {
let mem = self.memory();
let len = mem.read_ptr_sized(
ref_ptr.ptr_offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?,
align
- )?.to_bits(mem.pointer_size())? as u64;
+ )?.read()?.to_bits(mem.pointer_size())? as u64;
Ok((ptr, len))
}
Value::ScalarPair(ptr, val) => {
- let len = val.to_bits(self.memory().pointer_size())?;
- Ok((ptr.into(), len as u64))
+ let len = val.read()?.to_bits(self.memory().pointer_size())?;
+ Ok((ptr, len as u64))
}
Value::Scalar(_) => bug!("expected ptr and length, got {:?}", value),
}
mod terminator;
mod traits;
-pub use self::eval_context::{EvalContext, Frame, StackPopCleanup,
- TyAndPacked, ValTy};
+pub use self::eval_context::{
+ EvalContext, Frame, StackPopCleanup,
+ TyAndPacked, ValTy,
+};
pub use self::place::{Place, PlaceExtra};
pub use self::memory::{write_target_uint, write_target_int, read_target_uint};
-use rustc::mir::interpret::{EvalResult, EvalErrorKind};
-use rustc::ty::{Ty, TyCtxt, ParamEnv};
+use rustc::ty::layout::TyLayout;
-pub fn sign_extend<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, value: u128, ty: Ty<'tcx>) -> EvalResult<'tcx, u128> {
- let param_env = ParamEnv::empty();
- let layout = tcx.layout_of(param_env.and(ty)).map_err(|layout| EvalErrorKind::Layout(layout))?;
+pub fn sign_extend(value: u128, layout: TyLayout<'_>) -> u128 {
let size = layout.size.bits();
assert!(layout.abi.is_signed());
// sign extend
let shift = 128 - size;
// shift the unsigned value to the left
// and back to the right as signed (essentially fills with FF on the left)
- Ok((((value << shift) as i128) >> shift) as u128)
+ (((value << shift) as i128) >> shift) as u128
}
-pub fn truncate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, value: u128, ty: Ty<'tcx>) -> EvalResult<'tcx, u128> {
- let param_env = ParamEnv::empty();
- let layout = tcx.layout_of(param_env.and(ty)).map_err(|layout| EvalErrorKind::Layout(layout))?;
+pub fn truncate(value: u128, layout: TyLayout<'_>) -> u128 {
let size = layout.size.bits();
let shift = 128 - size;
// truncate (shift left to drop out leftover values, shift right to fill with zeroes)
- Ok((value << shift) >> shift)
+ (value << shift) >> shift
}
use rustc::mir;
use rustc::ty::{self, Ty, layout};
use syntax::ast::FloatTy;
-use rustc::ty::layout::LayoutOf;
+use rustc::ty::layout::{LayoutOf, TyLayout};
use rustc_apfloat::ieee::{Double, Single};
use rustc_apfloat::Float;
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
- let val = Value::ScalarPair(val, Scalar::from_bool(overflowed));
+ let val = Value::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
let valty = ValTy {
value: val,
ty: dest_ty,
let signed = left_layout.abi.is_signed();
let mut oflo = (r as u32 as u128) != r;
let mut r = r as u32;
- let size = left_layout.size.bits() as u32;
- oflo |= r >= size;
+ let size = left_layout.size;
+ oflo |= r >= size.bits() as u32;
if oflo {
- r %= size;
+ r %= size.bits() as u32;
}
let result = if signed {
- let l = self.sign_extend(l, left_ty)? as i128;
+ let l = self.sign_extend(l, left_layout) as i128;
let result = match bin_op {
Shl => l << r,
Shr => l >> r,
_ => bug!("it has already been checked that this is a shift op"),
}
};
- let truncated = self.truncate(result, left_ty)?;
+ let truncated = self.truncate(result, left_layout);
return Ok((Scalar::Bits {
bits: truncated,
- defined: size as u8,
+ size: size.bytes() as u8,
}, oflo));
}
_ => None,
};
if let Some(op) = op {
- let l = self.sign_extend(l, left_ty)? as i128;
- let r = self.sign_extend(r, right_ty)? as i128;
+ let l = self.sign_extend(l, left_layout) as i128;
+ let r = self.sign_extend(r, right_layout) as i128;
return Ok((Scalar::from_bool(op(&l, &r)), false));
}
let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
_ => None,
};
if let Some(op) = op {
- let l128 = self.sign_extend(l, left_ty)? as i128;
- let r = self.sign_extend(r, right_ty)? as i128;
- let size = left_layout.size.bits();
+ let l128 = self.sign_extend(l, left_layout) as i128;
+ let r = self.sign_extend(r, right_layout) as i128;
+ let size = left_layout.size;
match bin_op {
Rem | Div => {
// int_min / -1
- if r == -1 && l == (1 << (size - 1)) {
- return Ok((Scalar::Bits { bits: l, defined: size as u8 }, true));
+ if r == -1 && l == (1 << (size.bits() - 1)) {
+ return Ok((Scalar::Bits { bits: l, size: size.bytes() as u8 }, true));
}
},
_ => {},
trace!("{}, {}, {}", l, l128, r);
let (result, mut oflo) = op(l128, r);
trace!("{}, {}", result, oflo);
- if !oflo && size != 128 {
- let max = 1 << (size - 1);
+ if !oflo && size.bits() != 128 {
+ let max = 1 << (size.bits() - 1);
oflo = result >= max || result < -max;
}
let result = result as u128;
- let truncated = self.truncate(result, left_ty)?;
+ let truncated = self.truncate(result, left_layout);
return Ok((Scalar::Bits {
bits: truncated,
- defined: size as u8,
+ size: size.bytes() as u8,
}, oflo));
}
}
if let ty::TyFloat(fty) = left_ty.sty {
macro_rules! float_math {
- ($ty:path, $bitsize:expr) => {{
+ ($ty:path, $size:expr) => {{
let l = <$ty>::from_bits(l);
let r = <$ty>::from_bits(r);
let bitify = |res: ::rustc_apfloat::StatusAnd<$ty>| Scalar::Bits {
bits: res.value.to_bits(),
- defined: $bitsize,
+ size: $size,
};
let val = match bin_op {
Eq => Scalar::from_bool(l == r),
}};
}
match fty {
- FloatTy::F32 => float_math!(Single, 32),
- FloatTy::F64 => float_math!(Double, 64),
+ FloatTy::F32 => float_math!(Single, 4),
+ FloatTy::F64 => float_math!(Double, 8),
}
}
- let bit_width = self.layout_of(left_ty).unwrap().size.bits() as u8;
+ let size = self.layout_of(left_ty).unwrap().size.bytes() as u8;
// only ints left
let val = match bin_op {
Gt => Scalar::from_bool(l > r),
Ge => Scalar::from_bool(l >= r),
- BitOr => Scalar::Bits { bits: l | r, defined: bit_width },
- BitAnd => Scalar::Bits { bits: l & r, defined: bit_width },
- BitXor => Scalar::Bits { bits: l ^ r, defined: bit_width },
+ BitOr => Scalar::Bits { bits: l | r, size },
+ BitAnd => Scalar::Bits { bits: l & r, size },
+ BitXor => Scalar::Bits { bits: l ^ r, size },
Add | Sub | Mul | Rem | Div => {
let op: fn(u128, u128) -> (u128, bool) = match bin_op {
_ => bug!(),
};
let (result, oflo) = op(l, r);
- let truncated = self.truncate(result, left_ty)?;
+ let truncated = self.truncate(result, left_layout);
return Ok((Scalar::Bits {
bits: truncated,
- defined: bit_width,
+ size,
}, oflo || truncated != result));
}
&self,
un_op: mir::UnOp,
val: Scalar,
- ty: Ty<'tcx>,
+ layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Scalar> {
use rustc::mir::UnOp::*;
use rustc_apfloat::ieee::{Single, Double};
use rustc_apfloat::Float;
- let size = self.layout_of(ty)?.size;
+ let size = layout.size;
let bytes = val.to_bits(size)?;
- let size = size.bits();
- let result_bytes = match (un_op, &ty.sty) {
+ let result_bytes = match (un_op, &layout.ty.sty) {
(Not, ty::TyBool) => !val.to_bool()? as u128,
(Neg, ty::TyFloat(FloatTy::F32)) => Single::to_bits(-Single::from_bits(bytes)),
(Neg, ty::TyFloat(FloatTy::F64)) => Double::to_bits(-Double::from_bits(bytes)),
- (Neg, _) if bytes == (1 << (size - 1)) => return err!(OverflowNeg),
+ (Neg, _) if bytes == (1 << (size.bits() - 1)) => return err!(OverflowNeg),
(Neg, _) => (-(bytes as i128)) as u128,
};
Ok(Scalar::Bits {
- bits: self.truncate(result_bytes, ty)?,
- defined: size as u8,
+ bits: self.truncate(result_bytes, layout),
+ size: size.bytes() as u8,
})
}
}
use rustc::ty::layout::{self, Align, LayoutOf, TyLayout};
use rustc_data_structures::indexed_vec::Idx;
-use rustc::mir::interpret::{GlobalId, Value, Scalar, EvalResult, Pointer};
+use rustc::mir::interpret::{GlobalId, Value, Scalar, EvalResult, Pointer, ScalarMaybeUndef};
use super::{EvalContext, Machine, ValTy};
use interpret::memory::HasMemory;
/// A place may have an invalid (integral or undef) pointer,
/// since it might be turned back into a reference
/// before ever being dereferenced.
- ptr: Scalar,
+ ptr: ScalarMaybeUndef,
align: Align,
extra: PlaceExtra,
},
impl<'tcx> Place {
/// Produces a Place that will error if attempted to be read from
pub fn undef() -> Self {
- Self::from_scalar_ptr(Scalar::undef().into(), Align::from_bytes(1, 1).unwrap())
+ Self::from_scalar_ptr(ScalarMaybeUndef::Undef, Align::from_bytes(1, 1).unwrap())
}
- pub fn from_scalar_ptr(ptr: Scalar, align: Align) -> Self {
+ pub fn from_scalar_ptr(ptr: ScalarMaybeUndef, align: Align) -> Self {
Place::Ptr {
ptr,
align,
}
pub fn from_ptr(ptr: Pointer, align: Align) -> Self {
- Self::from_scalar_ptr(ptr.into(), align)
+ Self::from_scalar_ptr(ScalarMaybeUndef::Scalar(ptr.into()), align)
}
- pub fn to_ptr_align_extra(self) -> (Scalar, Align, PlaceExtra) {
+ pub fn to_ptr_align_extra(self) -> (ScalarMaybeUndef, Align, PlaceExtra) {
match self {
Place::Ptr { ptr, align, extra } => (ptr, align, extra),
_ => bug!("to_ptr_and_extra: expected Place::Ptr, got {:?}", self),
}
}
- pub fn to_ptr_align(self) -> (Scalar, Align) {
+ pub fn to_ptr_align(self) -> (ScalarMaybeUndef, Align) {
let (ptr, align, _extra) = self.to_ptr_align_extra();
(ptr, align)
}
-
+/*
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
// At this point, we forget about the alignment information -- the place has been turned into a reference,
// and no matter where it came from, it now must be aligned.
self.to_ptr_align().0.to_ptr()
}
-
+*/
pub(super) fn elem_ty_and_len(
self,
ty: Ty<'tcx>,
// Might allow this in the future, right now there's no way to do this from Rust code anyway
Local(mir::RETURN_PLACE) => err!(ReadFromReturnPointer),
// Directly reading a local will always succeed
- Local(local) => self.frame().get_local(local).map(Some),
+ Local(local) => self.frame().locals[local].access().map(Some),
// No fast path for statics. Reading from statics is rare and would require another
// Machine function to handle differently in miri.
Promoted(_) |
let field = base_layout.field(self, field_index)?;
if field.size.bytes() == 0 {
return Ok((
- Value::Scalar(Scalar::undef()),
+ Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits: 0, size: 0 })),
field,
));
}
match place {
Place::Ptr { ptr, align, extra } => {
assert_eq!(extra, PlaceExtra::None);
- Ok(Value::ByRef(ptr, align))
+ Ok(Value::ByRef(ptr.read()?, align))
}
- Place::Local { frame, local } => self.stack[frame].get_local(local),
+ Place::Local { frame, local } => self.stack[frame].locals[local].access(),
}
}
})?;
if let Value::ByRef(ptr, align) = val {
Place::Ptr {
- ptr,
+ ptr: ptr.into(),
align,
extra: PlaceExtra::None,
}
};
let alloc = Machine::init_static(self, cid)?;
Place::Ptr {
- ptr: Scalar::Ptr(alloc.into()),
+ ptr: ScalarMaybeUndef::Scalar(Scalar::Ptr(alloc.into())),
align: layout.align,
extra: PlaceExtra::None,
}
let (base_ptr, base_align, base_extra) = match base {
Place::Ptr { ptr, align, extra } => (ptr, align, extra),
Place::Local { frame, local } => {
- match (&self.stack[frame].get_local(local)?, &base_layout.abi) {
+ match (self.stack[frame].locals[local].access()?, &base_layout.abi) {
// in case the field covers the entire type, just return the value
- (&Value::Scalar(_), &layout::Abi::Scalar(_)) |
- (&Value::ScalarPair(..), &layout::Abi::ScalarPair(..))
- if offset.bytes() == 0 && field.size == base_layout.size =>
- {
- return Ok((base, field));
- }
+ (Value::Scalar(_), &layout::Abi::Scalar(_)) |
+ (Value::ScalarPair(..), &layout::Abi::ScalarPair(..))
+ if offset.bytes() == 0 && field.size == base_layout.size => {
+ return Ok((base, field))
+ },
_ => self.force_allocation(base)?.to_ptr_align_extra(),
}
}
}
Index(local) => {
- let value = self.frame().get_local(local)?;
+ let value = self.frame().locals[local].access()?;
let ty = self.tcx.types.usize;
let n = self
.value_to_scalar(ValTy { value, ty })?
// Mark locals as alive
StorageLive(local) => {
- let old_val = self.frame_mut().storage_live(local);
+ let old_val = self.storage_live(local)?;
self.deallocate_local(old_val)?;
}
use rustc::ty::{self, Ty};
use syntax::codemap::Span;
-use rustc::mir::interpret::{EvalResult, Scalar, Value};
+use rustc::mir::interpret::{EvalResult, Value};
use interpret::{Machine, ValTy, EvalContext, Place, PlaceExtra};
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
ptr,
align: _,
extra: PlaceExtra::None,
- } => ptr.to_value(),
+ } => Value::Scalar(ptr),
_ => bug!("force_allocation broken"),
};
self.drop(val, instance, ty, span, target)
let instance = match ty.sty {
ty::TyDynamic(..) => {
- let vtable = match arg {
- Value::ScalarPair(_, Scalar::Ptr(vtable)) => vtable,
- _ => bug!("expected fat ptr, got {:?}", arg),
- };
- match self.read_drop_type_from_vtable(vtable)? {
- Some(func) => func,
- // no drop fn -> bail out
- None => {
- self.goto_block(target);
- return Ok(())
- },
+ if let Value::ScalarPair(_, vtable) = arg {
+ self.read_drop_type_from_vtable(vtable.read()?.to_ptr()?)?
+ } else {
+ bug!("expected fat ptr, got {:?}", arg);
}
}
_ => instance,
use syntax::codemap::Span;
use rustc_target::spec::abi::Abi;
-use rustc::mir::interpret::{EvalResult, Scalar};
+use rustc::mir::interpret::{EvalResult, Scalar, Value};
use super::{EvalContext, Place, Machine, ValTy};
use rustc_data_structures::indexed_vec::Idx;
for (index, &const_int) in values.iter().enumerate() {
// Compare using binary_op
- let const_int = Scalar::Bits { bits: const_int, defined: 128 };
+ let const_int = Scalar::Bits { bits: const_int, size: discr_layout.size.bytes() as u8 };
let res = self.binary_op(mir::BinOp::Eq,
discr_prim, discr_val.ty,
const_int, discr_val.ty
let fn_ptr = self.memory.read_ptr_sized(
vtable.offset(ptr_size * (idx as u64 + 3), &self)?,
ptr_align
- )?.to_ptr()?;
+ )?.read()?.to_ptr()?;
let instance = self.memory.get_fn(fn_ptr)?;
let mut args = args.to_vec();
let ty = self.layout_of(args[0].ty)?.field(&self, 0)?.ty;
args[0].ty = ty;
- args[0].value = ptr.to_value();
+ args[0].value = Value::Scalar(ptr);
// recurse with concrete function
self.eval_fn_call(instance, destination, &args, span, sig)
}
use rustc::ty::{self, Ty};
use rustc::ty::layout::{Size, Align, LayoutOf};
-use rustc::mir::interpret::{Scalar, Value, Pointer, EvalResult};
+use rustc::mir::interpret::{Scalar, Pointer, EvalResult};
use syntax::ast::Mutability;
let drop = ::monomorphize::resolve_drop_in_place(*self.tcx, ty);
let drop = self.memory.create_fn_alloc(drop);
- self.memory.write_ptr_sized_unsigned(vtable, ptr_align, drop.into())?;
+ // Slot 0: the drop-in-place fn. `write_ptr_sized_unsigned` takes a
+ // ScalarMaybeUndef now, hence the explicit `Scalar::Ptr(..).into()`.
+ self.memory.write_ptr_sized_unsigned(vtable, ptr_align, Scalar::Ptr(drop).into())?;
let size_ptr = vtable.offset(ptr_size, &self)?;
+ // Slot 1: the concrete type's size, as a pointer-wide Scalar::Bits
+ // (`size` is the width in bytes, replacing the old `defined` bit count).
self.memory.write_ptr_sized_unsigned(size_ptr, ptr_align, Scalar::Bits {
bits: size as u128,
- defined: ptr_size.bits() as u8,
- })?;
+ size: ptr_size.bytes() as u8,
+ }.into())?;
let align_ptr = vtable.offset(ptr_size * 2, &self)?;
+ // Slot 2: the alignment, same encoding as the size above.
self.memory.write_ptr_sized_unsigned(align_ptr, ptr_align, Scalar::Bits {
bits: align as u128,
- defined: ptr_size.bits() as u8,
- })?;
+ size: ptr_size.bytes() as u8,
+ }.into())?;
for (i, method) in methods.iter().enumerate() {
if let Some((def_id, substs)) = *method {
let instance = self.resolve(def_id, substs)?;
let fn_ptr = self.memory.create_fn_alloc(instance);
let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?;
- self.memory.write_ptr_sized_unsigned(method_ptr, ptr_align, fn_ptr.into())?;
+ // Slots 3..: the trait's method fn pointers, wrapped as Scalar::Ptr.
+ self.memory.write_ptr_sized_unsigned(method_ptr, ptr_align, Scalar::Ptr(fn_ptr).into())?;
}
}
+ /// Reads the drop-in-place `Instance` out of slot 0 of the given vtable.
+ /// Errors if the slot is undef or does not hold a function pointer.
pub fn read_drop_type_from_vtable(
&self,
vtable: Pointer,
- ) -> EvalResult<'tcx, Option<ty::Instance<'tcx>>> {
+ ) -> EvalResult<'tcx, ty::Instance<'tcx>> {
// we don't care about the pointee type, we just want a pointer
let pointer_align = self.tcx.data_layout.pointer_align;
- let pointer_size = self.tcx.data_layout.pointer_size.bits() as u8;
- match self.read_ptr(vtable, pointer_align, self.tcx.mk_nil_ptr())? {
- // some values don't need to call a drop impl, so the value is null
- Value::Scalar(Scalar::Bits { bits: 0, defined} ) if defined == pointer_size => Ok(None),
- Value::Scalar(Scalar::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some),
- _ => err!(ReadBytesAsPointer),
- }
+ // Slot 0 always holds a real drop fn now (the vtable builder writes one
+ // unconditionally), so the old null-means-no-drop case — and with it the
+ // `Option` in the return type — is gone. `.read()?` rejects undef bytes.
+ let drop_fn = self.memory.read_ptr_sized(vtable, pointer_align)?.read()?.to_ptr()?;
+ self.memory.get_fn(drop_fn)
}
pub fn read_size_and_align_from_vtable(
) -> EvalResult<'tcx, (Size, Align)> {
let pointer_size = self.memory.pointer_size();
let pointer_align = self.tcx.data_layout.pointer_align;
- let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bits(pointer_size)? as u64;
+ // Slot 1 holds the size, slot 2 the align (see vtable construction).
+ // `.read()?` unwraps the ScalarMaybeUndef that memory reads now return,
+ // erroring out on undef instead of silently using garbage.
+ let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.read()?.to_bits(pointer_size)? as u64;
let align = self.memory.read_ptr_sized(
vtable.offset(pointer_size * 2, self)?,
pointer_align
- )?.to_bits(pointer_size)? as u64;
+ )?.read()?.to_bits(pointer_size)? as u64;
Ok((Size::from_bytes(size), Align::from_bytes(align, align).unwrap()))
}
}
use rustc::mir::{NullOp, StatementKind, Statement, BasicBlock, LocalKind};
use rustc::mir::{TerminatorKind, ClearCrossCrate, SourceInfo, BinOp, ProjectionElem};
use rustc::mir::visit::{Visitor, PlaceContext};
-use rustc::mir::interpret::{ConstEvalErr, EvalErrorKind};
+use rustc::mir::interpret::{ConstEvalErr, EvalErrorKind, ScalarMaybeUndef};
use rustc::ty::{TyCtxt, self, Instance};
use rustc::mir::interpret::{Value, Scalar, GlobalId, EvalResult};
use interpret::EvalContext;
type_size_of(self.tcx, self.param_env, ty).and_then(|n| Some((
Value::Scalar(Scalar::Bits {
bits: n as u128,
- defined: self.tcx.data_layout.pointer_size.bits() as u8,
+ // `size` is the value's width in bytes (usize-wide here); it replaces
+ // the old `defined` bit count of the pre-ScalarMaybeUndef encoding.
+ size: self.tcx.data_layout.pointer_size.bytes() as u8,
- }),
+ // `Value::Scalar` now wraps a ScalarMaybeUndef, so the known-good Scalar
+ // must be converted explicitly — matching the `.into()` calls the other
+ // const_prop arms already use.
+ }.into()),
self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).ok()?,
span,
let prim = self.use_ecx(source_info, |this| {
this.ecx.value_to_scalar(ValTy { value: val.0, ty: val.1.ty })
})?;
- let val = self.use_ecx(source_info, |this| this.ecx.unary_op(op, prim, val.1.ty))?;
+ // `unary_op` now takes the operand's full layout (`val.1`), not just its type.
+ let val = self.use_ecx(source_info, |this| this.ecx.unary_op(op, prim, val.1))?;
- Some((Value::Scalar(val), place_layout, span))
+ // `unary_op` yields a definite Scalar, but `Value::Scalar` now wraps a
+ // ScalarMaybeUndef — convert explicitly, as the binary-op arm already does.
+ Some((Value::Scalar(val.into()), place_layout, span))
}
Rvalue::CheckedBinaryOp(op, ref left, ref right) |
})?;
let val = if let Rvalue::CheckedBinaryOp(..) = *rvalue {
+ // Checked ops produce (result, overflow-flag); both components are known
+ // good Scalars, converted into the ScalarMaybeUndef that Value now stores.
Value::ScalarPair(
- val,
- Scalar::from_bool(overflow),
+ val.into(),
+ Scalar::from_bool(overflow).into(),
)
} else {
if overflow {
+ // Unchecked op that overflowed: record the error and give up on this
+ // operand instead of propagating a bogus constant.
let _: Option<()> = self.use_ecx(source_info, |_| Err(err));
return None;
}
- Value::Scalar(val)
+ Value::Scalar(val.into())
};
Some((val, place_layout, span))
},
if let TerminatorKind::Assert { expected, msg, cond, .. } = kind {
if let Some(value) = self.eval_operand(cond, source_info) {
trace!("assertion on {:?} should be {:?}", value, expected);
- if Value::Scalar(Scalar::from_bool(*expected)) != value.0 {
+ // Wrap the expected bool as a ScalarMaybeUndef so it compares against
+ // Value's new payload type.
+ if Value::Scalar(Scalar::from_bool(*expected).into()) != value.0 {
// poison all places this operand references so that further code
// doesn't use the invalid value
match cond {
.eval_operand(len, source_info)
.expect("len must be const");
let len = match len.0 {
- Value::Scalar(Scalar::Bits { bits, ..}) => bits,
+ // A const array length must be a fully-defined integer, so the pattern
+ // now reaches through ScalarMaybeUndef::Scalar to get at the bits.
+ Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits {
+ bits, ..
+ })) => bits,
_ => bug!("const len not primitive: {:?}", len),
};
let index = self
.eval_operand(index, source_info)
.expect("index must be const");
let index = match index.0 {
- Value::Scalar(Scalar::Bits { bits, .. }) => bits,
+ // Same requirement for the index being bounds-checked.
+ Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits {
+ bits, ..
+ })) => bits,
_ => bug!("const index not primitive: {:?}", index),
};
format!(