//! The virtual memory representation of the MIR interpreter.
use super::{
- Pointer, EvalResult, AllocId, ScalarMaybeUndef, write_target_uint, read_target_uint, Scalar,
+ Pointer, InterpResult, AllocId, ScalarMaybeUndef, write_target_uint, read_target_uint, Scalar,
};
use crate::ty::layout::{Size, Align};
_alloc: &Allocation<Tag, Self>,
_ptr: Pointer<Tag>,
_size: Size,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
Ok(())
}
_alloc: &mut Allocation<Tag, Self>,
_ptr: Pointer<Tag>,
_size: Size,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
Ok(())
}
_alloc: &mut Allocation<Tag, Self>,
_ptr: Pointer<Tag>,
_size: Size,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
Ok(())
}
}
&self,
ptr: Pointer<Tag>,
msg: CheckInAllocMsg,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
let allocation_size = self.bytes.len() as u64;
ptr.check_in_alloc(Size::from_bytes(allocation_size), msg)
}
ptr: Pointer<Tag>,
size: Size,
msg: CheckInAllocMsg,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
// if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
self.check_bounds_ptr(ptr.offset(size, cx)?, msg)
}
size: Size,
check_defined_and_ptr: bool,
msg: CheckInAllocMsg,
- ) -> EvalResult<'tcx, &[u8]>
+ ) -> InterpResult<'tcx, &[u8]>
{
self.check_bounds(cx, ptr, size, msg)?;
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
- ) -> EvalResult<'tcx, &[u8]>
+ ) -> InterpResult<'tcx, &[u8]>
{
self.get_bytes_internal(cx, ptr, size, true, CheckInAllocMsg::MemoryAccessTest)
}
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
- ) -> EvalResult<'tcx, &[u8]>
+ ) -> InterpResult<'tcx, &[u8]>
{
self.get_bytes_internal(cx, ptr, size, false, CheckInAllocMsg::MemoryAccessTest)
}
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
- ) -> EvalResult<'tcx, &mut [u8]>
+ ) -> InterpResult<'tcx, &mut [u8]>
{
assert_ne!(size.bytes(), 0, "0-sized accesses should never even get a `Pointer`");
self.check_bounds(cx, ptr, size, CheckInAllocMsg::MemoryAccessTest)?;
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
- ) -> EvalResult<'tcx, &[u8]>
+ ) -> InterpResult<'tcx, &[u8]>
{
assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
let offset = ptr.offset.bytes() as usize;
ptr: Pointer<Tag>,
size: Size,
allow_ptr_and_undef: bool,
- ) -> EvalResult<'tcx>
+ ) -> InterpResult<'tcx>
{
// Check bounds and relocations on the edges
self.get_bytes_with_undef_and_ptr(cx, ptr, size)?;
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
src: &[u8],
- ) -> EvalResult<'tcx>
+ ) -> InterpResult<'tcx>
{
let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(src.len() as u64))?;
bytes.clone_from_slice(src);
ptr: Pointer<Tag>,
val: u8,
count: Size
- ) -> EvalResult<'tcx>
+ ) -> InterpResult<'tcx>
{
let bytes = self.get_bytes_mut(cx, ptr, count)?;
for b in bytes {
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size
- ) -> EvalResult<'tcx, ScalarMaybeUndef<Tag>>
+ ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>>
{
// `get_bytes_with_undef_and_ptr` tests relocation edges
let bytes = self.get_bytes_with_undef_and_ptr(cx, ptr, size)?;
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
- ) -> EvalResult<'tcx, ScalarMaybeUndef<Tag>>
+ ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>>
{
self.read_scalar(cx, ptr, cx.data_layout().pointer_size)
}
ptr: Pointer<Tag>,
val: ScalarMaybeUndef<Tag>,
type_size: Size,
- ) -> EvalResult<'tcx>
+ ) -> InterpResult<'tcx>
{
let val = match val {
ScalarMaybeUndef::Scalar(scalar) => scalar,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
val: ScalarMaybeUndef<Tag>
- ) -> EvalResult<'tcx>
+ ) -> InterpResult<'tcx>
{
let ptr_size = cx.data_layout().pointer_size;
self.write_scalar(cx, ptr.into(), val, ptr_size)
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
if self.relocations(cx, ptr, size).is_empty() {
Ok(())
} else {
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
// Find the start and end of the given range and its outermost relocations.
let (first, last) = {
// Find all relocations overlapping the given range.
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
self.check_relocations(cx, ptr, Size::ZERO)?;
self.check_relocations(cx, ptr.offset(size, cx)?, Size::ZERO)?;
Ok(())
/// Checks that a range of bytes is defined. If not, returns the `ReadUndefBytes`
/// error which will report the first byte which is undefined.
#[inline]
- fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> EvalResult<'tcx> {
+ fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
self.undef_mask.is_range_defined(
ptr.offset,
ptr.offset + size,
ptr: Pointer<Tag>,
size: Size,
new_state: bool,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
if size.bytes() == 0 {
return Ok(());
}
/// an `InterpError`. In `librustc_mir::interpret`, we have the `err!`
/// macro for this.
#[derive(Debug, Clone)]
-pub struct EvalError<'tcx> {
+pub struct InterpErrorInfo<'tcx> {
pub kind: InterpError<'tcx, u64>,
backtrace: Option<Box<Backtrace>>,
}
-impl<'tcx> EvalError<'tcx> {
+impl<'tcx> InterpErrorInfo<'tcx> {
pub fn print_backtrace(&mut self) {
if let Some(ref mut backtrace) = self.backtrace {
print_backtrace(&mut *backtrace);
eprintln!("\n\nAn error occurred in miri:\n{:?}", backtrace);
}
-impl<'tcx> From<InterpError<'tcx, u64>> for EvalError<'tcx> {
+impl<'tcx> From<InterpError<'tcx, u64>> for InterpErrorInfo<'tcx> {
fn from(kind: InterpError<'tcx, u64>) -> Self {
let backtrace = match env::var("RUST_CTFE_BACKTRACE") {
// Matching `RUST_BACKTRACE` -- we treat "0" the same as "not present".
},
_ => None,
};
- EvalError {
+ InterpErrorInfo {
kind,
backtrace,
}
InfiniteLoop,
}
-pub type EvalResult<'tcx, T = ()> = Result<T, EvalError<'tcx>>;
+pub type InterpResult<'tcx, T = ()> = Result<T, InterpErrorInfo<'tcx>>;
impl<'tcx, O> InterpError<'tcx, O> {
pub fn description(&self) -> &str {
}
}
-impl<'tcx> fmt::Display for EvalError<'tcx> {
+impl<'tcx> fmt::Display for InterpErrorInfo<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.kind)
}
mod pointer;
pub use self::error::{
- EvalError, EvalResult, InterpError, AssertMessage, ConstEvalErr, struct_error,
+ InterpErrorInfo, InterpResult, InterpError, AssertMessage, ConstEvalErr, struct_error,
FrameInfo, ConstEvalRawResult, ConstEvalResult, ErrorHandled,
};
use rustc_macros::HashStable;
use super::{
- AllocId, EvalResult, CheckInAllocMsg
+ AllocId, InterpResult, CheckInAllocMsg
};
////////////////////////////////////////////////////////////////////////////////
}
#[inline]
- fn offset<'tcx>(&self, val: u64, i: u64) -> EvalResult<'tcx, u64> {
+ fn offset<'tcx>(&self, val: u64, i: u64) -> InterpResult<'tcx, u64> {
let (res, over) = self.overflowing_offset(val, i);
if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) }
}
#[inline]
- fn signed_offset<'tcx>(&self, val: u64, i: i64) -> EvalResult<'tcx, u64> {
+ fn signed_offset<'tcx>(&self, val: u64, i: i64) -> InterpResult<'tcx, u64> {
let (res, over) = self.overflowing_signed_offset(val, i128::from(i));
if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) }
}
}
#[inline]
- pub fn offset(self, i: Size, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> {
+ pub fn offset(self, i: Size, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
Ok(Pointer::new_with_tag(
self.alloc_id,
Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?),
}
#[inline]
- pub fn signed_offset(self, i: i64, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> {
+ pub fn signed_offset(self, i: i64, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
Ok(Pointer::new_with_tag(
self.alloc_id,
Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?),
self,
allocation_size: Size,
msg: CheckInAllocMsg,
- ) -> EvalResult<'tcx, ()> {
+ ) -> InterpResult<'tcx, ()> {
if self.offset > allocation_size {
err!(PointerOutOfBounds {
ptr: self.erase_tag(),
use crate::ty::PlaceholderConst;
use crate::hir::def_id::DefId;
-use super::{EvalResult, Pointer, PointerArithmetic, Allocation, AllocId, sign_extend, truncate};
+use super::{InterpResult, Pointer, PointerArithmetic, Allocation, AllocId, sign_extend, truncate};
/// Represents the result of a raw const operation, pre-validation.
#[derive(Copy, Clone, Debug, Eq, PartialEq, RustcEncodable, RustcDecodable, Hash, HashStable)]
}
#[inline]
- pub fn ptr_offset(self, i: Size, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> {
+ pub fn ptr_offset(self, i: Size, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
let dl = cx.data_layout();
match self {
Scalar::Raw { data, size } => {
}
#[inline]
- pub fn ptr_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> {
+ pub fn ptr_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
let dl = cx.data_layout();
match self {
Scalar::Raw { data, size } => {
}
#[inline]
- pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> {
+ pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
match self {
Scalar::Raw { data, size } => {
assert_eq!(target_size.bytes(), size as u64);
}
#[inline]
- pub fn to_ptr(self) -> EvalResult<'tcx, Pointer<Tag>> {
+ pub fn to_ptr(self) -> InterpResult<'tcx, Pointer<Tag>> {
match self {
Scalar::Raw { data: 0, .. } => err!(InvalidNullPointerUsage),
Scalar::Raw { .. } => err!(ReadBytesAsPointer),
}
}
- pub fn to_bool(self) -> EvalResult<'tcx, bool> {
+ pub fn to_bool(self) -> InterpResult<'tcx, bool> {
match self {
Scalar::Raw { data: 0, size: 1 } => Ok(false),
Scalar::Raw { data: 1, size: 1 } => Ok(true),
}
}
- pub fn to_char(self) -> EvalResult<'tcx, char> {
+ pub fn to_char(self) -> InterpResult<'tcx, char> {
let val = self.to_u32()?;
match ::std::char::from_u32(val) {
Some(c) => Ok(c),
}
}
- pub fn to_u8(self) -> EvalResult<'static, u8> {
+ pub fn to_u8(self) -> InterpResult<'static, u8> {
let sz = Size::from_bits(8);
let b = self.to_bits(sz)?;
Ok(b as u8)
}
- pub fn to_u32(self) -> EvalResult<'static, u32> {
+ pub fn to_u32(self) -> InterpResult<'static, u32> {
let sz = Size::from_bits(32);
let b = self.to_bits(sz)?;
Ok(b as u32)
}
- pub fn to_u64(self) -> EvalResult<'static, u64> {
+ pub fn to_u64(self) -> InterpResult<'static, u64> {
let sz = Size::from_bits(64);
let b = self.to_bits(sz)?;
Ok(b as u64)
}
- pub fn to_usize(self, cx: &impl HasDataLayout) -> EvalResult<'static, u64> {
+ pub fn to_usize(self, cx: &impl HasDataLayout) -> InterpResult<'static, u64> {
let b = self.to_bits(cx.data_layout().pointer_size)?;
Ok(b as u64)
}
- pub fn to_i8(self) -> EvalResult<'static, i8> {
+ pub fn to_i8(self) -> InterpResult<'static, i8> {
let sz = Size::from_bits(8);
let b = self.to_bits(sz)?;
let b = sign_extend(b, sz) as i128;
Ok(b as i8)
}
- pub fn to_i32(self) -> EvalResult<'static, i32> {
+ pub fn to_i32(self) -> InterpResult<'static, i32> {
let sz = Size::from_bits(32);
let b = self.to_bits(sz)?;
let b = sign_extend(b, sz) as i128;
Ok(b as i32)
}
- pub fn to_i64(self) -> EvalResult<'static, i64> {
+ pub fn to_i64(self) -> InterpResult<'static, i64> {
let sz = Size::from_bits(64);
let b = self.to_bits(sz)?;
let b = sign_extend(b, sz) as i128;
Ok(b as i64)
}
- pub fn to_isize(self, cx: &impl HasDataLayout) -> EvalResult<'static, i64> {
+ pub fn to_isize(self, cx: &impl HasDataLayout) -> InterpResult<'static, i64> {
let sz = cx.data_layout().pointer_size;
let b = self.to_bits(sz)?;
let b = sign_extend(b, sz) as i128;
}
#[inline]
- pub fn to_f32(self) -> EvalResult<'static, f32> {
+ pub fn to_f32(self) -> InterpResult<'static, f32> {
Ok(f32::from_bits(self.to_u32()?))
}
#[inline]
- pub fn to_f64(self) -> EvalResult<'static, f64> {
+ pub fn to_f64(self) -> InterpResult<'static, f64> {
Ok(f64::from_bits(self.to_u64()?))
}
}
}
#[inline]
- pub fn not_undef(self) -> EvalResult<'static, Scalar<Tag>> {
+ pub fn not_undef(self) -> InterpResult<'static, Scalar<Tag>> {
match self {
ScalarMaybeUndef::Scalar(scalar) => Ok(scalar),
ScalarMaybeUndef::Undef => err!(ReadUndefBytes(Size::from_bytes(0))),
}
#[inline(always)]
- pub fn to_ptr(self) -> EvalResult<'tcx, Pointer<Tag>> {
+ pub fn to_ptr(self) -> InterpResult<'tcx, Pointer<Tag>> {
self.not_undef()?.to_ptr()
}
#[inline(always)]
- pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> {
+ pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
self.not_undef()?.to_bits(target_size)
}
#[inline(always)]
- pub fn to_bool(self) -> EvalResult<'tcx, bool> {
+ pub fn to_bool(self) -> InterpResult<'tcx, bool> {
self.not_undef()?.to_bool()
}
#[inline(always)]
- pub fn to_char(self) -> EvalResult<'tcx, char> {
+ pub fn to_char(self) -> InterpResult<'tcx, char> {
self.not_undef()?.to_char()
}
#[inline(always)]
- pub fn to_f32(self) -> EvalResult<'tcx, f32> {
+ pub fn to_f32(self) -> InterpResult<'tcx, f32> {
self.not_undef()?.to_f32()
}
#[inline(always)]
- pub fn to_f64(self) -> EvalResult<'tcx, f64> {
+ pub fn to_f64(self) -> InterpResult<'tcx, f64> {
self.not_undef()?.to_f64()
}
#[inline(always)]
- pub fn to_u8(self) -> EvalResult<'tcx, u8> {
+ pub fn to_u8(self) -> InterpResult<'tcx, u8> {
self.not_undef()?.to_u8()
}
#[inline(always)]
- pub fn to_u32(self) -> EvalResult<'tcx, u32> {
+ pub fn to_u32(self) -> InterpResult<'tcx, u32> {
self.not_undef()?.to_u32()
}
#[inline(always)]
- pub fn to_u64(self) -> EvalResult<'tcx, u64> {
+ pub fn to_u64(self) -> InterpResult<'tcx, u64> {
self.not_undef()?.to_u64()
}
#[inline(always)]
- pub fn to_usize(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, u64> {
+ pub fn to_usize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
self.not_undef()?.to_usize(cx)
}
#[inline(always)]
- pub fn to_i8(self) -> EvalResult<'tcx, i8> {
+ pub fn to_i8(self) -> InterpResult<'tcx, i8> {
self.not_undef()?.to_i8()
}
#[inline(always)]
- pub fn to_i32(self) -> EvalResult<'tcx, i32> {
+ pub fn to_i32(self) -> InterpResult<'tcx, i32> {
self.not_undef()?.to_i32()
}
#[inline(always)]
- pub fn to_i64(self) -> EvalResult<'tcx, i64> {
+ pub fn to_i64(self) -> InterpResult<'tcx, i64> {
self.not_undef()?.to_i64()
}
#[inline(always)]
- pub fn to_isize(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, i64> {
+ pub fn to_isize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, i64> {
self.not_undef()?.to_isize(cx)
}
}
use crate::interpret::{self,
PlaceTy, MPlaceTy, MemPlace, OpTy, ImmTy, Immediate, Scalar,
RawConst, ConstValue,
- EvalResult, EvalError, InterpError, GlobalId, InterpretCx, StackPopCleanup,
+ InterpResult, InterpErrorInfo, InterpError, GlobalId, InterpretCx, StackPopCleanup,
Allocation, AllocId, MemoryKind,
snapshot, RefTracking,
};
cid: GlobalId<'tcx>,
mir: &'mir mir::Body<'tcx>,
param_env: ty::ParamEnv<'tcx>,
-) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
let span = tcx.def_span(cid.instance.def_id());
let mut ecx = mk_eval_cx(tcx, span, param_env);
eval_body_using_ecx(&mut ecx, cid, mir, param_env)
cid: GlobalId<'tcx>,
mir: &'mir mir::Body<'tcx>,
param_env: ty::ParamEnv<'tcx>,
-) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
debug!("eval_body_using_ecx: {:?}, {:?}", cid, param_env);
let tcx = ecx.tcx.tcx;
let layout = ecx.layout_of(mir.return_ty().subst(tcx, cid.instance.substs))?;
Ok(ret)
}
-impl<'tcx> Into<EvalError<'tcx>> for ConstEvalError {
- fn into(self) -> EvalError<'tcx> {
+impl<'tcx> Into<InterpErrorInfo<'tcx>> for ConstEvalError {
+ fn into(self) -> InterpErrorInfo<'tcx> {
InterpError::MachineError(self.to_string()).into()
}
}
args: &[OpTy<'tcx>],
dest: Option<PlaceTy<'tcx>>,
ret: Option<mir::BasicBlock>,
- ) -> EvalResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
+ ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
debug!("eval_fn_call: {:?}", instance);
// Only check non-glue functions
if let ty::InstanceDef::Item(def_id) = instance.def {
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: PlaceTy<'tcx>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
if ecx.emulate_intrinsic(instance, args, dest)? {
return Ok(());
}
_bin_op: mir::BinOp,
_left: ImmTy<'tcx>,
_right: ImmTy<'tcx>,
- ) -> EvalResult<'tcx, (Scalar, bool)> {
+ ) -> InterpResult<'tcx, (Scalar, bool)> {
Err(
ConstEvalError::NeedsRfc("pointer arithmetic or comparison".to_string()).into(),
)
fn find_foreign_static(
_def_id: DefId,
_tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
- ) -> EvalResult<'tcx, Cow<'tcx, Allocation<Self::PointerTag>>> {
+ ) -> InterpResult<'tcx, Cow<'tcx, Allocation<Self::PointerTag>>> {
err!(ReadForeignStatic)
}
fn box_alloc(
_ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>,
_dest: PlaceTy<'tcx>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
Err(
ConstEvalError::NeedsRfc("heap allocations via `box` keyword".to_string()).into(),
)
}
- fn before_terminator(ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>) -> EvalResult<'tcx> {
+ fn before_terminator(ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>) -> InterpResult<'tcx> {
{
let steps = &mut ecx.machine.steps_since_detector_enabled;
#[inline(always)]
fn stack_push(
_ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
Ok(())
}
fn stack_pop(
_ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>,
_extra: (),
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
Ok(())
}
}
pub fn error_to_const_error<'a, 'mir, 'tcx>(
ecx: &InterpretCx<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>,
- mut error: EvalError<'tcx>
+ mut error: InterpErrorInfo<'tcx>
) -> ConstEvalErr<'tcx> {
error.print_backtrace();
let stacktrace = ecx.generate_stacktrace(None);
use rustc_apfloat::ieee::{Single, Double};
use rustc::mir::interpret::{
- Scalar, EvalResult, Pointer, PointerArithmetic, InterpError,
+ Scalar, InterpResult, Pointer, PointerArithmetic, InterpError,
};
use rustc::mir::CastKind;
use rustc_apfloat::Float;
src: OpTy<'tcx, M::PointerTag>,
kind: CastKind,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
use rustc::mir::CastKind::*;
match kind {
Pointer(PointerCast::Unsize) => {
if self.tcx.has_attr(def_id, sym::rustc_args_required_const) {
bug!("reifying a fn ptr that requires const arguments");
}
- let instance: EvalResult<'tcx, _> = ty::Instance::resolve(
+ let instance: InterpResult<'tcx, _> = ty::Instance::resolve(
*self.tcx,
self.param_env,
def_id,
val: Scalar<M::PointerTag>,
src_layout: TyLayout<'tcx>,
dest_layout: TyLayout<'tcx>,
- ) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
+ ) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
use rustc::ty::TyKind::*;
trace!("Casting {:?}: {:?} to {:?}", val, src_layout.ty, dest_layout.ty);
v: u128,
src_layout: TyLayout<'tcx>,
dest_layout: TyLayout<'tcx>,
- ) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
+ ) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
let signed = src_layout.abi.is_signed();
let v = if signed {
self.sign_extend(v, src_layout)
bits: u128,
fty: FloatTy,
dest_ty: Ty<'tcx>
- ) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
+ ) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
use rustc::ty::TyKind::*;
use rustc_apfloat::FloatConvert;
match dest_ty.sty {
&self,
ptr: Pointer<M::PointerTag>,
ty: Ty<'tcx>
- ) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
+ ) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
use rustc::ty::TyKind::*;
match ty.sty {
// Casting to a reference or fn pointer is not permitted by rustc,
// The pointee types
sty: Ty<'tcx>,
dty: Ty<'tcx>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
// A<Struct> -> A<Trait> conversion
let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty);
&mut self,
src: OpTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
trace!("Unsizing {:?} into {:?}", src, dest);
match (&src.layout.ty.sty, &dest.layout.ty.sty) {
(&ty::Ref(_, s, _), &ty::Ref(_, d, _)) |
use rustc::mir::interpret::{
ErrorHandled,
GlobalId, Scalar, Pointer, FrameInfo, AllocId,
- EvalResult, InterpError,
+ InterpResult, InterpError,
truncate, sign_extend,
};
use rustc_data_structures::fx::FxHashMap;
}
impl<'tcx, Tag: Copy + 'static> LocalState<'tcx, Tag> {
- pub fn access(&self) -> EvalResult<'tcx, Operand<Tag>> {
+ pub fn access(&self) -> InterpResult<'tcx, Operand<Tag>> {
match self.value {
LocalValue::Dead => err!(DeadLocal),
LocalValue::Uninitialized =>
/// to do so; otherwise return the `MemPlace` to consult instead.
pub fn access_mut(
&mut self,
- ) -> EvalResult<'tcx, Result<&mut LocalValue<Tag>, MemPlace<Tag>>> {
+ ) -> InterpResult<'tcx, Result<&mut LocalValue<Tag>, MemPlace<Tag>>> {
match self.value {
LocalValue::Dead => err!(DeadLocal),
LocalValue::Live(Operand::Indirect(mplace)) => Ok(Err(mplace)),
for InterpretCx<'a, 'mir, 'tcx, M>
{
type Ty = Ty<'tcx>;
- type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>;
+ type TyLayout = InterpResult<'tcx, TyLayout<'tcx>>;
#[inline]
fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
pub(super) fn subst_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
&self,
substs: T,
- ) -> EvalResult<'tcx, T> {
+ ) -> InterpResult<'tcx, T> {
match self.stack.last() {
Some(frame) => Ok(self.tcx.subst_and_normalize_erasing_regions(
frame.instance.substs,
&self,
def_id: DefId,
substs: SubstsRef<'tcx>
- ) -> EvalResult<'tcx, ty::Instance<'tcx>> {
+ ) -> InterpResult<'tcx, ty::Instance<'tcx>> {
trace!("resolve: {:?}, {:#?}", def_id, substs);
trace!("param_env: {:#?}", self.param_env);
let substs = self.subst_and_normalize_erasing_regions(substs)?;
pub fn load_mir(
&self,
instance: ty::InstanceDef<'tcx>,
- ) -> EvalResult<'tcx, &'tcx mir::Body<'tcx>> {
+ ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
// do not continue if typeck errors occurred (can only occur in local crate)
let did = instance.def_id();
if did.is_local()
pub(super) fn monomorphize<T: TypeFoldable<'tcx> + Subst<'tcx>>(
&self,
t: T,
- ) -> EvalResult<'tcx, T> {
+ ) -> InterpResult<'tcx, T> {
match self.stack.last() {
Some(frame) => Ok(self.monomorphize_with_substs(t, frame.instance.substs)),
None => if t.needs_subst() {
frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
local: mir::Local,
layout: Option<TyLayout<'tcx>>,
- ) -> EvalResult<'tcx, TyLayout<'tcx>> {
+ ) -> InterpResult<'tcx, TyLayout<'tcx>> {
match frame.locals[local].layout.get() {
None => {
let layout = crate::interpret::operand::from_known_layout(layout, || {
&self,
metadata: Option<Scalar<M::PointerTag>>,
layout: TyLayout<'tcx>,
- ) -> EvalResult<'tcx, Option<(Size, Align)>> {
+ ) -> InterpResult<'tcx, Option<(Size, Align)>> {
if !layout.is_unsized() {
return Ok(Some((layout.size, layout.align.abi)));
}
pub fn size_and_align_of_mplace(
&self,
mplace: MPlaceTy<'tcx, M::PointerTag>
- ) -> EvalResult<'tcx, Option<(Size, Align)>> {
+ ) -> InterpResult<'tcx, Option<(Size, Align)>> {
self.size_and_align_of(mplace.meta, mplace.layout)
}
mir: &'mir mir::Body<'tcx>,
return_place: Option<PlaceTy<'tcx, M::PointerTag>>,
return_to_block: StackPopCleanup,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
if self.stack.len() > 0 {
info!("PAUSING({}) {}", self.cur_frame(), self.frame().instance);
}
}
}
- pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> {
+ pub(super) fn pop_stack_frame(&mut self) -> InterpResult<'tcx> {
info!("LEAVING({}) {}", self.cur_frame(), self.frame().instance);
::log_settings::settings().indentation -= 1;
let frame = self.stack.pop().expect(
pub fn storage_live(
&mut self,
local: mir::Local
- ) -> EvalResult<'tcx, LocalValue<M::PointerTag>> {
+ ) -> InterpResult<'tcx, LocalValue<M::PointerTag>> {
assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
trace!("{:?} is now live", local);
pub(super) fn deallocate_local(
&mut self,
local: LocalValue<M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
// FIXME: should we tell the user that there was a local which was never written to?
if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
trace!("deallocating local");
pub fn const_eval_raw(
&self,
gid: GlobalId<'tcx>,
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let param_env = if self.tcx.is_static(gid.instance.def_id()) {
ty::ParamEnv::reveal_all()
} else {
use rustc::ty::layout::{LayoutOf, Primitive, Size};
use rustc::mir::BinOp;
use rustc::mir::interpret::{
- EvalResult, InterpError, Scalar,
+ InterpResult, InterpError, Scalar,
};
use super::{
name: &str,
bits: u128,
kind: Primitive,
-) -> EvalResult<'tcx, Scalar<Tag>> {
+) -> InterpResult<'tcx, Scalar<Tag>> {
let size = match kind {
Primitive::Int(integer, _) => integer.size(),
_ => bug!("invalid `{}` argument: {:?}", name, bits),
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, M::PointerTag>],
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, bool> {
+ ) -> InterpResult<'tcx, bool> {
let substs = instance.substs;
let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..];
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, M::PointerTag>],
dest: Option<PlaceTy<'tcx, M::PointerTag>>,
- ) -> EvalResult<'tcx, bool> {
+ ) -> InterpResult<'tcx, bool> {
let def_id = instance.def_id();
// Some fn calls are actually BinOp intrinsics
if let Some((op, oflo)) = self.tcx.is_binop_lang_item(def_id) {
use rustc::ty::{self, query::TyCtxtAt};
use super::{
- Allocation, AllocId, EvalResult, Scalar, AllocationExtra,
+ Allocation, AllocId, InterpResult, Scalar, AllocationExtra,
InterpretCx, PlaceTy, OpTy, ImmTy, MemoryKind,
};
/// Called before a basic block terminator is executed.
/// You can use this to detect endlessly running programs.
- fn before_terminator(ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>) -> EvalResult<'tcx>;
+ fn before_terminator(ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>) -> InterpResult<'tcx>;
/// Entry point to all function calls.
///
args: &[OpTy<'tcx, Self::PointerTag>],
dest: Option<PlaceTy<'tcx, Self::PointerTag>>,
ret: Option<mir::BasicBlock>,
- ) -> EvalResult<'tcx, Option<&'mir mir::Body<'tcx>>>;
+ ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>>;
/// Directly process an intrinsic without pushing a stack frame.
/// If this returns successfully, the engine will take care of jumping to the next block.
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, Self::PointerTag>],
dest: PlaceTy<'tcx, Self::PointerTag>,
- ) -> EvalResult<'tcx>;
+ ) -> InterpResult<'tcx>;
/// Called for read access to a foreign static item.
///
fn find_foreign_static(
def_id: DefId,
tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
- ) -> EvalResult<'tcx, Cow<'tcx, Allocation>>;
+ ) -> InterpResult<'tcx, Cow<'tcx, Allocation>>;
/// Called for all binary operations on integer(-like) types when one operand is a pointer
/// value, and for the `Offset` operation that is inherently about pointers.
bin_op: mir::BinOp,
left: ImmTy<'tcx, Self::PointerTag>,
right: ImmTy<'tcx, Self::PointerTag>,
- ) -> EvalResult<'tcx, (Scalar<Self::PointerTag>, bool)>;
+ ) -> InterpResult<'tcx, (Scalar<Self::PointerTag>, bool)>;
/// Heap allocations via the `box` keyword.
fn box_alloc(
ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>,
dest: PlaceTy<'tcx, Self::PointerTag>,
- ) -> EvalResult<'tcx>;
+ ) -> InterpResult<'tcx>;
/// Called to initialize the "extra" state of an allocation and make the pointers
/// it contains (in relocations) tagged. The way we construct allocations is
_ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>,
_kind: mir::RetagKind,
_place: PlaceTy<'tcx, Self::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
Ok(())
}
/// Called immediately before a new stack frame got pushed
fn stack_push(
ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>,
- ) -> EvalResult<'tcx, Self::FrameExtra>;
+ ) -> InterpResult<'tcx, Self::FrameExtra>;
/// Called immediately after a stack frame gets popped
fn stack_pop(
ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>,
extra: Self::FrameExtra,
- ) -> EvalResult<'tcx>;
+ ) -> InterpResult<'tcx>;
}
use super::{
Pointer, AllocId, Allocation, GlobalId, AllocationExtra,
- EvalResult, Scalar, InterpError, GlobalAlloc, PointerArithmetic,
+ InterpResult, Scalar, InterpError, GlobalAlloc, PointerArithmetic,
Machine, AllocMap, MayLeak, ErrorHandled, CheckInAllocMsg, InboundsCheck,
};
new_size: Size,
new_align: Align,
kind: MemoryKind<M::MemoryKinds>,
- ) -> EvalResult<'tcx, Pointer<M::PointerTag>> {
+ ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
if ptr.offset.bytes() != 0 {
return err!(ReallocateNonBasePtr);
}
}
/// Deallocate a local, or do nothing if that local has been made into a static
- pub fn deallocate_local(&mut self, ptr: Pointer<M::PointerTag>) -> EvalResult<'tcx> {
+ pub fn deallocate_local(&mut self, ptr: Pointer<M::PointerTag>) -> InterpResult<'tcx> {
// The allocation might already have been removed by static interning.
// This can only really happen in the CTFE instance, not in miri.
if self.alloc_map.contains_key(&ptr.alloc_id) {
ptr: Pointer<M::PointerTag>,
size_and_align: Option<(Size, Align)>,
kind: MemoryKind<M::MemoryKinds>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
trace!("deallocating: {}", ptr.alloc_id);
if ptr.offset.bytes() != 0 {
&self,
ptr: Scalar<M::PointerTag>,
required_align: Align
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
// Check non-NULL/Undef, extract offset
let (offset, alloc_align) = match ptr.to_bits_or_ptr(self.pointer_size(), self) {
Err(ptr) => {
ptr: Pointer<M::PointerTag>,
liveness: InboundsCheck,
msg: CheckInAllocMsg,
- ) -> EvalResult<'tcx, Align> {
+ ) -> InterpResult<'tcx, Align> {
let (allocation_size, align) = self.get_size_and_align(ptr.alloc_id, liveness)?;
ptr.check_in_alloc(allocation_size, msg)?;
Ok(align)
id: AllocId,
tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
memory_extra: &M::MemoryExtra,
- ) -> EvalResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
+ ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
let alloc = tcx.alloc_map.lock().get(id);
let alloc = match alloc {
Some(GlobalAlloc::Memory(mem)) =>
).0)
}
- pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
+ pub fn get(
+ &self,
+ id: AllocId,
+ ) -> InterpResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
// The error type of the inner closure here is somewhat funny. We have two
// ways of "erroring": An actual error, or because we got a reference from
// `get_static_alloc` that we can actually use directly without inserting anything anywhere.
- // So the error type is `EvalResult<'tcx, &Allocation<M::PointerTag>>`.
+ // So the error type is `InterpResult<'tcx, &Allocation<M::PointerTag>>`.
let a = self.alloc_map.get_or(id, || {
let alloc = Self::get_static_alloc(id, self.tcx, &self.extra).map_err(Err)?;
match alloc {
pub fn get_mut(
&mut self,
id: AllocId,
- ) -> EvalResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
+ ) -> InterpResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
let tcx = self.tcx;
let memory_extra = &self.extra;
let a = self.alloc_map.get_mut_or(id, || {
&self,
id: AllocId,
liveness: InboundsCheck,
- ) -> EvalResult<'static, (Size, Align)> {
+ ) -> InterpResult<'static, (Size, Align)> {
if let Ok(alloc) = self.get(id) {
return Ok((Size::from_bytes(alloc.bytes.len() as u64), alloc.align));
}
}
}
- pub fn get_fn(&self, ptr: Pointer<M::PointerTag>) -> EvalResult<'tcx, Instance<'tcx>> {
+ pub fn get_fn(&self, ptr: Pointer<M::PointerTag>) -> InterpResult<'tcx, Instance<'tcx>> {
if ptr.offset.bytes() != 0 {
return err!(InvalidFunctionPointer);
}
}
}
- pub fn mark_immutable(&mut self, id: AllocId) -> EvalResult<'tcx> {
+ pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
self.get_mut(id)?.mutability = Mutability::Immutable;
Ok(())
}
&self,
ptr: Scalar<M::PointerTag>,
size: Size,
- ) -> EvalResult<'tcx, &[u8]> {
+ ) -> InterpResult<'tcx, &[u8]> {
if size.bytes() == 0 {
Ok(&[])
} else {
&mut self,
alloc_id: AllocId,
mutability: Mutability,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
trace!(
"mark_static_initialized {:?}, mutability: {:?}",
alloc_id,
dest_align: Align,
size: Size,
nonoverlapping: bool,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
self.copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
}
size: Size,
length: u64,
nonoverlapping: bool,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
self.check_align(src, src_align)?;
self.check_align(dest, dest_align)?;
if size.bytes() == 0 {
dest: Pointer<M::PointerTag>,
size: Size,
repeat: u64,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
// The bits have to be saved locally before writing to dest in case src and dest overlap.
assert_eq!(size.bytes() as usize as u64, size.bytes());
use rustc::mir::interpret::{
GlobalId, AllocId, CheckInAllocMsg,
ConstValue, Pointer, Scalar,
- EvalResult, InterpError, InboundsCheck,
+ InterpResult, InterpError, InboundsCheck,
sign_extend, truncate,
};
use super::{
}
#[inline]
- pub fn to_scalar(self) -> EvalResult<'tcx, Scalar<Tag>> {
+ pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Tag>> {
self.to_scalar_or_undef().not_undef()
}
#[inline]
- pub fn to_scalar_pair(self) -> EvalResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
+ pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
match self {
Immediate::Scalar(..) => bug!("Got a thin pointer where a scalar pair was expected"),
Immediate::ScalarPair(a, b) => Ok((a.not_undef()?, b.not_undef()?))
/// Converts the immediate into a pointer (or a pointer-sized integer).
/// Throws away the second half of a ScalarPair!
#[inline]
- pub fn to_scalar_ptr(self) -> EvalResult<'tcx, Scalar<Tag>> {
+ pub fn to_scalar_ptr(self) -> InterpResult<'tcx, Scalar<Tag>> {
match self {
Immediate::Scalar(ptr) |
Immediate::ScalarPair(ptr, _) => ptr.not_undef(),
/// Converts the value into its metadata.
/// Throws away the first half of a ScalarPair!
#[inline]
- pub fn to_meta(self) -> EvalResult<'tcx, Option<Scalar<Tag>>> {
+ pub fn to_meta(self) -> InterpResult<'tcx, Option<Scalar<Tag>>> {
Ok(match self {
Immediate::Scalar(_) => None,
Immediate::ScalarPair(_, meta) => Some(meta.not_undef()?),
}
#[inline]
- pub fn to_bits(self) -> EvalResult<'tcx, u128> {
+ pub fn to_bits(self) -> InterpResult<'tcx, u128> {
self.to_scalar()?.to_bits(self.layout.size)
}
}
#[inline(always)]
pub(super) fn from_known_layout<'tcx>(
layout: Option<TyLayout<'tcx>>,
- compute: impl FnOnce() -> EvalResult<'tcx, TyLayout<'tcx>>
-) -> EvalResult<'tcx, TyLayout<'tcx>> {
+ compute: impl FnOnce() -> InterpResult<'tcx, TyLayout<'tcx>>
+) -> InterpResult<'tcx, TyLayout<'tcx>> {
match layout {
None => compute(),
Some(layout) => {
fn try_read_immediate_from_mplace(
&self,
mplace: MPlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, Option<Immediate<M::PointerTag>>> {
+ ) -> InterpResult<'tcx, Option<Immediate<M::PointerTag>>> {
if mplace.layout.is_unsized() {
// Don't touch unsized
return Ok(None);
pub(crate) fn try_read_immediate(
&self,
src: OpTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, Result<Immediate<M::PointerTag>, MemPlace<M::PointerTag>>> {
+ ) -> InterpResult<'tcx, Result<Immediate<M::PointerTag>, MemPlace<M::PointerTag>>> {
Ok(match src.try_as_mplace() {
Ok(mplace) => {
if let Some(val) = self.try_read_immediate_from_mplace(mplace)? {
pub fn read_immediate(
&self,
op: OpTy<'tcx, M::PointerTag>
- ) -> EvalResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
if let Ok(imm) = self.try_read_immediate(op)? {
Ok(ImmTy { imm, layout: op.layout })
} else {
pub fn read_scalar(
&self,
op: OpTy<'tcx, M::PointerTag>
- ) -> EvalResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
+ ) -> InterpResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
Ok(self.read_immediate(op)?.to_scalar_or_undef())
}
pub fn read_str(
&self,
mplace: MPlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, &str> {
+ ) -> InterpResult<'tcx, &str> {
let len = mplace.len(self)?;
let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len as u64))?;
let str = ::std::str::from_utf8(bytes)
&self,
op: OpTy<'tcx, M::PointerTag>,
field: u64,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
let base = match op.try_as_mplace() {
Ok(mplace) => {
// The easy case
&self,
op: OpTy<'tcx, M::PointerTag>,
variant: VariantIdx,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
// Downcasts only change the layout
Ok(match op.try_as_mplace() {
Ok(mplace) => {
&self,
base: OpTy<'tcx, M::PointerTag>,
proj_elem: &mir::PlaceElem<'tcx>,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
use rustc::mir::ProjectionElem::*;
Ok(match *proj_elem {
Field(field, _) => self.operand_field(base, field.index() as u64)?,
frame: &super::Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
local: mir::Local,
layout: Option<TyLayout<'tcx>>,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
assert_ne!(local, mir::RETURN_PLACE);
let layout = self.layout_of_local(frame, local, layout)?;
let op = if layout.is_zst() {
pub fn place_to_op(
&self,
place: PlaceTy<'tcx, M::PointerTag>
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
let op = match *place {
Place::Ptr(mplace) => {
Operand::Indirect(mplace)
&self,
mir_place: &mir::Place<'tcx>,
layout: Option<TyLayout<'tcx>>,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
use rustc::mir::Place;
use rustc::mir::PlaceBase;
&self,
mir_op: &mir::Operand<'tcx>,
layout: Option<TyLayout<'tcx>>,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
use rustc::mir::Operand::*;
let op = match *mir_op {
// FIXME: do some more logic on `move` to invalidate the old location
pub(super) fn eval_operands(
&self,
ops: &[mir::Operand<'tcx>],
- ) -> EvalResult<'tcx, Vec<OpTy<'tcx, M::PointerTag>>> {
+ ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::PointerTag>>> {
ops.into_iter()
.map(|op| self.eval_operand(op, None))
.collect()
&self,
val: &'tcx ty::Const<'tcx>,
layout: Option<TyLayout<'tcx>>,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
let tag_scalar = |scalar| match scalar {
Scalar::Ptr(ptr) => Scalar::Ptr(self.tag_static_base_pointer(ptr)),
Scalar::Raw { data, size } => Scalar::Raw { data, size },
pub fn read_discriminant(
&self,
rval: OpTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, (u128, VariantIdx)> {
+ ) -> InterpResult<'tcx, (u128, VariantIdx)> {
trace!("read_discriminant_value {:#?}", rval.layout);
let (discr_kind, discr_index) = match rval.layout.variants {
use syntax::ast::FloatTy;
use rustc_apfloat::ieee::{Double, Single};
use rustc_apfloat::Float;
-use rustc::mir::interpret::{EvalResult, Scalar};
+use rustc::mir::interpret::{InterpResult, Scalar};
use super::{InterpretCx, PlaceTy, Immediate, Machine, ImmTy};
left: ImmTy<'tcx, M::PointerTag>,
right: ImmTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
let (val, overflowed) = self.binary_op(op, left, right)?;
let val = Immediate::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
self.write_immediate(val, dest)
left: ImmTy<'tcx, M::PointerTag>,
right: ImmTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
let (val, _overflowed) = self.binary_op(op, left, right)?;
self.write_scalar(val, dest)
}
bin_op: mir::BinOp,
l: char,
r: char,
- ) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
+ ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool)> {
use rustc::mir::BinOp::*;
let res = match bin_op {
bin_op: mir::BinOp,
l: bool,
r: bool,
- ) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
+ ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool)> {
use rustc::mir::BinOp::*;
let res = match bin_op {
// passing in raw bits
l: u128,
r: u128,
- ) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
+ ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool)> {
use rustc::mir::BinOp::*;
macro_rules! float_math {
left_layout: TyLayout<'tcx>,
r: u128,
right_layout: TyLayout<'tcx>,
- ) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
+ ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool)> {
use rustc::mir::BinOp::*;
// Shift ops can have an RHS with a different numeric type.
bin_op: mir::BinOp,
left: ImmTy<'tcx, M::PointerTag>,
right: ImmTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
+ ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool)> {
trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
bin_op, *left, left.layout.ty, *right, right.layout.ty);
&self,
un_op: mir::UnOp,
val: ImmTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
+ ) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
use rustc::mir::UnOp::*;
let layout = val.layout;
use rustc::ty::TypeFoldable;
use super::{
- GlobalId, AllocId, Allocation, Scalar, EvalResult, Pointer, PointerArithmetic,
+ GlobalId, AllocId, Allocation, Scalar, InterpResult, Pointer, PointerArithmetic,
InterpretCx, Machine, AllocMap, AllocationExtra,
RawConst, Immediate, ImmTy, ScalarMaybeUndef, Operand, OpTy, MemoryKind, LocalValue
};
/// Extract the ptr part of the mplace
#[inline(always)]
- pub fn to_ptr(self) -> EvalResult<'tcx, Pointer<Tag>> {
+ pub fn to_ptr(self) -> InterpResult<'tcx, Pointer<Tag>> {
// At this point, we forget about the alignment information --
// the place has been turned into a reference, and no matter where it came from,
// it now must be aligned.
offset: Size,
meta: Option<Scalar<Tag>>,
cx: &impl HasDataLayout,
- ) -> EvalResult<'tcx, Self> {
+ ) -> InterpResult<'tcx, Self> {
Ok(MemPlace {
ptr: self.ptr.ptr_offset(offset, cx)?,
align: self.align.restrict_for_offset(offset),
meta: Option<Scalar<Tag>>,
layout: TyLayout<'tcx>,
cx: &impl HasDataLayout,
- ) -> EvalResult<'tcx, Self> {
+ ) -> InterpResult<'tcx, Self> {
Ok(MPlaceTy {
mplace: self.mplace.offset(offset, meta, cx)?,
layout,
}
#[inline]
- pub(super) fn len(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, u64> {
+ pub(super) fn len(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
if self.layout.is_unsized() {
// We need to consult `meta` metadata
match self.layout.ty.sty {
}
#[inline]
- pub(super) fn vtable(self) -> EvalResult<'tcx, Pointer<Tag>> {
+ pub(super) fn vtable(self) -> InterpResult<'tcx, Pointer<Tag>> {
match self.layout.ty.sty {
ty::Dynamic(..) => self.mplace.meta.unwrap().to_ptr(),
_ => bug!("vtable not supported on type {:?}", self.layout.ty),
}
#[inline]
- pub fn to_ptr(self) -> EvalResult<'tcx, Pointer<Tag>> {
+ pub fn to_ptr(self) -> InterpResult<'tcx, Pointer<Tag>> {
self.to_mem_place().to_ptr()
}
}
pub fn ref_to_mplace(
&self,
val: ImmTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let pointee_type = val.layout.ty.builtin_deref(true).unwrap().ty;
let layout = self.layout_of(pointee_type)?;
pub fn deref_operand(
&self,
src: OpTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let val = self.read_immediate(src)?;
trace!("deref to {} on {:?}", val.layout.ty, *val);
self.ref_to_mplace(val)
&self,
base: MPlaceTy<'tcx, M::PointerTag>,
field: u64,
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
// Not using the layout method because we want to compute on u64
let offset = match base.layout.fields {
layout::FieldPlacement::Arbitrary { ref offsets, .. } =>
&self,
base: MPlaceTy<'tcx, Tag>,
) ->
- EvalResult<'tcx, impl Iterator<Item=EvalResult<'tcx, MPlaceTy<'tcx, Tag>>> + 'a>
+ InterpResult<'tcx, impl Iterator<Item=InterpResult<'tcx, MPlaceTy<'tcx, Tag>>> + 'a>
{
let len = base.len(self)?; // also asserts that we have a type where this makes sense
let stride = match base.layout.fields {
base: MPlaceTy<'tcx, M::PointerTag>,
from: u64,
to: u64,
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let len = base.len(self)?; // also asserts that we have a type where this makes sense
assert!(from <= len - to);
&self,
base: MPlaceTy<'tcx, M::PointerTag>,
variant: VariantIdx,
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
// Downcasts only change the layout
assert!(base.meta.is_none());
Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..base })
&self,
base: MPlaceTy<'tcx, M::PointerTag>,
proj_elem: &mir::PlaceElem<'tcx>,
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
use rustc::mir::ProjectionElem::*;
Ok(match *proj_elem {
Field(field, _) => self.mplace_field(base, field.index() as u64)?,
&mut self,
base: PlaceTy<'tcx, M::PointerTag>,
field: u64,
- ) -> EvalResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
// FIXME: We could try to be smarter and avoid allocation for fields that span the
// entire place.
let mplace = self.force_allocation(base)?;
&self,
base: PlaceTy<'tcx, M::PointerTag>,
variant: VariantIdx,
- ) -> EvalResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
// Downcast just changes the layout
Ok(match base.place {
Place::Ptr(mplace) =>
&mut self,
base: PlaceTy<'tcx, M::PointerTag>,
proj_elem: &mir::ProjectionElem<mir::Local, Ty<'tcx>>,
- ) -> EvalResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
use rustc::mir::ProjectionElem::*;
Ok(match *proj_elem {
Field(field, _) => self.place_field(base, field.index() as u64)?,
pub(super) fn eval_static_to_mplace(
&self,
place_static: &mir::Static<'tcx>
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
use rustc::mir::StaticKind;
Ok(match place_static.kind {
pub fn eval_place(
&mut self,
mir_place: &mir::Place<'tcx>,
- ) -> EvalResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
use rustc::mir::PlaceBase;
mir_place.iterate(|place_base, place_projection| {
&mut self,
val: impl Into<ScalarMaybeUndef<M::PointerTag>>,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
self.write_immediate(Immediate::Scalar(val.into()), dest)
}
&mut self,
src: Immediate<M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
self.write_immediate_no_validate(src, dest)?;
if M::enforce_validity(self) {
&mut self,
src: Immediate<M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
if cfg!(debug_assertions) {
// This is a very common path, avoid some checks in release mode
assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
&mut self,
value: Immediate<M::PointerTag>,
dest: MPlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
let (ptr, ptr_align) = dest.to_scalar_ptr_align();
// Note that it is really important that the type here is the right one, and matches the
// type things are read at. In case `src_val` is a `ScalarPair`, we don't do any magic here
&mut self,
src: OpTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
self.copy_op_no_validate(src, dest)?;
if M::enforce_validity(self) {
&mut self,
src: OpTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
// We do NOT compare the types for equality, because well-typed code can
// actually "transmute" `&mut T` to `&T` in an assignment without a cast.
assert!(src.layout.details == dest.layout.details,
&mut self,
src: OpTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
if src.layout.details == dest.layout.details {
// Fast path: Just use normal `copy_op`
return self.copy_op(src, dest);
&mut self,
place: PlaceTy<'tcx, M::PointerTag>,
meta: Option<Scalar<M::PointerTag>>,
- ) -> EvalResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option<Size>)> {
+ ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option<Size>)> {
let (mplace, size) = match place.place {
Place::Local { frame, local } => {
match self.stack[frame].locals[local].access_mut()? {
pub fn force_allocation(
&mut self,
place: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
Ok(self.force_allocation_maybe_sized(place, None)?.0)
}
&mut self,
variant_index: VariantIdx,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
match dest.layout.variants {
layout::Variants::Single { index } => {
assert_eq!(index, variant_index);
pub fn raw_const_to_mplace(
&self,
raw: RawConst<'tcx>,
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
// This must be an allocation in `tcx`
assert!(self.tcx.alloc_map.lock().get(raw.alloc_id).is_some());
let ptr = self.tag_static_base_pointer(Pointer::from(raw.alloc_id));
/// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
/// Also return some more information so drop doesn't have to run the same code twice.
pub(super) fn unpack_dyn_trait(&self, mplace: MPlaceTy<'tcx, M::PointerTag>)
- -> EvalResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
+ -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
let vtable = mplace.vtable()?; // also sanity checks the type
let (instance, ty) = self.read_drop_type_from_vtable(vtable)?;
let layout = self.layout_of(ty)?;
use rustc::mir::interpret::{
AllocId, Pointer, Scalar,
Relocations, Allocation, UndefMask,
- EvalResult, InterpError,
+ InterpResult, InterpError,
};
use rustc::ty::{self, TyCtxt};
span: Span,
memory: &Memory<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>,
stack: &[Frame<'mir, 'tcx>],
- ) -> EvalResult<'tcx, ()> {
+ ) -> InterpResult<'tcx, ()> {
// Compute stack's hash before copying anything
let mut hcx = tcx.get_stable_hashing_context();
let mut hasher = StableHasher::<u64>::new();
use rustc::mir;
use rustc::ty::layout::LayoutOf;
-use rustc::mir::interpret::{EvalResult, Scalar, PointerArithmetic};
+use rustc::mir::interpret::{InterpResult, Scalar, PointerArithmetic};
use super::{InterpretCx, Machine};
}
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M> {
- pub fn run(&mut self) -> EvalResult<'tcx> {
+ pub fn run(&mut self) -> InterpResult<'tcx> {
while self.step()? {}
Ok(())
}
/// Returns `true` as long as there are more things to do.
///
/// This is used by [priroda](https://github.com/oli-obk/priroda)
- pub fn step(&mut self) -> EvalResult<'tcx, bool> {
+ pub fn step(&mut self) -> InterpResult<'tcx, bool> {
if self.stack.is_empty() {
return Ok(false);
}
Ok(true)
}
- fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> {
+ fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
info!("{:?}", stmt);
use rustc::mir::StatementKind::*;
&mut self,
rvalue: &mir::Rvalue<'tcx>,
place: &mir::Place<'tcx>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
let dest = self.eval_place(place)?;
use rustc::mir::Rvalue::*;
Ok(())
}
- fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> EvalResult<'tcx> {
+ fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
info!("{:?}", terminator.kind);
self.tcx.span = terminator.source_info.span;
self.memory.tcx.span = terminator.source_info.span;
use syntax::source_map::Span;
use rustc_target::spec::abi::Abi;
-use rustc::mir::interpret::{EvalResult, PointerArithmetic, InterpError, Scalar};
+use rustc::mir::interpret::{InterpResult, PointerArithmetic, InterpError, Scalar};
use super::{
InterpretCx, Machine, Immediate, OpTy, ImmTy, PlaceTy, MPlaceTy, StackPopCleanup
};
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M> {
#[inline]
- pub fn goto_block(&mut self, target: Option<mir::BasicBlock>) -> EvalResult<'tcx> {
+ pub fn goto_block(&mut self, target: Option<mir::BasicBlock>) -> InterpResult<'tcx> {
if let Some(target) = target {
self.frame_mut().block = target;
self.frame_mut().stmt = 0;
pub(super) fn eval_terminator(
&mut self,
terminator: &mir::Terminator<'tcx>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
use rustc::mir::TerminatorKind::*;
match terminator.kind {
Return => {
rust_abi: bool,
caller_arg: &mut impl Iterator<Item=OpTy<'tcx, M::PointerTag>>,
callee_arg: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
if rust_abi && callee_arg.layout.is_zst() {
// Nothing to do.
trace!("Skipping callee ZST");
args: &[OpTy<'tcx, M::PointerTag>],
dest: Option<PlaceTy<'tcx, M::PointerTag>>,
ret: Option<mir::BasicBlock>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
trace!("eval_fn_call: {:#?}", instance);
match instance.def {
.chain((0..untuple_arg.layout.fields.count()).into_iter()
.map(|i| self.operand_field(untuple_arg, i as u64))
)
- .collect::<EvalResult<'_, Vec<OpTy<'tcx, M::PointerTag>>>>()?)
+ .collect::<InterpResult<'_, Vec<OpTy<'tcx, M::PointerTag>>>>()?)
} else {
// Plain arg passing
Cow::from(args)
instance: ty::Instance<'tcx>,
span: Span,
target: mir::BasicBlock,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance);
// We take the address of the object. This may well be unaligned, which is fine
// for us here. However, unaligned accesses will probably make the actual drop
use rustc::ty::{self, Ty, Instance};
use rustc::ty::layout::{Size, Align, LayoutOf};
-use rustc::mir::interpret::{Scalar, Pointer, EvalResult, PointerArithmetic};
+use rustc::mir::interpret::{Scalar, Pointer, InterpResult, PointerArithmetic};
use super::{InterpretCx, InterpError, Machine, MemoryKind};
&mut self,
ty: Ty<'tcx>,
poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
- ) -> EvalResult<'tcx, Pointer<M::PointerTag>> {
+ ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
trace!("get_vtable(trait_ref={:?})", poly_trait_ref);
let (ty, poly_trait_ref) = self.tcx.erase_regions(&(ty, poly_trait_ref));
pub fn read_drop_type_from_vtable(
&self,
vtable: Pointer<M::PointerTag>,
- ) -> EvalResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
+ ) -> InterpResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
// we don't care about the pointee type, we just want a pointer
self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?;
let drop_fn = self.memory
pub fn read_size_and_align_from_vtable(
&self,
vtable: Pointer<M::PointerTag>,
- ) -> EvalResult<'tcx, (Size, Align)> {
+ ) -> InterpResult<'tcx, (Size, Align)> {
let pointer_size = self.pointer_size();
self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?;
let alloc = self.memory.get(vtable.alloc_id)?;
use rustc::ty;
use rustc_data_structures::fx::FxHashSet;
use rustc::mir::interpret::{
- Scalar, GlobalAlloc, EvalResult, InterpError, CheckInAllocMsg,
+ Scalar, GlobalAlloc, InterpResult, InterpError, CheckInAllocMsg,
};
use super::{
&mut self,
new_op: OpTy<'tcx, M::PointerTag>,
elem: PathElem,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
// Remember the old state
let path_len = self.path.len();
// Perform operation
old_op: OpTy<'tcx, M::PointerTag>,
field: usize,
new_op: OpTy<'tcx, M::PointerTag>
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
let elem = self.aggregate_field_path_elem(old_op.layout, field);
self.visit_elem(new_op, elem)
}
old_op: OpTy<'tcx, M::PointerTag>,
variant_id: VariantIdx,
new_op: OpTy<'tcx, M::PointerTag>
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
let name = match old_op.layout.ty.sty {
ty::Adt(adt, _) => PathElem::Variant(adt.variants[variant_id].ident.name),
// Generators also have variants
}
#[inline]
- fn visit_value(&mut self, op: OpTy<'tcx, M::PointerTag>) -> EvalResult<'tcx>
+ fn visit_value(&mut self, op: OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx>
{
trace!("visit_value: {:?}, {:?}", *op, op.layout);
// Translate some possible errors to something nicer.
}
}
- fn visit_primitive(&mut self, value: OpTy<'tcx, M::PointerTag>) -> EvalResult<'tcx>
+ fn visit_primitive(&mut self, value: OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx>
{
let value = self.ecx.read_immediate(value)?;
// Go over all the primitive types
Ok(())
}
- fn visit_uninhabited(&mut self) -> EvalResult<'tcx>
+ fn visit_uninhabited(&mut self) -> InterpResult<'tcx>
{
validation_failure!("a value of an uninhabited type", self.path)
}
&mut self,
op: OpTy<'tcx, M::PointerTag>,
layout: &layout::Scalar,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
let value = self.ecx.read_scalar(op)?;
// Determine the allowed range
let (lo, hi) = layout.valid_range.clone().into_inner();
fn visit_aggregate(
&mut self,
op: OpTy<'tcx, M::PointerTag>,
- fields: impl Iterator<Item=EvalResult<'tcx, Self::V>>,
- ) -> EvalResult<'tcx> {
+ fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
+ ) -> InterpResult<'tcx> {
match op.layout.ty.sty {
ty::Str => {
let mplace = op.to_mem_place(); // strings are never immediate
path: Vec<PathElem>,
ref_tracking: Option<&mut RefTracking<MPlaceTy<'tcx, M::PointerTag>>>,
const_mode: bool,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
trace!("validate_operand: {:?}, {:?}", *op, op.layout.ty);
// Construct a visitor
use rustc::ty::layout::{self, TyLayout, VariantIdx};
use rustc::ty;
use rustc::mir::interpret::{
- EvalResult,
+ InterpResult,
};
use super::{
fn to_op(
self,
ecx: &InterpretCx<'a, 'mir, 'tcx, M>,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>>;
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>>;
/// Creates this from an `MPlaceTy`.
fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self;
self,
ecx: &InterpretCx<'a, 'mir, 'tcx, M>,
variant: VariantIdx,
- ) -> EvalResult<'tcx, Self>;
+ ) -> InterpResult<'tcx, Self>;
/// Projects to the n-th field.
fn project_field(
self,
ecx: &InterpretCx<'a, 'mir, 'tcx, M>,
field: u64,
- ) -> EvalResult<'tcx, Self>;
+ ) -> InterpResult<'tcx, Self>;
}
// Operands and memory-places are both values.
fn to_op(
self,
_ecx: &InterpretCx<'a, 'mir, 'tcx, M>,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
Ok(self)
}
self,
ecx: &InterpretCx<'a, 'mir, 'tcx, M>,
variant: VariantIdx,
- ) -> EvalResult<'tcx, Self> {
+ ) -> InterpResult<'tcx, Self> {
ecx.operand_downcast(self, variant)
}
self,
ecx: &InterpretCx<'a, 'mir, 'tcx, M>,
field: u64,
- ) -> EvalResult<'tcx, Self> {
+ ) -> InterpResult<'tcx, Self> {
ecx.operand_field(self, field)
}
}
fn to_op(
self,
_ecx: &InterpretCx<'a, 'mir, 'tcx, M>,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
Ok(self.into())
}
self,
ecx: &InterpretCx<'a, 'mir, 'tcx, M>,
variant: VariantIdx,
- ) -> EvalResult<'tcx, Self> {
+ ) -> InterpResult<'tcx, Self> {
ecx.mplace_downcast(self, variant)
}
self,
ecx: &InterpretCx<'a, 'mir, 'tcx, M>,
field: u64,
- ) -> EvalResult<'tcx, Self> {
+ ) -> InterpResult<'tcx, Self> {
ecx.mplace_field(self, field)
}
}
// Recursive actions, ready to be overloaded.
/// Visits the given value, dispatching as appropriate to more specialized visitors.
#[inline(always)]
- fn visit_value(&mut self, v: Self::V) -> EvalResult<'tcx>
+ fn visit_value(&mut self, v: Self::V) -> InterpResult<'tcx>
{
self.walk_value(v)
}
/// Visits the given value as a union. No automatic recursion can happen here.
#[inline(always)]
- fn visit_union(&mut self, _v: Self::V) -> EvalResult<'tcx>
+ fn visit_union(&mut self, _v: Self::V) -> InterpResult<'tcx>
{
Ok(())
}
/// Visits this value as an aggregate, you are getting an iterator yielding
- /// all the fields (still in an `EvalResult`, you have to do error handling yourself).
+ /// all the fields (still in an `InterpResult`, you have to do error handling yourself).
/// Recurses into the fields.
#[inline(always)]
fn visit_aggregate(
&mut self,
v: Self::V,
- fields: impl Iterator<Item=EvalResult<'tcx, Self::V>>,
- ) -> EvalResult<'tcx> {
+ fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
+ ) -> InterpResult<'tcx> {
self.walk_aggregate(v, fields)
}
_old_val: Self::V,
_field: usize,
new_val: Self::V,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
self.visit_value(new_val)
}
_old_val: Self::V,
_variant: VariantIdx,
new_val: Self::V,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
self.visit_value(new_val)
}
/// it is meant to provide the chance for additional checks when a value of uninhabited
/// layout is detected.
#[inline(always)]
- fn visit_uninhabited(&mut self) -> EvalResult<'tcx>
+ fn visit_uninhabited(&mut self) -> InterpResult<'tcx>
{ Ok(()) }
/// Called whenever we reach a value with scalar layout.
/// We do NOT provide a `ScalarMaybeUndef` here to avoid accessing memory if the
/// it is meant to provide the chance for additional checks when a value of scalar
/// layout is detected.
#[inline(always)]
- fn visit_scalar(&mut self, _v: Self::V, _layout: &layout::Scalar) -> EvalResult<'tcx>
+ fn visit_scalar(&mut self, _v: Self::V, _layout: &layout::Scalar) -> InterpResult<'tcx>
{ Ok(()) }
/// Called whenever we reach a value of primitive type. There can be no recursion
/// We do *not* provide an `ImmTy` here because some implementations might want
/// to write to the place this primitive lives in.
#[inline(always)]
- fn visit_primitive(&mut self, _v: Self::V) -> EvalResult<'tcx>
+ fn visit_primitive(&mut self, _v: Self::V) -> InterpResult<'tcx>
{ Ok(()) }
// Default recursors. Not meant to be overloaded.
fn walk_aggregate(
&mut self,
v: Self::V,
- fields: impl Iterator<Item=EvalResult<'tcx, Self::V>>,
- ) -> EvalResult<'tcx> {
+ fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
+ ) -> InterpResult<'tcx> {
// Now iterate over it.
for (idx, field_val) in fields.enumerate() {
self.visit_field(v, idx, field_val?)?;
}
Ok(())
}
- fn walk_value(&mut self, v: Self::V) -> EvalResult<'tcx>
+ fn walk_value(&mut self, v: Self::V) -> InterpResult<'tcx>
{
trace!("walk_value: type: {}", v.layout().ty);
// If this is a multi-variant layout, we have to find the right one and proceed with
layout::FieldPlacement::Arbitrary { ref offsets, .. } => {
// FIXME: We collect in a vec because otherwise there are lifetime
// errors: Projecting to a field needs access to `ecx`.
- let fields: Vec<EvalResult<'tcx, Self::V>> =
+ let fields: Vec<InterpResult<'tcx, Self::V>> =
(0..offsets.len()).map(|i| {
v.project_field(self.ecx(), i as u64)
})
use rustc::mir::visit::{
Visitor, PlaceContext, MutatingUseContext, MutVisitor, NonMutatingUseContext,
};
-use rustc::mir::interpret::{InterpError, Scalar, GlobalId, EvalResult};
+use rustc::mir::interpret::{InterpError, Scalar, GlobalId, InterpResult};
use rustc::ty::{self, Instance, ParamEnv, Ty, TyCtxt};
use syntax_pos::{Span, DUMMY_SP};
use rustc::ty::subst::InternalSubsts;
f: F
) -> Option<T>
where
- F: FnOnce(&mut Self) -> EvalResult<'tcx, T>,
+ F: FnOnce(&mut Self) -> InterpResult<'tcx, T>,
{
self.ecx.tcx.span = source_info.span;
let lint_root = match self.source_scope_local_data {