Move miri base code to rustc::mir::interpret
author     Oliver Schneider <git-spam-no-reply9815368754983@oli-obk.de>
           Fri, 29 Sep 2017 10:43:43 +0000 (12:43 +0200)
committer  Oliver Schneider <git-spam-no-reply9815368754983@oli-obk.de>
           Fri, 29 Sep 2017 10:49:21 +0000 (12:49 +0200)
32 files changed:
src/librustc/mir/interpret/cast.rs [new file with mode: 0644]
src/librustc/mir/interpret/const_eval.rs [new file with mode: 0644]
src/librustc/mir/interpret/error.rs [new file with mode: 0644]
src/librustc/mir/interpret/eval_context.rs [new file with mode: 0644]
src/librustc/mir/interpret/lvalue.rs [new file with mode: 0644]
src/librustc/mir/interpret/machine.rs [new file with mode: 0644]
src/librustc/mir/interpret/memory.rs [new file with mode: 0644]
src/librustc/mir/interpret/mod.rs [new file with mode: 0644]
src/librustc/mir/interpret/operator.rs [new file with mode: 0644]
src/librustc/mir/interpret/range_map.rs [new file with mode: 0644]
src/librustc/mir/interpret/step.rs [new file with mode: 0644]
src/librustc/mir/interpret/terminator/drop.rs [new file with mode: 0644]
src/librustc/mir/interpret/terminator/mod.rs [new file with mode: 0644]
src/librustc/mir/interpret/traits.rs [new file with mode: 0644]
src/librustc/mir/interpret/validation.rs [new file with mode: 0644]
src/librustc/mir/interpret/value.rs [new file with mode: 0644]
src/librustc_mir/interpret/cast.rs [deleted file]
src/librustc_mir/interpret/const_eval.rs [deleted file]
src/librustc_mir/interpret/error.rs [deleted file]
src/librustc_mir/interpret/eval_context.rs [deleted file]
src/librustc_mir/interpret/lvalue.rs [deleted file]
src/librustc_mir/interpret/machine.rs [deleted file]
src/librustc_mir/interpret/memory.rs [deleted file]
src/librustc_mir/interpret/mod.rs [deleted file]
src/librustc_mir/interpret/operator.rs [deleted file]
src/librustc_mir/interpret/range_map.rs [deleted file]
src/librustc_mir/interpret/step.rs [deleted file]
src/librustc_mir/interpret/terminator/drop.rs [deleted file]
src/librustc_mir/interpret/terminator/mod.rs [deleted file]
src/librustc_mir/interpret/traits.rs [deleted file]
src/librustc_mir/interpret/validation.rs [deleted file]
src/librustc_mir/interpret/value.rs [deleted file]

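For code that consumed the interpreter from its old location, only the import path changes; the files move verbatim. A minimal before/after sketch (hypothetical consumer code, assuming the directory layout above is re-exported as-is through `rustc::mir::interpret`):

    // Before this commit, the interpreter lived in librustc_mir:
    // use rustc_mir::interpret::{EvalContext, Machine, EvalResult};

    // After it, the same items are reached through librustc:
    use rustc::mir::interpret::{EvalContext, Machine, EvalResult};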
diff --git a/src/librustc/mir/interpret/cast.rs b/src/librustc/mir/interpret/cast.rs
new file mode 100644 (file)
index 0000000..5ae7c9d
--- /dev/null
@@ -0,0 +1,122 @@
+use rustc::ty::{self, Ty};
+use syntax::ast::{FloatTy, IntTy, UintTy};
+
+use super::{PrimVal, EvalContext, EvalResult, MemoryPointer, PointerArithmetic, Machine};
+
+impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
+    pub(super) fn cast_primval(
+        &self,
+        val: PrimVal,
+        src_ty: Ty<'tcx>,
+        dest_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx, PrimVal> {
+        trace!("Casting {:?}: {:?} to {:?}", val, src_ty, dest_ty);
+        let src_kind = self.ty_to_primval_kind(src_ty)?;
+
+        match val {
+            PrimVal::Undef => Ok(PrimVal::Undef),
+            PrimVal::Ptr(ptr) => self.cast_from_ptr(ptr, dest_ty),
+            val @ PrimVal::Bytes(_) => {
+                use super::PrimValKind::*;
+                match src_kind {
+                    F32 => self.cast_from_float(val.to_f32()? as f64, dest_ty),
+                    F64 => self.cast_from_float(val.to_f64()?, dest_ty),
+
+                    I8 | I16 | I32 | I64 | I128 => {
+                        self.cast_from_signed_int(val.to_i128()?, dest_ty)
+                    }
+
+                    Bool | Char | U8 | U16 | U32 | U64 | U128 | FnPtr | Ptr => {
+                        self.cast_from_int(val.to_u128()?, dest_ty, false)
+                    }
+                }
+            }
+        }
+    }
+
+    fn cast_from_signed_int(&self, val: i128, ty: ty::Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
+        self.cast_from_int(val as u128, ty, val < 0)
+    }
+
+    fn int_to_int(&self, v: i128, ty: IntTy) -> u128 {
+        match ty {
+            IntTy::I8 => v as i8 as u128,
+            IntTy::I16 => v as i16 as u128,
+            IntTy::I32 => v as i32 as u128,
+            IntTy::I64 => v as i64 as u128,
+            IntTy::I128 => v as u128,
+            IntTy::Is => {
+                let ty = self.tcx.sess.target.isize_ty;
+                self.int_to_int(v, ty)
+            }
+        }
+    }
+    fn int_to_uint(&self, v: u128, ty: UintTy) -> u128 {
+        match ty {
+            UintTy::U8 => v as u8 as u128,
+            UintTy::U16 => v as u16 as u128,
+            UintTy::U32 => v as u32 as u128,
+            UintTy::U64 => v as u64 as u128,
+            UintTy::U128 => v,
+            UintTy::Us => {
+                let ty = self.tcx.sess.target.usize_ty;
+                self.int_to_uint(v, ty)
+            }
+        }
+    }
+
+    fn cast_from_int(
+        &self,
+        v: u128,
+        ty: ty::Ty<'tcx>,
+        negative: bool,
+    ) -> EvalResult<'tcx, PrimVal> {
+        trace!("cast_from_int: {}, {}, {}", v, ty, negative);
+        use rustc::ty::TypeVariants::*;
+        match ty.sty {
+            // Casts to bool are not permitted by rustc, no need to handle them here.
+            TyInt(ty) => Ok(PrimVal::Bytes(self.int_to_int(v as i128, ty))),
+            TyUint(ty) => Ok(PrimVal::Bytes(self.int_to_uint(v, ty))),
+
+            TyFloat(FloatTy::F64) if negative => Ok(PrimVal::from_f64(v as i128 as f64)),
+            TyFloat(FloatTy::F64) => Ok(PrimVal::from_f64(v as f64)),
+            TyFloat(FloatTy::F32) if negative => Ok(PrimVal::from_f32(v as i128 as f32)),
+            TyFloat(FloatTy::F32) => Ok(PrimVal::from_f32(v as f32)),
+
+            TyChar if v as u8 as u128 == v => Ok(PrimVal::Bytes(v)),
+            TyChar => err!(InvalidChar(v)),
+
+            // No alignment check needed for raw pointers.  But we have to truncate to target ptr size.
+            TyRawPtr(_) => Ok(PrimVal::Bytes(self.memory.truncate_to_ptr(v).0 as u128)),
+
+            _ => err!(Unimplemented(format!("int to {:?} cast", ty))),
+        }
+    }
+
+    fn cast_from_float(&self, val: f64, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
+        use rustc::ty::TypeVariants::*;
+        match ty.sty {
+            // Casting negative floats to unsigned integers yields zero.
+            TyUint(_) if val < 0.0 => self.cast_from_int(0, ty, false),
+            TyInt(_) if val < 0.0 => self.cast_from_int(val as i128 as u128, ty, true),
+
+            TyInt(_) | TyUint(_) => self.cast_from_int(val as u128, ty, false),
+
+            TyFloat(FloatTy::F64) => Ok(PrimVal::from_f64(val)),
+            TyFloat(FloatTy::F32) => Ok(PrimVal::from_f32(val as f32)),
+            _ => err!(Unimplemented(format!("float to {:?} cast", ty))),
+        }
+    }
+
+    fn cast_from_ptr(&self, ptr: MemoryPointer, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
+        use rustc::ty::TypeVariants::*;
+        match ty.sty {
+            // Casting to a reference or fn pointer is not permitted by rustc, no need to support it here.
+            TyRawPtr(_) |
+            TyInt(IntTy::Is) |
+            TyUint(UintTy::Us) => Ok(PrimVal::Ptr(ptr)),
+            TyInt(_) | TyUint(_) => err!(ReadPointerAsBytes),
+            _ => err!(Unimplemented(format!("ptr to {:?} cast", ty))),
+        }
+    }
+}
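The `int_to_int` and `int_to_uint` helpers above lean entirely on Rust's `as` semantics: a cast to the narrower type truncates, and the cast back to `u128` sign-extends for signed types and zero-extends for unsigned ones. A standalone illustration of that round-trip, using plain integers instead of the interpreter's `PrimVal` (runnable by itself; nothing here is part of the commit):

    fn main() {
        // Signed path: `v as i8` truncates to 8 bits, and the further
        // `as u128` sign-extends, so -1 keeps all 128 bits set.
        let v: i128 = -1;
        assert_eq!(v as i8 as u128, u128::MAX);
        // 300 truncates to 44 (300 mod 256), which is positive,
        // so no sign extension happens.
        let v: i128 = 300;
        assert_eq!(v as i8 as u128, 44);

        // Unsigned path: truncate, then zero-extend.
        let u: u128 = 300;
        assert_eq!(u as u8 as u128, 44);
        assert_eq!(u128::MAX as u8 as u128, 0xff);
    }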
diff --git a/src/librustc/mir/interpret/const_eval.rs b/src/librustc/mir/interpret/const_eval.rs
new file mode 100644 (file)
index 0000000..075880f
--- /dev/null
@@ -0,0 +1,259 @@
+use rustc::traits::Reveal;
+use rustc::ty::{self, TyCtxt, Ty, Instance, layout};
+use rustc::mir;
+
+use syntax::ast::Mutability;
+use syntax::codemap::Span;
+
+use super::{EvalResult, EvalError, EvalErrorKind, GlobalId, Lvalue, Value, PrimVal, EvalContext,
+            StackPopCleanup, PtrAndAlign, MemoryKind, ValTy};
+
+use rustc_const_math::ConstInt;
+
+use std::fmt;
+use std::error::Error;
+
+pub fn eval_body_as_primval<'a, 'tcx>(
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    instance: Instance<'tcx>,
+) -> EvalResult<'tcx, (PrimVal, Ty<'tcx>)> {
+    let limits = super::ResourceLimits::default();
+    let mut ecx = EvalContext::<CompileTimeFunctionEvaluator>::new(tcx, limits, (), ());
+    let cid = GlobalId {
+        instance,
+        promoted: None,
+    };
+    if ecx.tcx.has_attr(instance.def_id(), "linkage") {
+        return Err(ConstEvalError::NotConst("extern global".to_string()).into());
+    }
+
+    let mir = ecx.load_mir(instance.def)?;
+    if !ecx.globals.contains_key(&cid) {
+        let size = ecx.type_size_with_substs(mir.return_ty, instance.substs)?
+            .expect("unsized global");
+        let align = ecx.type_align_with_substs(mir.return_ty, instance.substs)?;
+        let ptr = ecx.memory.allocate(
+            size,
+            align,
+            MemoryKind::UninitializedStatic,
+        )?;
+        let aligned = !ecx.is_packed(mir.return_ty)?;
+        ecx.globals.insert(
+            cid,
+            PtrAndAlign {
+                ptr: ptr.into(),
+                aligned,
+            },
+        );
+        let mutable = !mir.return_ty.is_freeze(
+            ecx.tcx,
+            ty::ParamEnv::empty(Reveal::All),
+            mir.span,
+        );
+        let mutability = if mutable {
+            Mutability::Mutable
+        } else {
+            Mutability::Immutable
+        };
+        let cleanup = StackPopCleanup::MarkStatic(mutability);
+        let name = ty::tls::with(|tcx| tcx.item_path_str(instance.def_id()));
+        trace!("const_eval: pushing stack frame for global: {}", name);
+        ecx.push_stack_frame(
+            instance,
+            mir.span,
+            mir,
+            Lvalue::from_ptr(ptr),
+            cleanup,
+        )?;
+
+        while ecx.step()? {}
+    }
+    let value = Value::ByRef(*ecx.globals.get(&cid).expect("global not cached"));
+    let valty = ValTy {
+        value,
+        ty: mir.return_ty,
+    };
+    Ok((ecx.value_to_primval(valty)?, mir.return_ty))
+}
+
+pub fn eval_body_as_integer<'a, 'tcx>(
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    instance: Instance<'tcx>,
+) -> EvalResult<'tcx, ConstInt> {
+    let (prim, ty) = eval_body_as_primval(tcx, instance)?;
+    let prim = prim.to_bytes()?;
+    use syntax::ast::{IntTy, UintTy};
+    use rustc::ty::TypeVariants::*;
+    use rustc_const_math::{ConstIsize, ConstUsize};
+    Ok(match ty.sty {
+        TyInt(IntTy::I8) => ConstInt::I8(prim as i128 as i8),
+        TyInt(IntTy::I16) => ConstInt::I16(prim as i128 as i16),
+        TyInt(IntTy::I32) => ConstInt::I32(prim as i128 as i32),
+        TyInt(IntTy::I64) => ConstInt::I64(prim as i128 as i64),
+        TyInt(IntTy::I128) => ConstInt::I128(prim as i128),
+        TyInt(IntTy::Is) => ConstInt::Isize(
+            ConstIsize::new(prim as i128 as i64, tcx.sess.target.isize_ty)
+                .expect("miri should already have errored"),
+        ),
+        TyUint(UintTy::U8) => ConstInt::U8(prim as u8),
+        TyUint(UintTy::U16) => ConstInt::U16(prim as u16),
+        TyUint(UintTy::U32) => ConstInt::U32(prim as u32),
+        TyUint(UintTy::U64) => ConstInt::U64(prim as u64),
+        TyUint(UintTy::U128) => ConstInt::U128(prim),
+        TyUint(UintTy::Us) => ConstInt::Usize(
+            ConstUsize::new(prim as u64, tcx.sess.target.usize_ty)
+                .expect("miri should already have errored"),
+        ),
+        _ => {
+            return Err(
+                ConstEvalError::NeedsRfc(
+                    "evaluating anything other than isize/usize during typeck".to_string(),
+                ).into(),
+            )
+        }
+    })
+}
+
+struct CompileTimeFunctionEvaluator;
+
+impl<'tcx> Into<EvalError<'tcx>> for ConstEvalError {
+    fn into(self) -> EvalError<'tcx> {
+        EvalErrorKind::MachineError(Box::new(self)).into()
+    }
+}
+
+#[derive(Clone, Debug)]
+enum ConstEvalError {
+    NeedsRfc(String),
+    NotConst(String),
+}
+
+impl fmt::Display for ConstEvalError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        use self::ConstEvalError::*;
+        match *self {
+            NeedsRfc(ref msg) => {
+                write!(
+                    f,
+                    "\"{}\" needs an rfc before being allowed inside constants",
+                    msg
+                )
+            }
+            NotConst(ref msg) => write!(f, "Cannot evaluate within constants: \"{}\"", msg),
+        }
+    }
+}
+
+impl Error for ConstEvalError {
+    fn description(&self) -> &str {
+        use self::ConstEvalError::*;
+        match *self {
+            NeedsRfc(_) => "this feature needs an rfc before being allowed inside constants",
+            NotConst(_) => "this feature is not compatible with constant evaluation",
+        }
+    }
+
+    fn cause(&self) -> Option<&Error> {
+        None
+    }
+}
+
+impl<'tcx> super::Machine<'tcx> for CompileTimeFunctionEvaluator {
+    type Data = ();
+    type MemoryData = ();
+    type MemoryKinds = !;
+    fn eval_fn_call<'a>(
+        ecx: &mut EvalContext<'a, 'tcx, Self>,
+        instance: ty::Instance<'tcx>,
+        destination: Option<(Lvalue, mir::BasicBlock)>,
+        _args: &[ValTy<'tcx>],
+        span: Span,
+        _sig: ty::FnSig<'tcx>,
+    ) -> EvalResult<'tcx, bool> {
+        if !ecx.tcx.is_const_fn(instance.def_id()) {
+            return Err(
+                ConstEvalError::NotConst(format!("calling non-const fn `{}`", instance)).into(),
+            );
+        }
+        let mir = match ecx.load_mir(instance.def) {
+            Ok(mir) => mir,
+            Err(EvalError { kind: EvalErrorKind::NoMirFor(path), .. }) => {
+                // some simple things like `malloc` might get accepted in the future
+                return Err(
+                    ConstEvalError::NeedsRfc(format!("calling extern function `{}`", path))
+                        .into(),
+                );
+            }
+            Err(other) => return Err(other),
+        };
+        let (return_lvalue, return_to_block) = match destination {
+            Some((lvalue, block)) => (lvalue, StackPopCleanup::Goto(block)),
+            None => (Lvalue::undef(), StackPopCleanup::None),
+        };
+
+        ecx.push_stack_frame(
+            instance,
+            span,
+            mir,
+            return_lvalue,
+            return_to_block,
+        )?;
+
+        Ok(false)
+    }
+
+    fn call_intrinsic<'a>(
+        _ecx: &mut EvalContext<'a, 'tcx, Self>,
+        _instance: ty::Instance<'tcx>,
+        _args: &[ValTy<'tcx>],
+        _dest: Lvalue,
+        _dest_ty: Ty<'tcx>,
+        _dest_layout: &'tcx layout::Layout,
+        _target: mir::BasicBlock,
+    ) -> EvalResult<'tcx> {
+        Err(
+            ConstEvalError::NeedsRfc("calling intrinsics".to_string()).into(),
+        )
+    }
+
+    fn try_ptr_op<'a>(
+        _ecx: &EvalContext<'a, 'tcx, Self>,
+        _bin_op: mir::BinOp,
+        left: PrimVal,
+        _left_ty: Ty<'tcx>,
+        right: PrimVal,
+        _right_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx, Option<(PrimVal, bool)>> {
+        if left.is_bytes() && right.is_bytes() {
+            Ok(None)
+        } else {
+            Err(
+                ConstEvalError::NeedsRfc("Pointer arithmetic or comparison".to_string()).into(),
+            )
+        }
+    }
+
+    fn mark_static_initialized(m: !) -> EvalResult<'tcx> {
+        m
+    }
+
+    fn box_alloc<'a>(
+        _ecx: &mut EvalContext<'a, 'tcx, Self>,
+        _ty: ty::Ty<'tcx>,
+        _dest: Lvalue,
+    ) -> EvalResult<'tcx> {
+        Err(
+            ConstEvalError::NeedsRfc("Heap allocations via `box` keyword".to_string()).into(),
+        )
+    }
+
+    fn global_item_with_linkage<'a>(
+        _ecx: &mut EvalContext<'a, 'tcx, Self>,
+        _instance: ty::Instance<'tcx>,
+        _mutability: Mutability,
+    ) -> EvalResult<'tcx> {
+        Err(
+            ConstEvalError::NotConst("statics with `linkage` attribute".to_string()).into(),
+        )
+    }
+}
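`CompileTimeFunctionEvaluator` is a `Machine` that rejects everything without sanctioned const semantics: intrinsics, `box` allocations, pointer arithmetic, and statics with linkage all surface as `ConstEvalError` through `EvalErrorKind::MachineError`. A hedged sketch of how a caller inside the compiler might drive the entry points above, assuming a `TyCtxt` and the constant's `Instance` are already in hand and that `mod.rs` re-exports these functions (the function below is illustrative, not part of this commit):

    use rustc::ty::{Instance, TyCtxt};
    use rustc::mir::interpret::eval_body_as_integer;

    fn report_const<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: Instance<'tcx>) {
        match eval_body_as_integer(tcx, instance) {
            // On success we get a ConstInt carrying both value and width.
            Ok(n) => println!("constant evaluated to {:?}", n),
            // Machine errors (e.g. NeedsRfc, NotConst) arrive wrapped in
            // EvalErrorKind::MachineError and render via Display.
            Err(e) => eprintln!("const eval failed: {}", e),
        }
    }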
diff --git a/src/librustc/mir/interpret/error.rs b/src/librustc/mir/interpret/error.rs
new file mode 100644 (file)
index 0000000..96911c1
--- /dev/null
@@ -0,0 +1,313 @@
+use std::error::Error;
+use std::{fmt, env};
+
+use rustc::mir;
+use rustc::ty::{FnSig, Ty, layout};
+
+use super::{
+    MemoryPointer, Lock, AccessKind
+};
+
+use rustc_const_math::ConstMathErr;
+use syntax::codemap::Span;
+use backtrace::Backtrace;
+
+#[derive(Debug)]
+pub struct EvalError<'tcx> {
+    pub kind: EvalErrorKind<'tcx>,
+    pub backtrace: Option<Backtrace>,
+}
+
+impl<'tcx> From<EvalErrorKind<'tcx>> for EvalError<'tcx> {
+    fn from(kind: EvalErrorKind<'tcx>) -> Self {
+        let backtrace = match env::var("RUST_BACKTRACE") {
+            Ok(ref val) if !val.is_empty() => Some(Backtrace::new_unresolved()),
+            _ => None
+        };
+        EvalError {
+            kind,
+            backtrace,
+        }
+    }
+}
+
+#[derive(Debug)]
+pub enum EvalErrorKind<'tcx> {
+    /// This variant is used by machines to signal their own errors that do not
+    /// match an existing variant
+    MachineError(Box<Error>),
+    FunctionPointerTyMismatch(FnSig<'tcx>, FnSig<'tcx>),
+    NoMirFor(String),
+    UnterminatedCString(MemoryPointer),
+    DanglingPointerDeref,
+    DoubleFree,
+    InvalidMemoryAccess,
+    InvalidFunctionPointer,
+    InvalidBool,
+    InvalidDiscriminant,
+    PointerOutOfBounds {
+        ptr: MemoryPointer,
+        access: bool,
+        allocation_size: u64,
+    },
+    InvalidNullPointerUsage,
+    ReadPointerAsBytes,
+    ReadBytesAsPointer,
+    InvalidPointerMath,
+    ReadUndefBytes,
+    DeadLocal,
+    InvalidBoolOp(mir::BinOp),
+    Unimplemented(String),
+    DerefFunctionPointer,
+    ExecuteMemory,
+    ArrayIndexOutOfBounds(Span, u64, u64),
+    Math(Span, ConstMathErr),
+    Intrinsic(String),
+    OverflowingMath,
+    InvalidChar(u128),
+    OutOfMemory {
+        allocation_size: u64,
+        memory_size: u64,
+        memory_usage: u64,
+    },
+    ExecutionTimeLimitReached,
+    StackFrameLimitReached,
+    OutOfTls,
+    TlsOutOfBounds,
+    AbiViolation(String),
+    AlignmentCheckFailed {
+        required: u64,
+        has: u64,
+    },
+    MemoryLockViolation {
+        ptr: MemoryPointer,
+        len: u64,
+        frame: usize,
+        access: AccessKind,
+        lock: Lock,
+    },
+    MemoryAcquireConflict {
+        ptr: MemoryPointer,
+        len: u64,
+        kind: AccessKind,
+        lock: Lock,
+    },
+    InvalidMemoryLockRelease {
+        ptr: MemoryPointer,
+        len: u64,
+        frame: usize,
+        lock: Lock,
+    },
+    DeallocatedLockedMemory {
+        ptr: MemoryPointer,
+        lock: Lock,
+    },
+    ValidationFailure(String),
+    CalledClosureAsFunction,
+    VtableForArgumentlessMethod,
+    ModifiedConstantMemory,
+    AssumptionNotHeld,
+    InlineAsm,
+    TypeNotPrimitive(Ty<'tcx>),
+    ReallocatedWrongMemoryKind(String, String),
+    DeallocatedWrongMemoryKind(String, String),
+    ReallocateNonBasePtr,
+    DeallocateNonBasePtr,
+    IncorrectAllocationInformation,
+    Layout(layout::LayoutError<'tcx>),
+    HeapAllocZeroBytes,
+    HeapAllocNonPowerOfTwoAlignment(u64),
+    Unreachable,
+    Panic,
+    ReadFromReturnPointer,
+    PathNotFound(Vec<String>),
+}
+
+pub type EvalResult<'tcx, T = ()> = Result<T, EvalError<'tcx>>;
+
+impl<'tcx> Error for EvalError<'tcx> {
+    fn description(&self) -> &str {
+        use self::EvalErrorKind::*;
+        match self.kind {
+            MachineError(ref inner) => inner.description(),
+            FunctionPointerTyMismatch(..) =>
+                "tried to call a function through a function pointer of a different type",
+            InvalidMemoryAccess =>
+                "tried to access memory through an invalid pointer",
+            DanglingPointerDeref =>
+                "dangling pointer was dereferenced",
+            DoubleFree =>
+                "tried to deallocate dangling pointer",
+            InvalidFunctionPointer =>
+                "tried to use a function pointer after offsetting it",
+            InvalidBool =>
+                "invalid boolean value read",
+            InvalidDiscriminant =>
+                "invalid enum discriminant value read",
+            PointerOutOfBounds { .. } =>
+                "pointer offset outside bounds of allocation",
+            InvalidNullPointerUsage =>
+                "invalid use of NULL pointer",
+            MemoryLockViolation { .. } =>
+                "memory access conflicts with lock",
+            MemoryAcquireConflict { .. } =>
+                "new memory lock conflicts with existing lock",
+            ValidationFailure(..) =>
+                "type validation failed",
+            InvalidMemoryLockRelease { .. } =>
+                "invalid attempt to release write lock",
+            DeallocatedLockedMemory { .. } =>
+                "tried to deallocate memory in conflict with a lock",
+            ReadPointerAsBytes =>
+                "a raw memory access tried to access part of a pointer value as raw bytes",
+            ReadBytesAsPointer =>
+                "a memory access tried to interpret some bytes as a pointer",
+            InvalidPointerMath =>
+                "attempted to do invalid arithmetic on pointers that would leak base addresses, e.g. comparing pointers into different allocations",
+            ReadUndefBytes =>
+                "attempted to read undefined bytes",
+            DeadLocal =>
+                "tried to access a dead local variable",
+            InvalidBoolOp(_) =>
+                "invalid boolean operation",
+            Unimplemented(ref msg) => msg,
+            DerefFunctionPointer =>
+                "tried to dereference a function pointer",
+            ExecuteMemory =>
+                "tried to treat a memory pointer as a function pointer",
+            ArrayIndexOutOfBounds(..) =>
+                "array index out of bounds",
+            Math(..) =>
+                "mathematical operation failed",
+            Intrinsic(..) =>
+                "intrinsic failed",
+            OverflowingMath =>
+                "attempted to do overflowing math",
+            NoMirFor(..) =>
+                "mir not found",
+            InvalidChar(..) =>
+                "tried to interpret an invalid 32-bit value as a char",
+            OutOfMemory{..} =>
+                "could not allocate more memory",
+            ExecutionTimeLimitReached =>
+                "reached the configured maximum execution time",
+            StackFrameLimitReached =>
+                "reached the configured maximum number of stack frames",
+            OutOfTls =>
+                "reached the maximum number of representable TLS keys",
+            TlsOutOfBounds =>
+                "accessed an invalid (unallocated) TLS key",
+            AbiViolation(ref msg) => msg,
+            AlignmentCheckFailed{..} =>
+                "tried to execute a misaligned read or write",
+            CalledClosureAsFunction =>
+                "tried to call a closure through a function pointer",
+            VtableForArgumentlessMethod =>
+                "tried to call a vtable function without arguments",
+            ModifiedConstantMemory =>
+                "tried to modify constant memory",
+            AssumptionNotHeld =>
+                "`assume` argument was false",
+            InlineAsm =>
+                "miri does not support inline assembly",
+            TypeNotPrimitive(_) =>
+                "expected primitive type, got nonprimitive",
+            ReallocatedWrongMemoryKind(_, _) =>
+                "tried to reallocate memory from one kind to another",
+            DeallocatedWrongMemoryKind(_, _) =>
+                "tried to deallocate memory of the wrong kind",
+            ReallocateNonBasePtr =>
+                "tried to reallocate with a pointer not to the beginning of an existing object",
+            DeallocateNonBasePtr =>
+                "tried to deallocate with a pointer not to the beginning of an existing object",
+            IncorrectAllocationInformation =>
+                "tried to deallocate or reallocate using incorrect alignment or size",
+            Layout(_) =>
+                "rustc layout computation failed",
+            UnterminatedCString(_) =>
+                "attempted to get length of a null terminated string, but no null found before end of allocation",
+            HeapAllocZeroBytes =>
+                "tried to re-, de- or allocate zero bytes on the heap",
+            HeapAllocNonPowerOfTwoAlignment(_) =>
+                "tried to re-, de-, or allocate heap memory with alignment that is not a power of two",
+            Unreachable =>
+                "entered unreachable code",
+            Panic =>
+                "the evaluated program panicked",
+            ReadFromReturnPointer =>
+                "tried to read from the return pointer",
+            PathNotFound(_) =>
+                "a path could not be resolved, maybe the crate is not loaded",
+        }
+    }
+
+    fn cause(&self) -> Option<&Error> {
+        use self::EvalErrorKind::*;
+        match self.kind {
+            MachineError(ref inner) => Some(&**inner),
+            _ => None,
+        }
+    }
+}
+
+impl<'tcx> fmt::Display for EvalError<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        use self::EvalErrorKind::*;
+        match self.kind {
+            PointerOutOfBounds { ptr, access, allocation_size } => {
+                write!(f, "{} at offset {}, outside bounds of allocation {} which has size {}",
+                       if access { "memory access" } else { "pointer computed" },
+                       ptr.offset, ptr.alloc_id, allocation_size)
+            },
+            MemoryLockViolation { ptr, len, frame, access, ref lock } => {
+                write!(f, "{:?} access by frame {} at {:?}, size {}, is in conflict with lock {:?}",
+                       access, frame, ptr, len, lock)
+            }
+            MemoryAcquireConflict { ptr, len, kind, ref lock } => {
+                write!(f, "new {:?} lock at {:?}, size {}, is in conflict with lock {:?}",
+                       kind, ptr, len, lock)
+            }
+            InvalidMemoryLockRelease { ptr, len, frame, ref lock } => {
+                write!(f, "frame {} tried to release memory write lock at {:?}, size {}, but cannot release lock {:?}",
+                       frame, ptr, len, lock)
+            }
+            DeallocatedLockedMemory { ptr, ref lock } => {
+                write!(f, "tried to deallocate memory at {:?} in conflict with lock {:?}",
+                       ptr, lock)
+            }
+            ValidationFailure(ref err) => {
+                write!(f, "type validation failed: {}", err)
+            }
+            NoMirFor(ref func) => write!(f, "no mir for `{}`", func),
+            FunctionPointerTyMismatch(sig, got) =>
+                write!(f, "tried to call a function with sig {} through a function pointer of type {}", sig, got),
+            ArrayIndexOutOfBounds(span, len, index) =>
+                write!(f, "index out of bounds: the len is {} but the index is {} at {:?}", len, index, span),
+            ReallocatedWrongMemoryKind(ref old, ref new) =>
+                write!(f, "tried to reallocate memory from {} to {}", old, new),
+            DeallocatedWrongMemoryKind(ref old, ref new) =>
+                write!(f, "tried to deallocate {} memory but gave {} as the kind", old, new),
+            Math(span, ref err) =>
+                write!(f, "{:?} at {:?}", err, span),
+            Intrinsic(ref err) =>
+                write!(f, "{}", err),
+            InvalidChar(c) =>
+                write!(f, "tried to interpret an invalid 32-bit value as a char: {}", c),
+            OutOfMemory { allocation_size, memory_size, memory_usage } =>
+                write!(f, "tried to allocate {} more bytes, but only {} bytes are free of the {} byte memory",
+                       allocation_size, memory_size - memory_usage, memory_size),
+            AlignmentCheckFailed { required, has } =>
+               write!(f, "tried to access memory with alignment {}, but alignment {} is required",
+                      has, required),
+            TypeNotPrimitive(ty) =>
+                write!(f, "expected primitive type, got {}", ty),
+            Layout(ref err) =>
+                write!(f, "rustc layout computation failed: {:?}", err),
+            PathNotFound(ref path) =>
+                write!(f, "Cannot find path {:?}", path),
+            MachineError(ref inner) =>
+                write!(f, "machine error: {}", inner),
+            _ => write!(f, "{}", self.description()),
+        }
+    }
+}
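The `From<EvalErrorKind>` impl at the top of this file is what makes `err!(...)` and `?` cheap to use: every error construction goes through it, capturing a backtrace only when `RUST_BACKTRACE` is set. The same pattern in self-contained form, with a plain `String` standing in for `backtrace::Backtrace` (standalone sketch, not part of the commit):

    use std::env;

    #[derive(Debug)]
    enum MyErrorKind { InvalidBool, DanglingPointerDeref }

    #[derive(Debug)]
    struct MyError {
        kind: MyErrorKind,
        backtrace: Option<String>, // stand-in for backtrace::Backtrace
    }

    impl From<MyErrorKind> for MyError {
        fn from(kind: MyErrorKind) -> Self {
            // Mirror EvalError: only pay for a backtrace when asked to.
            let backtrace = match env::var("RUST_BACKTRACE") {
                Ok(ref val) if !val.is_empty() => Some("<captured here>".into()),
                _ => None,
            };
            MyError { kind, backtrace }
        }
    }

    fn read_bool(byte: u8) -> Result<bool, MyError> {
        match byte {
            0 => Ok(false),
            1 => Ok(true),
            _ => Err(MyErrorKind::InvalidBool.into()), // `From` kicks in via `Into`
        }
    }

    fn main() {
        println!("{:?}", read_bool(2));
    }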
diff --git a/src/librustc/mir/interpret/eval_context.rs b/src/librustc/mir/interpret/eval_context.rs
new file mode 100644 (file)
index 0000000..3388031
--- /dev/null
@@ -0,0 +1,2534 @@
+use std::collections::{HashMap, HashSet};
+use std::fmt::Write;
+
+use rustc::hir::def_id::DefId;
+use rustc::hir::map::definitions::DefPathData;
+use rustc::middle::const_val::ConstVal;
+use rustc::middle::region;
+use rustc::mir;
+use rustc::traits::Reveal;
+use rustc::ty::layout::{self, Layout, Size, Align, HasDataLayout};
+use rustc::ty::subst::{Subst, Substs, Kind};
+use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
+use rustc_data_structures::indexed_vec::Idx;
+use syntax::codemap::{self, DUMMY_SP};
+use syntax::ast::Mutability;
+use syntax::abi::Abi;
+
+use super::{EvalError, EvalResult, EvalErrorKind, GlobalId, Lvalue, LvalueExtra, Memory,
+            MemoryPointer, HasMemory, MemoryKind, operator, PrimVal, PrimValKind, Value, Pointer,
+            ValidationQuery, Machine};
+
+pub struct EvalContext<'a, 'tcx: 'a, M: Machine<'tcx>> {
+    /// Stores data required by the `Machine`
+    pub machine_data: M::Data,
+
+    /// The results of the type checker, from rustc.
+    pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
+
+    /// The virtual memory system.
+    pub memory: Memory<'a, 'tcx, M>,
+
+    /// Lvalues that were suspended by the validation subsystem, and will be recovered later
+    pub(crate) suspended: HashMap<DynamicLifetime, Vec<ValidationQuery<'tcx>>>,
+
+    /// Precomputed statics, constants and promoteds.
+    pub globals: HashMap<GlobalId<'tcx>, PtrAndAlign>,
+
+    /// The virtual call stack.
+    pub(crate) stack: Vec<Frame<'tcx>>,
+
+    /// The maximum number of stack frames allowed
+    pub(crate) stack_limit: usize,
+
+    /// The maximum number of operations that may be executed.
+    /// This prevents infinite loops and huge computations from freezing up const eval.
+    /// Remove once halting problem is solved.
+    pub(crate) steps_remaining: u64,
+}
+
+/// A stack frame.
+pub struct Frame<'tcx> {
+    ////////////////////////////////////////////////////////////////////////////////
+    // Function and callsite information
+    ////////////////////////////////////////////////////////////////////////////////
+    /// The MIR for the function called on this frame.
+    pub mir: &'tcx mir::Mir<'tcx>,
+
+    /// The def_id and substs of the current function
+    pub instance: ty::Instance<'tcx>,
+
+    /// The span of the call site.
+    pub span: codemap::Span,
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Return lvalue and locals
+    ////////////////////////////////////////////////////////////////////////////////
+    /// The block to return to when returning from the current stack frame
+    pub return_to_block: StackPopCleanup,
+
+    /// The location where the result of the current stack frame should be written to.
+    pub return_lvalue: Lvalue,
+
+    /// The list of locals for this stack frame, stored in order as
+    /// `[arguments..., variables..., temporaries...]`. The locals are stored as `Option<Value>`s.
+    /// `None` represents a local that is currently dead, while a live local
+    /// can either directly contain `PrimVal` or refer to some part of an `Allocation`.
+    ///
+    /// Before being initialized, arguments are `Value::ByVal(PrimVal::Undef)` and other locals are `None`.
+    pub locals: Vec<Option<Value>>,
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Current position within the function
+    ////////////////////////////////////////////////////////////////////////////////
+    /// The block that is currently executed (or will be executed after the above call stacks
+    /// return).
+    pub block: mir::BasicBlock,
+
+    /// The index of the currently evaluated statement.
+    pub stmt: usize,
+}
+
+#[derive(Clone, Debug, Eq, PartialEq, Hash)]
+pub enum StackPopCleanup {
+    /// The stack frame existed to compute the initial value of a static/constant; make sure it
+    /// isn't modifiable afterwards in case of constants.
+    /// In case of `static mut`, mark the memory to ensure it's never marked as immutable through
+    /// references or deallocated.
+    MarkStatic(Mutability),
+    /// A regular stack frame added due to a function call; when the frame is popped, execution
+    /// continues in the caller at the given block.
+    Goto(mir::BasicBlock),
+    /// The main function and diverging functions have nowhere to return to
+    None,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub struct DynamicLifetime {
+    pub frame: usize,
+    pub region: Option<region::Scope>, // "None" indicates "until the function ends"
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct ResourceLimits {
+    pub memory_size: u64,
+    pub step_limit: u64,
+    pub stack_limit: usize,
+}
+
+impl Default for ResourceLimits {
+    fn default() -> Self {
+        ResourceLimits {
+            memory_size: 100 * 1024 * 1024, // 100 MB
+            step_limit: 1_000_000,
+            stack_limit: 100,
+        }
+    }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct TyAndPacked<'tcx> {
+    pub ty: Ty<'tcx>,
+    pub packed: bool,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct ValTy<'tcx> {
+    pub value: Value,
+    pub ty: Ty<'tcx>,
+}
+
+impl<'tcx> ::std::ops::Deref for ValTy<'tcx> {
+    type Target = Value;
+    fn deref(&self) -> &Value {
+        &self.value
+    }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct PtrAndAlign {
+    pub ptr: Pointer,
+    /// Remember whether this lvalue is *supposed* to be aligned.
+    pub aligned: bool,
+}
+
+impl PtrAndAlign {
+    pub fn to_ptr<'tcx>(self) -> EvalResult<'tcx, MemoryPointer> {
+        self.ptr.to_ptr()
+    }
+    pub fn offset<'tcx, C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
+        Ok(PtrAndAlign {
+            ptr: self.ptr.offset(i, cx)?,
+            aligned: self.aligned,
+        })
+    }
+}
+
+impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
+    pub fn new(
+        tcx: TyCtxt<'a, 'tcx, 'tcx>,
+        limits: ResourceLimits,
+        machine_data: M::Data,
+        memory_data: M::MemoryData,
+    ) -> Self {
+        EvalContext {
+            machine_data,
+            tcx,
+            memory: Memory::new(&tcx.data_layout, limits.memory_size, memory_data),
+            suspended: HashMap::new(),
+            globals: HashMap::new(),
+            stack: Vec::new(),
+            stack_limit: limits.stack_limit,
+            steps_remaining: limits.step_limit,
+        }
+    }
+
+    pub fn alloc_ptr(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, MemoryPointer> {
+        let substs = self.substs();
+        self.alloc_ptr_with_substs(ty, substs)
+    }
+
+    pub fn alloc_ptr_with_substs(
+        &mut self,
+        ty: Ty<'tcx>,
+        substs: &'tcx Substs<'tcx>,
+    ) -> EvalResult<'tcx, MemoryPointer> {
+        let size = self.type_size_with_substs(ty, substs)?.expect(
+            "cannot alloc memory for unsized type",
+        );
+        let align = self.type_align_with_substs(ty, substs)?;
+        self.memory.allocate(size, align, MemoryKind::Stack)
+    }
+
+    pub fn memory(&self) -> &Memory<'a, 'tcx, M> {
+        &self.memory
+    }
+
+    pub fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M> {
+        &mut self.memory
+    }
+
+    pub fn stack(&self) -> &[Frame<'tcx>] {
+        &self.stack
+    }
+
+    #[inline]
+    pub fn cur_frame(&self) -> usize {
+        assert!(self.stack.len() > 0);
+        self.stack.len() - 1
+    }
+
+    pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> {
+        let ptr = self.memory.allocate_cached(s.as_bytes())?;
+        Ok(Value::ByValPair(
+            PrimVal::Ptr(ptr),
+            PrimVal::from_u128(s.len() as u128),
+        ))
+    }
+
+    pub(super) fn const_to_value(&mut self, const_val: &ConstVal<'tcx>) -> EvalResult<'tcx, Value> {
+        use rustc::middle::const_val::ConstVal::*;
+
+        let primval = match *const_val {
+            Integral(const_int) => PrimVal::Bytes(const_int.to_u128_unchecked()),
+
+            Float(val) => PrimVal::Bytes(val.bits),
+
+            Bool(b) => PrimVal::from_bool(b),
+            Char(c) => PrimVal::from_char(c),
+
+            Str(ref s) => return self.str_to_value(s),
+
+            ByteStr(ref bs) => {
+                let ptr = self.memory.allocate_cached(bs.data)?;
+                PrimVal::Ptr(ptr)
+            }
+
+            Unevaluated(def_id, substs) => {
+                let instance = self.resolve_associated_const(def_id, substs);
+                let cid = GlobalId {
+                    instance,
+                    promoted: None,
+                };
+                return Ok(Value::ByRef(*self.globals.get(&cid).expect("static/const not cached")));
+            }
+
+            Aggregate(..) |
+            Variant(_) => bug!("should not have aggregate or variant constants in MIR"),
+            // function items are zero sized and thus have no readable value
+            Function(..) => PrimVal::Undef,
+        };
+
+        Ok(Value::ByVal(primval))
+    }
+
+    pub(super) fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
+        // generics are weird, don't run this function on a generic
+        assert!(!ty.needs_subst());
+        ty.is_sized(self.tcx, ty::ParamEnv::empty(Reveal::All), DUMMY_SP)
+    }
+
+    pub fn load_mir(
+        &self,
+        instance: ty::InstanceDef<'tcx>,
+    ) -> EvalResult<'tcx, &'tcx mir::Mir<'tcx>> {
+        trace!("load mir {:?}", instance);
+        match instance {
+            ty::InstanceDef::Item(def_id) => {
+                self.tcx.maybe_optimized_mir(def_id).ok_or_else(|| {
+                    EvalErrorKind::NoMirFor(self.tcx.item_path_str(def_id)).into()
+                })
+            }
+            _ => Ok(self.tcx.instance_mir(instance)),
+        }
+    }
+
+    pub fn monomorphize(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
+        // miri doesn't care about lifetimes, and will choke on some crazy ones
+        // let's simply get rid of them
+        let without_lifetimes = self.tcx.erase_regions(&ty);
+        let substituted = without_lifetimes.subst(self.tcx, substs);
+        let substituted = self.tcx.normalize_associated_type(&substituted);
+        substituted
+    }
+
+    /// Return the size and alignment of the value at the given type.
+    /// Note that the value does not matter if the type is sized. For unsized types,
+    /// the value has to be a fat pointer, and we only care about the "extra" data in it.
+    pub fn size_and_align_of_dst(
+        &mut self,
+        ty: ty::Ty<'tcx>,
+        value: Value,
+    ) -> EvalResult<'tcx, (u64, u64)> {
+        if let Some(size) = self.type_size(ty)? {
+            Ok((size as u64, self.type_align(ty)? as u64))
+        } else {
+            match ty.sty {
+                ty::TyAdt(..) | ty::TyTuple(..) => {
+                    // First get the size of all statically known fields.
+                    // Don't use type_of::sizing_type_of because that expects t to be sized,
+                    // and it also rounds up to alignment, which we want to avoid,
+                    // as the unsized field's alignment could be smaller.
+                    assert!(!ty.is_simd());
+                    let layout = self.type_layout(ty)?;
+                    debug!("DST {} layout: {:?}", ty, layout);
+
+                    let (sized_size, sized_align) = match *layout {
+                        ty::layout::Layout::Univariant { ref variant, .. } => {
+                            (
+                                variant.offsets.last().map_or(0, |o| o.bytes()),
+                                variant.align,
+                            )
+                        }
+                        _ => {
+                            bug!(
+                                "size_and_align_of_dst: expcted Univariant for `{}`, found {:#?}",
+                                ty,
+                                layout
+                            );
+                        }
+                    };
+                    debug!(
+                        "DST {} statically sized prefix size: {} align: {:?}",
+                        ty,
+                        sized_size,
+                        sized_align
+                    );
+
+                    // Recurse to get the size of the dynamically sized field (must be
+                    // the last field).
+                    let (unsized_size, unsized_align) = match ty.sty {
+                        ty::TyAdt(def, substs) => {
+                            let last_field = def.struct_variant().fields.last().unwrap();
+                            let field_ty = self.field_ty(substs, last_field);
+                            self.size_and_align_of_dst(field_ty, value)?
+                        }
+                        ty::TyTuple(ref types, _) => {
+                            let field_ty = types.last().unwrap();
+                            let field_ty = self.tcx.normalize_associated_type(field_ty);
+                            self.size_and_align_of_dst(field_ty, value)?
+                        }
+                        _ => bug!("We already checked that we know this type"),
+                    };
+
+                    // FIXME (#26403, #27023): We should be adding padding
+                    // to `sized_size` (to accommodate the `unsized_align`
+                    // required of the unsized field that follows) before
+                    // summing it with `sized_size`. (Note that since #26403
+                    // is unfixed, we do not yet add the necessary padding
+                    // here. But this is where the add would go.)
+
+                    // Return the sum of sizes and max of aligns.
+                    let size = sized_size + unsized_size;
+
+                    // Choose max of two known alignments (combined value must
+                    // be aligned according to more restrictive of the two).
+                    let align =
+                        sized_align.max(Align::from_bytes(unsized_align, unsized_align).unwrap());
+
+                    // Issue #27023: must add any necessary padding to `size`
+                    // (to make it a multiple of `align`) before returning it.
+                    //
+                    // Namely, the returned size should be, in C notation:
+                    //
+                    //   `size + ((size & (align-1)) ? align : 0)`
+                    //
+                    // emulated via the semi-standard fast bit trick:
+                    //
+                    //   `(size + (align-1)) & -align`
+
+                    let size = Size::from_bytes(size).abi_align(align).bytes();
+                    Ok((size, align.abi()))
+                }
+                ty::TyDynamic(..) => {
+                    let (_, vtable) = value.into_ptr_vtable_pair(&mut self.memory)?;
+                    // the second entry in the vtable is the dynamic size of the object.
+                    self.read_size_and_align_from_vtable(vtable)
+                }
+
+                ty::TySlice(_) | ty::TyStr => {
+                    let elem_ty = ty.sequence_element_type(self.tcx);
+                    let elem_size = self.type_size(elem_ty)?.expect(
+                        "slice element must be sized",
+                    ) as u64;
+                    let (_, len) = value.into_slice(&mut self.memory)?;
+                    let align = self.type_align(elem_ty)?;
+                    Ok((len * elem_size, align as u64))
+                }
+
+                _ => bug!("size_of_val::<{:?}>", ty),
+            }
+        }
+    }
+
+    /// Returns the normalized type of a struct field
+    fn field_ty(&self, param_substs: &Substs<'tcx>, f: &ty::FieldDef) -> ty::Ty<'tcx> {
+        self.tcx.normalize_associated_type(
+            &f.ty(self.tcx, param_substs),
+        )
+    }
+
+    pub fn type_size(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<u64>> {
+        self.type_size_with_substs(ty, self.substs())
+    }
+
+    pub fn type_align(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, u64> {
+        self.type_align_with_substs(ty, self.substs())
+    }
+
+    pub fn type_size_with_substs(
+        &self,
+        ty: Ty<'tcx>,
+        substs: &'tcx Substs<'tcx>,
+    ) -> EvalResult<'tcx, Option<u64>> {
+        let layout = self.type_layout_with_substs(ty, substs)?;
+        if layout.is_unsized() {
+            Ok(None)
+        } else {
+            Ok(Some(layout.size(&self.tcx.data_layout).bytes()))
+        }
+    }
+
+    pub fn type_align_with_substs(
+        &self,
+        ty: Ty<'tcx>,
+        substs: &'tcx Substs<'tcx>,
+    ) -> EvalResult<'tcx, u64> {
+        self.type_layout_with_substs(ty, substs).map(|layout| {
+            layout.align(&self.tcx.data_layout).abi()
+        })
+    }
+
+    pub fn type_layout(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, &'tcx Layout> {
+        self.type_layout_with_substs(ty, self.substs())
+    }
+
+    fn type_layout_with_substs(
+        &self,
+        ty: Ty<'tcx>,
+        substs: &'tcx Substs<'tcx>,
+    ) -> EvalResult<'tcx, &'tcx Layout> {
+        // TODO(solson): Is this inefficient? Needs investigation.
+        let ty = self.monomorphize(ty, substs);
+
+        ty.layout(self.tcx, ty::ParamEnv::empty(Reveal::All))
+            .map_err(|layout| EvalErrorKind::Layout(layout).into())
+    }
+
+    pub fn push_stack_frame(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        span: codemap::Span,
+        mir: &'tcx mir::Mir<'tcx>,
+        return_lvalue: Lvalue,
+        return_to_block: StackPopCleanup,
+    ) -> EvalResult<'tcx> {
+        ::log_settings::settings().indentation += 1;
+
+        /// Return the set of locals that have a storage annotation anywhere
+        fn collect_storage_annotations<'tcx>(mir: &'tcx mir::Mir<'tcx>) -> HashSet<mir::Local> {
+            use rustc::mir::StatementKind::*;
+
+            let mut set = HashSet::new();
+            for block in mir.basic_blocks() {
+                for stmt in block.statements.iter() {
+                    match stmt.kind {
+                        StorageLive(local) |
+                        StorageDead(local) => {
+                            set.insert(local);
+                        }
+                        _ => {}
+                    }
+                }
+            }
+            set
+        }
+
+        // Subtract 1 because `local_decls` includes the ReturnMemoryPointer, but we don't store a local
+        // `Value` for that.
+        let num_locals = mir.local_decls.len() - 1;
+
+        let locals = {
+            let annotated_locals = collect_storage_annotations(mir);
+            let mut locals = vec![None; num_locals];
+            for i in 0..num_locals {
+                let local = mir::Local::new(i + 1);
+                if !annotated_locals.contains(&local) {
+                    locals[i] = Some(Value::ByVal(PrimVal::Undef));
+                }
+            }
+            locals
+        };
+
+        self.stack.push(Frame {
+            mir,
+            block: mir::START_BLOCK,
+            return_to_block,
+            return_lvalue,
+            locals,
+            span,
+            instance,
+            stmt: 0,
+        });
+
+        self.memory.cur_frame = self.cur_frame();
+
+        if self.stack.len() > self.stack_limit {
+            err!(StackFrameLimitReached)
+        } else {
+            Ok(())
+        }
+    }
+
+    pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> {
+        ::log_settings::settings().indentation -= 1;
+        self.end_region(None)?;
+        let frame = self.stack.pop().expect(
+            "tried to pop a stack frame, but there were none",
+        );
+        if !self.stack.is_empty() {
+            // TODO: Is this the correct time to start considering these accesses as originating from the returned-to stack frame?
+            self.memory.cur_frame = self.cur_frame();
+        }
+        match frame.return_to_block {
+            StackPopCleanup::MarkStatic(mutable) => {
+                if let Lvalue::Ptr { ptr, .. } = frame.return_lvalue {
+                    // FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions
+                    self.memory.mark_static_initalized(
+                        ptr.to_ptr()?.alloc_id,
+                        mutable,
+                    )?
+                } else {
+                    bug!("StackPopCleanup::MarkStatic on: {:?}", frame.return_lvalue);
+                }
+            }
+            StackPopCleanup::Goto(target) => self.goto_block(target),
+            StackPopCleanup::None => {}
+        }
+        // deallocate all locals that are backed by an allocation
+        for local in frame.locals {
+            self.deallocate_local(local)?;
+        }
+
+        Ok(())
+    }
+
+    pub fn deallocate_local(&mut self, local: Option<Value>) -> EvalResult<'tcx> {
+        if let Some(Value::ByRef(ptr)) = local {
+            trace!("deallocating local");
+            let ptr = ptr.to_ptr()?;
+            self.memory.dump_alloc(ptr.alloc_id);
+            match self.memory.get(ptr.alloc_id)?.kind {
+                // for a constant like `const FOO: &i32 = &1;` the local containing
+                // the `1` is referred to by the global. We transitively marked everything
+                // the global refers to as static itself, so we don't free it here
+                MemoryKind::Static => {}
+                MemoryKind::Stack => self.memory.deallocate(ptr, None, MemoryKind::Stack)?,
+                other => bug!("local contained non-stack memory: {:?}", other),
+            }
+        };
+        Ok(())
+    }
+
+    pub fn assign_discr_and_fields(
+        &mut self,
+        dest: Lvalue,
+        dest_ty: Ty<'tcx>,
+        discr_offset: u64,
+        operands: &[mir::Operand<'tcx>],
+        discr_val: u128,
+        variant_idx: usize,
+        discr_size: u64,
+        discr_signed: bool,
+    ) -> EvalResult<'tcx> {
+        // FIXME(solson)
+        let dest_ptr = self.force_allocation(dest)?.to_ptr()?;
+
+        let discr_dest = dest_ptr.offset(discr_offset, &self)?;
+        self.memory.write_primval(discr_dest, PrimVal::Bytes(discr_val), discr_size, discr_signed)?;
+
+        let dest = Lvalue::Ptr {
+            ptr: PtrAndAlign {
+                ptr: dest_ptr.into(),
+                aligned: true,
+            },
+            extra: LvalueExtra::DowncastVariant(variant_idx),
+        };
+
+        self.assign_fields(dest, dest_ty, operands)
+    }
+
+    pub fn assign_fields(
+        &mut self,
+        dest: Lvalue,
+        dest_ty: Ty<'tcx>,
+        operands: &[mir::Operand<'tcx>],
+    ) -> EvalResult<'tcx> {
+        if self.type_size(dest_ty)? == Some(0) {
+            // zst assigning is a nop
+            return Ok(());
+        }
+        if self.ty_to_primval_kind(dest_ty).is_ok() {
+            assert_eq!(operands.len(), 1);
+            let value = self.eval_operand(&operands[0])?;
+            return self.write_value(value, dest);
+        }
+        for (field_index, operand) in operands.iter().enumerate() {
+            let value = self.eval_operand(operand)?;
+            let field_dest = self.lvalue_field(dest, mir::Field::new(field_index), dest_ty, value.ty)?;
+            self.write_value(value, field_dest)?;
+        }
+        Ok(())
+    }
+
+    /// Evaluate an assignment statement.
+    ///
+    /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
+    /// type writes its results directly into the memory specified by the lvalue.
+    pub(super) fn eval_rvalue_into_lvalue(
+        &mut self,
+        rvalue: &mir::Rvalue<'tcx>,
+        lvalue: &mir::Lvalue<'tcx>,
+    ) -> EvalResult<'tcx> {
+        let dest = self.eval_lvalue(lvalue)?;
+        let dest_ty = self.lvalue_ty(lvalue);
+        let dest_layout = self.type_layout(dest_ty)?;
+
+        use rustc::mir::Rvalue::*;
+        match *rvalue {
+            Use(ref operand) => {
+                let value = self.eval_operand(operand)?.value;
+                let valty = ValTy {
+                    value,
+                    ty: dest_ty,
+                };
+                self.write_value(valty, dest)?;
+            }
+
+            BinaryOp(bin_op, ref left, ref right) => {
+                let left = self.eval_operand(left)?;
+                let right = self.eval_operand(right)?;
+                if self.intrinsic_overflowing(
+                    bin_op,
+                    left,
+                    right,
+                    dest,
+                    dest_ty,
+                )?
+                {
+                    // There was an overflow in an unchecked binop.  Right now, we consider this an error and bail out.
+                    // The rationale is that the reason rustc emits unchecked binops in release mode (vs. the checked binops
+                    // it emits in debug mode) is performance, but it doesn't cost us any performance in miri.
+                    // If, however, the compiler ever starts transforming unchecked intrinsics into unchecked binops,
+                    // we have to go back to just ignoring the overflow here.
+                    return err!(OverflowingMath);
+                }
+            }
+
+            CheckedBinaryOp(bin_op, ref left, ref right) => {
+                let left = self.eval_operand(left)?;
+                let right = self.eval_operand(right)?;
+                self.intrinsic_with_overflow(
+                    bin_op,
+                    left,
+                    right,
+                    dest,
+                    dest_ty,
+                )?;
+            }
+
+            UnaryOp(un_op, ref operand) => {
+                let val = self.eval_operand_to_primval(operand)?;
+                let kind = self.ty_to_primval_kind(dest_ty)?;
+                self.write_primval(
+                    dest,
+                    operator::unary_op(un_op, val, kind)?,
+                    dest_ty,
+                )?;
+            }
+
+            // Skip everything for zsts
+            Aggregate(..) if self.type_size(dest_ty)? == Some(0) => {}
+
+            Aggregate(ref kind, ref operands) => {
+                self.inc_step_counter_and_check_limit(operands.len() as u64)?;
+                use rustc::ty::layout::Layout::*;
+                match *dest_layout {
+                    Univariant { ref variant, .. } => {
+                        self.write_maybe_aligned_mut(!variant.packed, |ecx| {
+                            ecx.assign_fields(dest, dest_ty, operands)
+                        })?;
+                    }
+
+                    Array { .. } => {
+                        self.assign_fields(dest, dest_ty, operands)?;
+                    }
+
+                    General {
+                        discr,
+                        ref variants,
+                        ..
+                    } => {
+                        if let mir::AggregateKind::Adt(adt_def, variant, _, _) = **kind {
+                            let discr_val = adt_def
+                                .discriminants(self.tcx)
+                                .nth(variant)
+                                .expect("broken mir: Adt variant id invalid")
+                                .to_u128_unchecked();
+                            let discr_size = discr.size().bytes();
+
+                            self.assign_discr_and_fields(
+                                dest,
+                                dest_ty,
+                                variants[variant].offsets[0].bytes(),
+                                operands,
+                                discr_val,
+                                variant,
+                                discr_size,
+                                false,
+                            )?;
+                        } else {
+                            bug!("tried to assign {:?} to Layout::General", kind);
+                        }
+                    }
+
+                    RawNullablePointer { nndiscr, .. } => {
+                        if let mir::AggregateKind::Adt(_, variant, _, _) = **kind {
+                            if nndiscr == variant as u64 {
+                                assert_eq!(operands.len(), 1);
+                                let operand = &operands[0];
+                                let value = self.eval_operand(operand)?;
+                                self.write_value(value, dest)?;
+                            } else {
+                                if let Some(operand) = operands.get(0) {
+                                    assert_eq!(operands.len(), 1);
+                                    let operand_ty = self.operand_ty(operand);
+                                    assert_eq!(self.type_size(operand_ty)?, Some(0));
+                                }
+                                self.write_null(dest, dest_ty)?;
+                            }
+                        } else {
+                            bug!("tried to assign {:?} to Layout::RawNullablePointer", kind);
+                        }
+                    }
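+                    // For illustration (hedged, not interpreter code):
+                    // RawNullablePointer is the classic niche optimization, e.g.
+                    // `Option<&u8>`, where `None` is the all-zero pointer (see
+                    // `write_null` above) and no separate discriminant is stored:
+                    //
+                    //     use std::mem::size_of;
+                    //     assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());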
+
+                    StructWrappedNullablePointer {
+                        nndiscr,
+                        ref discrfield_source,
+                        ref nonnull,
+                        ..
+                    } => {
+                        if let mir::AggregateKind::Adt(_, variant, _, _) = **kind {
+                            if nndiscr == variant as u64 {
+                                self.write_maybe_aligned_mut(!nonnull.packed, |ecx| {
+                                    ecx.assign_fields(dest, dest_ty, operands)
+                                })?;
+                            } else {
+                                for operand in operands {
+                                    let operand_ty = self.operand_ty(operand);
+                                    assert_eq!(self.type_size(operand_ty)?, Some(0));
+                                }
+                                self.write_struct_wrapped_null_pointer(
+                                    dest_ty,
+                                    nndiscr,
+                                    discrfield_source,
+                                    dest,
+                                )?;
+                            }
+                        } else {
+                            bug!("tried to assign {:?} to Layout::RawNullablePointer", kind);
+                        }
+                    }
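+                    // Hedged sketch: StructWrappedNullablePointer covers a niche
+                    // buried inside a struct, e.g. `Option<(u8, &u8)>`: the `&u8`
+                    // field is never null for `Some`, so zeroing just that field
+                    // (see `write_struct_wrapped_null_pointer` below) encodes
+                    // `None` without a separate tag:
+                    //
+                    //     use std::mem::size_of;
+                    //     assert_eq!(size_of::<Option<(u8, &u8)>>(),
+                    //                size_of::<(u8, &u8)>());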
+
+                    CEnum { .. } => {
+                        assert_eq!(operands.len(), 0);
+                        if let mir::AggregateKind::Adt(adt_def, variant, _, _) = **kind {
+                            let n = adt_def
+                                .discriminants(self.tcx)
+                                .nth(variant)
+                                .expect("broken mir: Adt variant index invalid")
+                                .to_u128_unchecked();
+                            self.write_primval(dest, PrimVal::Bytes(n), dest_ty)?;
+                        } else {
+                            bug!("tried to assign {:?} to Layout::CEnum", kind);
+                        }
+                    }
+
+                    Vector { count, .. } => {
+                        debug_assert_eq!(count, operands.len() as u64);
+                        self.assign_fields(dest, dest_ty, operands)?;
+                    }
+
+                    UntaggedUnion { ref variants } => {
+                        assert_eq!(operands.len(), 1);
+                        let operand = &operands[0];
+                        let value = self.eval_operand(operand)?;
+                        self.write_maybe_aligned_mut(!variants.packed, |ecx| {
+                            ecx.write_value(value, dest)
+                        })?;
+                    }
+
+                    _ => {
+                        return err!(Unimplemented(format!(
+                            "can't handle destination layout {:?} when assigning {:?}",
+                            dest_layout,
+                            kind
+                        )));
+                    }
+                }
+            }
+
+            Repeat(ref operand, _) => {
+                let (elem_ty, length) = match dest_ty.sty {
+                    ty::TyArray(elem_ty, n) => (elem_ty, n.val.to_const_int().unwrap().to_u64().unwrap()),
+                    _ => {
+                        bug!(
+                            "tried to assign array-repeat to non-array type {:?}",
+                            dest_ty
+                        )
+                    }
+                };
+                self.inc_step_counter_and_check_limit(length)?;
+                let elem_size = self.type_size(elem_ty)?.expect(
+                    "repeat element type must be sized",
+                );
+                let value = self.eval_operand(operand)?.value;
+
+                // FIXME(solson)
+                let dest = Pointer::from(self.force_allocation(dest)?.to_ptr()?);
+
+                for i in 0..length {
+                    let elem_dest = dest.offset(i * elem_size, &self)?;
+                    self.write_value_to_ptr(value, elem_dest, elem_ty)?;
+                }
+            }
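+            // A hedged illustration: the repeat operand is evaluated once and
+            // then copied `length` times, e.g.
+            //
+            //     let a = [0u8; 4]; // one eval_operand, four write_value_to_ptr calls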
+
+            Len(ref lvalue) => {
+                // FIXME(CTFE): don't allow computing the length of arrays in const eval
+                let src = self.eval_lvalue(lvalue)?;
+                let ty = self.lvalue_ty(lvalue);
+                let (_, len) = src.elem_ty_and_len(ty);
+                self.write_primval(
+                    dest,
+                    PrimVal::from_u128(len as u128),
+                    dest_ty,
+                )?;
+            }
+
+            Ref(_, _, ref lvalue) => {
+                let src = self.eval_lvalue(lvalue)?;
+                // We ignore the alignment of the lvalue here -- special handling for packed structs ends
+                // at the `&` operator.
+                let (ptr, extra) = self.force_allocation(src)?.to_ptr_extra_aligned();
+
+                let val = match extra {
+                    LvalueExtra::None => ptr.ptr.to_value(),
+                    LvalueExtra::Length(len) => ptr.ptr.to_value_with_len(len),
+                    LvalueExtra::Vtable(vtable) => ptr.ptr.to_value_with_vtable(vtable),
+                    LvalueExtra::DowncastVariant(..) => {
+                        bug!("attempted to take a reference to an enum downcast lvalue")
+                    }
+                };
+                let valty = ValTy {
+                    value: val,
+                    ty: dest_ty,
+                };
+                self.write_value(valty, dest)?;
+            }
+
+            NullaryOp(mir::NullOp::Box, ty) => {
+                M::box_alloc(self, ty, dest)?;
+            }
+
+            NullaryOp(mir::NullOp::SizeOf, ty) => {
+                let size = self.type_size(ty)?.expect(
+                    "SizeOf nullary MIR operator called for unsized type",
+                );
+                self.write_primval(
+                    dest,
+                    PrimVal::from_u128(size as u128),
+                    dest_ty,
+                )?;
+            }
+
+            Cast(kind, ref operand, cast_ty) => {
+                debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest_ty);
+                use rustc::mir::CastKind::*;
+                match kind {
+                    Unsize => {
+                        let src = self.eval_operand(operand)?;
+                        self.unsize_into(src.value, src.ty, dest, dest_ty)?;
+                    }
+
+                    Misc => {
+                        let src = self.eval_operand(operand)?;
+                        if self.type_is_fat_ptr(src.ty) {
+                            match (src.value, self.type_is_fat_ptr(dest_ty)) {
+                                (Value::ByRef { .. }, _) |
+                                (Value::ByValPair(..), true) => {
+                                    let valty = ValTy {
+                                        value: src.value,
+                                        ty: dest_ty,
+                                    };
+                                    self.write_value(valty, dest)?;
+                                }
+                                (Value::ByValPair(data, _), false) => {
+                                    let valty = ValTy {
+                                        value: Value::ByVal(data),
+                                        ty: dest_ty,
+                                    };
+                                    self.write_value(valty, dest)?;
+                                }
+                                (Value::ByVal(_), _) => bug!("expected fat ptr"),
+                            }
+                        } else {
+                            let src_val = self.value_to_primval(src)?;
+                            let dest_val = self.cast_primval(src_val, src.ty, dest_ty)?;
+                            let valty = ValTy {
+                                value: Value::ByVal(dest_val),
+                                ty: dest_ty,
+                            };
+                            self.write_value(valty, dest)?;
+                        }
+                    }
+
+                    ReifyFnPointer => {
+                        match self.operand_ty(operand).sty {
+                            ty::TyFnDef(def_id, substs) => {
+                                let instance = resolve(self.tcx, def_id, substs);
+                                let fn_ptr = self.memory.create_fn_alloc(instance);
+                                let valty = ValTy {
+                                    value: Value::ByVal(PrimVal::Ptr(fn_ptr)),
+                                    ty: dest_ty,
+                                };
+                                self.write_value(valty, dest)?;
+                            }
+                            ref other => bug!("reify fn pointer on {:?}", other),
+                        }
+                    }
+
+                    UnsafeFnPointer => {
+                        match dest_ty.sty {
+                            ty::TyFnPtr(_) => {
+                                let mut src = self.eval_operand(operand)?;
+                                src.ty = dest_ty;
+                                self.write_value(src, dest)?;
+                            }
+                            ref other => bug!("fn to unsafe fn cast on {:?}", other),
+                        }
+                    }
+
+                    ClosureFnPointer => {
+                        match self.operand_ty(operand).sty {
+                            ty::TyClosure(def_id, substs) => {
+                                let instance = resolve_closure(
+                                    self.tcx,
+                                    def_id,
+                                    substs,
+                                    ty::ClosureKind::FnOnce,
+                                );
+                                let fn_ptr = self.memory.create_fn_alloc(instance);
+                                let valty = ValTy {
+                                    value: Value::ByVal(PrimVal::Ptr(fn_ptr)),
+                                    ty: dest_ty,
+                                };
+                                self.write_value(valty, dest)?;
+                            }
+                            ref other => bug!("closure fn pointer on {:?}", other),
+                        }
+                    }
+                }
+            }
+
+            Discriminant(ref lvalue) => {
+                let lval = self.eval_lvalue(lvalue)?;
+                let ty = self.lvalue_ty(lvalue);
+                let ptr = self.force_allocation(lval)?.to_ptr()?;
+                let discr_val = self.read_discriminant_value(ptr, ty)?;
+                if let ty::TyAdt(adt_def, _) = ty.sty {
+                    trace!("Read discriminant {}, valid discriminants {:?}", discr_val, adt_def.discriminants(self.tcx).collect::<Vec<_>>());
+                    if adt_def.discriminants(self.tcx).all(|v| {
+                        discr_val != v.to_u128_unchecked()
+                    })
+                    {
+                        return err!(InvalidDiscriminant);
+                    }
+                    self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?;
+                } else {
+                    bug!("rustc only generates Rvalue::Discriminant for enums");
+                }
+            }
+        }
+
+        if log_enabled!(::log::LogLevel::Trace) {
+            self.dump_local(dest);
+        }
+
+        Ok(())
+    }
+
+    pub(crate) fn write_struct_wrapped_null_pointer(
+        &mut self,
+        dest_ty: ty::Ty<'tcx>,
+        nndiscr: u64,
+        discrfield_source: &layout::FieldPath,
+        dest: Lvalue,
+    ) -> EvalResult<'tcx> {
+        let (offset, TyAndPacked { ty, packed }) = self.nonnull_offset_and_ty(
+            dest_ty,
+            nndiscr,
+            discrfield_source,
+        )?;
+        let nonnull = self.force_allocation(dest)?.to_ptr()?.offset(
+            offset.bytes(),
+            &self,
+        )?;
+        trace!("struct wrapped nullable pointer type: {}", ty);
+        // only the pointer part of a fat pointer is used for this space optimization
+        let discr_size = self.type_size(ty)?.expect(
+            "bad StructWrappedNullablePointer discrfield",
+        );
+        self.memory.write_maybe_aligned_mut(!packed, |mem| {
+            // We're writing 0, signedness does not matter
+            mem.write_primval(nonnull, PrimVal::Bytes(0), discr_size, false)
+        })
+    }
+
+    pub(super) fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool {
+        match ty.sty {
+            ty::TyRawPtr(ref tam) |
+            ty::TyRef(_, ref tam) => !self.type_is_sized(tam.ty),
+            ty::TyAdt(def, _) if def.is_box() => !self.type_is_sized(ty.boxed_ty()),
+            _ => false,
+        }
+    }
+
+    pub(super) fn nonnull_offset_and_ty(
+        &self,
+        ty: Ty<'tcx>,
+        nndiscr: u64,
+        discrfield: &[u32],
+    ) -> EvalResult<'tcx, (Size, TyAndPacked<'tcx>)> {
+        // Skip the constant 0 at the start (meant for LLVM GEP) and the field
+        // index of the outer non-null variant, which is handled separately below.
+        let path = discrfield.iter().skip(2).map(|&i| i as usize);
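+        // Hedged example: for a discrfield of `[0, 2, 1]`, index 0 is the GEP
+        // constant (skipped), index 2 selects field 2 of the non-null variant
+        // below, and the remaining `[1]` walks into field 1 of that field's type.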
+
+        // Handle the field index for the outer non-null variant.
+        let (inner_offset, inner_ty) = match ty.sty {
+            ty::TyAdt(adt_def, substs) => {
+                let variant = &adt_def.variants[nndiscr as usize];
+                let index = discrfield[1];
+                let field = &variant.fields[index as usize];
+                (
+                    self.get_field_offset(ty, index as usize)?,
+                    field.ty(self.tcx, substs),
+                )
+            }
+            _ => bug!("non-enum for StructWrappedNullablePointer: {}", ty),
+        };
+
+        self.field_path_offset_and_ty(inner_offset, inner_ty, path)
+    }
+
+    fn field_path_offset_and_ty<I: Iterator<Item = usize>>(
+        &self,
+        mut offset: Size,
+        mut ty: Ty<'tcx>,
+        path: I,
+    ) -> EvalResult<'tcx, (Size, TyAndPacked<'tcx>)> {
+        // The initial 0 intended for LLVM GEP has already been skipped by the caller.
+        let mut packed = false;
+        for field_index in path {
+            let field_offset = self.get_field_offset(ty, field_index)?;
+            trace!(
+                "field_path_offset_and_ty: {}, {}, {:?}, {:?}",
+                field_index,
+                ty,
+                field_offset,
+                offset
+            );
+            let field_ty = self.get_field_ty(ty, field_index)?;
+            ty = field_ty.ty;
+            packed = packed || field_ty.packed;
+            offset = offset
+                .checked_add(field_offset, &self.tcx.data_layout)
+                .unwrap();
+        }
+
+        Ok((offset, TyAndPacked { ty, packed }))
+    }
+
+    fn get_fat_field(
+        &self,
+        pointee_ty: Ty<'tcx>,
+        field_index: usize,
+    ) -> EvalResult<'tcx, Ty<'tcx>> {
+        match (field_index, &self.tcx.struct_tail(pointee_ty).sty) {
+            (1, &ty::TyStr) |
+            (1, &ty::TySlice(_)) => Ok(self.tcx.types.usize),
+            (1, &ty::TyDynamic(..)) |
+            (0, _) => Ok(self.tcx.mk_imm_ptr(self.tcx.types.u8)),
+            _ => bug!("invalid fat pointee type: {}", pointee_ty),
+        }
+    }
+
+    /// Returns the field type and whether the field is packed
+    pub fn get_field_ty(
+        &self,
+        ty: Ty<'tcx>,
+        field_index: usize,
+    ) -> EvalResult<'tcx, TyAndPacked<'tcx>> {
+        match ty.sty {
+            ty::TyAdt(adt_def, _) if adt_def.is_box() => Ok(TyAndPacked {
+                ty: self.get_fat_field(ty.boxed_ty(), field_index)?,
+                packed: false,
+            }),
+            ty::TyAdt(adt_def, substs) if adt_def.is_enum() => {
+                use rustc::ty::layout::Layout::*;
+                match *self.type_layout(ty)? {
+                    RawNullablePointer { nndiscr, .. } => Ok(TyAndPacked {
+                        ty: adt_def.variants[nndiscr as usize].fields[field_index].ty(
+                            self.tcx,
+                            substs,
+                        ),
+                        packed: false,
+                    }),
+                    StructWrappedNullablePointer {
+                        nndiscr,
+                        ref nonnull,
+                        ..
+                    } => {
+                        let ty = adt_def.variants[nndiscr as usize].fields[field_index].ty(
+                            self.tcx,
+                            substs,
+                        );
+                        Ok(TyAndPacked {
+                            ty,
+                            packed: nonnull.packed,
+                        })
+                    }
+                    // MIR optimizations treat single-variant enums as structs
+                    General { .. } if adt_def.variants.len() == 1 => Ok(TyAndPacked {
+                        ty: adt_def.variants[0].fields[field_index].ty(self.tcx, substs),
+                        packed: false,
+                    }),
+                    _ => {
+                        err!(Unimplemented(format!(
+                            "get_field_ty can't handle enum type: {:?}, {:?}",
+                            ty,
+                            ty.sty
+                        )))
+                    }
+                }
+            }
+            ty::TyAdt(adt_def, substs) => {
+                let variant_def = adt_def.struct_variant();
+                use rustc::ty::layout::Layout::*;
+                match *self.type_layout(ty)? {
+                    UntaggedUnion { ref variants } => Ok(TyAndPacked {
+                        ty: variant_def.fields[field_index].ty(self.tcx, substs),
+                        packed: variants.packed,
+                    }),
+                    Univariant { ref variant, .. } => Ok(TyAndPacked {
+                        ty: variant_def.fields[field_index].ty(self.tcx, substs),
+                        packed: variant.packed,
+                    }),
+                    _ => {
+                        err!(Unimplemented(format!(
+                            "get_field_ty can't handle struct type: {:?}, {:?}",
+                            ty,
+                            ty.sty
+                        )))
+                    }
+                }
+            }
+
+            ty::TyTuple(fields, _) => Ok(TyAndPacked {
+                ty: fields[field_index],
+                packed: false,
+            }),
+
+            ty::TyRef(_, ref tam) |
+            ty::TyRawPtr(ref tam) => Ok(TyAndPacked {
+                ty: self.get_fat_field(tam.ty, field_index)?,
+                packed: false,
+            }),
+
+            ty::TyArray(ref inner, _) => Ok(TyAndPacked {
+                ty: inner,
+                packed: false,
+            }),
+
+            ty::TyClosure(def_id, ref closure_substs) => Ok(TyAndPacked {
+                ty: closure_substs.upvar_tys(def_id, self.tcx).nth(field_index).unwrap(),
+                packed: false,
+            }),
+
+            _ => {
+                err!(Unimplemented(
+                    format!("can't handle type: {:?}, {:?}", ty, ty.sty),
+                ))
+            }
+        }
+    }
+
+    fn get_field_offset(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, Size> {
+        // Also see lvalue_field in lvalue.rs, which handles more cases but needs an actual value at the given type
+        let layout = self.type_layout(ty)?;
+
+        use rustc::ty::layout::Layout::*;
+        match *layout {
+            Univariant { ref variant, .. } => Ok(variant.offsets[field_index]),
+            FatPointer { .. } => {
+                let bytes = field_index as u64 * self.memory.pointer_size();
+                Ok(Size::from_bytes(bytes))
+            }
+            StructWrappedNullablePointer { ref nonnull, .. } => Ok(nonnull.offsets[field_index]),
+            UntaggedUnion { .. } => Ok(Size::from_bytes(0)),
+            // MIR optimizations treat single-variant enums as structs
+            General { ref variants, .. } if variants.len() == 1 => Ok(variants[0].offsets[field_index]),
+            _ => {
+                let msg = format!(
+                    "get_field_offset: can't handle type: {:?}, with layout: {:?}",
+                    ty,
+                    layout
+                );
+                err!(Unimplemented(msg))
+            }
+        }
+    }
+
+    pub fn get_field_count(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, u64> {
+        let layout = self.type_layout(ty)?;
+
+        use rustc::ty::layout::Layout::*;
+        match *layout {
+            Univariant { ref variant, .. } => Ok(variant.offsets.len() as u64),
+            FatPointer { .. } => Ok(2),
+            StructWrappedNullablePointer { ref nonnull, .. } => Ok(nonnull.offsets.len() as u64),
+            Vector { count, .. } |
+            Array { count, .. } => Ok(count),
+            Scalar { .. } => Ok(0),
+            UntaggedUnion { .. } => Ok(1),
+            _ => {
+                let msg = format!(
+                    "get_field_count: can't handle type: {:?}, with layout: {:?}",
+                    ty,
+                    layout
+                );
+                err!(Unimplemented(msg))
+            }
+        }
+    }
+
+    pub(super) fn eval_operand_to_primval(
+        &mut self,
+        op: &mir::Operand<'tcx>,
+    ) -> EvalResult<'tcx, PrimVal> {
+        let valty = self.eval_operand(op)?;
+        self.value_to_primval(valty)
+    }
+
+    pub(crate) fn operands_to_args(
+        &mut self,
+        ops: &[mir::Operand<'tcx>],
+    ) -> EvalResult<'tcx, Vec<ValTy<'tcx>>> {
+        ops.into_iter()
+            .map(|op| self.eval_operand(op))
+            .collect()
+    }
+
+    pub fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> {
+        use rustc::mir::Operand::*;
+        match *op {
+            Consume(ref lvalue) => {
+                Ok(ValTy {
+                    value: self.eval_and_read_lvalue(lvalue)?,
+                    ty: self.operand_ty(op),
+                })
+            },
+
+            Constant(ref constant) => {
+                use rustc::mir::Literal;
+                let mir::Constant { ref literal, .. } = **constant;
+                let value = match *literal {
+                    Literal::Value { ref value } => self.const_to_value(&value.val)?,
+
+                    Literal::Promoted { index } => {
+                        let cid = GlobalId {
+                            instance: self.frame().instance,
+                            promoted: Some(index),
+                        };
+                        Value::ByRef(*self.globals.get(&cid).expect("promoted not cached"))
+                    }
+                };
+
+                Ok(ValTy {
+                    value,
+                    ty: self.operand_ty(op),
+                })
+            }
+        }
+    }
+
+    pub fn read_discriminant_value(
+        &self,
+        adt_ptr: MemoryPointer,
+        adt_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx, u128> {
+        use rustc::ty::layout::Layout::*;
+        let adt_layout = self.type_layout(adt_ty)?;
+        //trace!("read_discriminant_value {:#?}", adt_layout);
+
+        let discr_val = match *adt_layout {
+            General { discr, .. } => {
+                let discr_size = discr.size().bytes();
+                self.memory.read_primval(adt_ptr, discr_size, false)?.to_bytes()?
+            }
+
+            CEnum {
+                discr,
+                signed,
+                ..
+            } => {
+                let discr_size = discr.size().bytes();
+                self.memory.read_primval(adt_ptr, discr_size, signed)?.to_bytes()?
+            }
+
+            RawNullablePointer { nndiscr, value } => {
+                let discr_size = value.size(&self.tcx.data_layout).bytes();
+                trace!("rawnullablepointer with size {}", discr_size);
+                self.read_nonnull_discriminant_value(
+                    adt_ptr,
+                    nndiscr as u128,
+                    discr_size,
+                )?
+            }
+
+            StructWrappedNullablePointer {
+                nndiscr,
+                ref discrfield_source,
+                ..
+            } => {
+                let (offset, TyAndPacked { ty, packed }) = self.nonnull_offset_and_ty(
+                    adt_ty,
+                    nndiscr,
+                    discrfield_source,
+                )?;
+                let nonnull = adt_ptr.offset(offset.bytes(), &*self)?;
+                trace!("struct wrapped nullable pointer type: {}", ty);
+                // only the pointer part of a fat pointer is used for this space optimization
+                let discr_size = self.type_size(ty)?.expect(
+                    "bad StructWrappedNullablePointer discrfield",
+                );
+                self.read_maybe_aligned(!packed, |ectx| {
+                    ectx.read_nonnull_discriminant_value(nonnull, nndiscr as u128, discr_size)
+                })?
+            }
+
+            // The discriminant_value intrinsic returns 0 for non-sum types.
+            Array { .. } |
+            FatPointer { .. } |
+            Scalar { .. } |
+            Univariant { .. } |
+            Vector { .. } |
+            UntaggedUnion { .. } => 0,
+        };
+
+        Ok(discr_val)
+    }
+
+    fn read_nonnull_discriminant_value(
+        &self,
+        ptr: MemoryPointer,
+        nndiscr: u128,
+        discr_size: u64,
+    ) -> EvalResult<'tcx, u128> {
+        trace!(
+            "read_nonnull_discriminant_value: {:?}, {}, {}",
+            ptr,
+            nndiscr,
+            discr_size
+        );
+        // We are only interested in 0 vs. non-0, the sign does not matter for this
+        let null = match self.memory.read_primval(ptr, discr_size, false)? {
+            PrimVal::Bytes(0) => true,
+            PrimVal::Bytes(_) |
+            PrimVal::Ptr(..) => false,
+            PrimVal::Undef => return err!(ReadUndefBytes),
+        };
+        assert!(nndiscr == 0 || nndiscr == 1);
+        Ok(if !null { nndiscr } else { 1 - nndiscr })
+    }
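+    // A hedged illustration of the mapping above, using `Option<&u8>`, whose
+    // non-null variant `Some` has nndiscr == 1:
+    //
+    //     bytes read == 0 (null)    -> discriminant 1 - nndiscr == 0 (None)
+    //     bytes != 0 or a real Ptr  -> discriminant nndiscr == 1 (Some)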
+
+    pub fn read_global_as_value(&self, gid: GlobalId) -> Value {
+        Value::ByRef(*self.globals.get(&gid).expect("global not cached"))
+    }
+
+    pub fn operand_ty(&self, operand: &mir::Operand<'tcx>) -> Ty<'tcx> {
+        self.monomorphize(operand.ty(self.mir(), self.tcx), self.substs())
+    }
+
+    fn copy(&mut self, src: Pointer, dest: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx> {
+        let size = self.type_size(ty)?.expect(
+            "cannot copy from an unsized type",
+        );
+        let align = self.type_align(ty)?;
+        self.memory.copy(src, dest, size, align, false)?;
+        Ok(())
+    }
+
+    pub fn is_packed(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, bool> {
+        let layout = self.type_layout(ty)?;
+        use rustc::ty::layout::Layout::*;
+        Ok(match *layout {
+            Univariant { ref variant, .. } => variant.packed,
+
+            StructWrappedNullablePointer { ref nonnull, .. } => nonnull.packed,
+
+            UntaggedUnion { ref variants } => variants.packed,
+
+            // can only apply #[repr(packed)] to struct and union
+            _ => false,
+        })
+    }
+
+    pub fn force_allocation(&mut self, lvalue: Lvalue) -> EvalResult<'tcx, Lvalue> {
+        let new_lvalue = match lvalue {
+            Lvalue::Local { frame, local } => {
+                // -1 since we don't store the return value
+                match self.stack[frame].locals[local.index() - 1] {
+                    None => return err!(DeadLocal),
+                    Some(Value::ByRef(ptr)) => {
+                        Lvalue::Ptr {
+                            ptr,
+                            extra: LvalueExtra::None,
+                        }
+                    }
+                    Some(val) => {
+                        let ty = self.stack[frame].mir.local_decls[local].ty;
+                        let ty = self.monomorphize(ty, self.stack[frame].instance.substs);
+                        let substs = self.stack[frame].instance.substs;
+                        let ptr = self.alloc_ptr_with_substs(ty, substs)?;
+                        self.stack[frame].locals[local.index() - 1] =
+                            Some(Value::by_ref(ptr.into())); // it stays live
+                        self.write_value_to_ptr(val, ptr.into(), ty)?;
+                        Lvalue::from_ptr(ptr)
+                    }
+                }
+            }
+            Lvalue::Ptr { .. } => lvalue,
+        };
+        Ok(new_lvalue)
+    }
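+    // For illustration (hedged): force_allocation is what makes taking a
+    // reference to a by-value local work:
+    //
+    //     let x = 5i32; // may live as Value::ByVal in the locals array
+    //     let p = &x;   // the Ref rvalue forces x into a real allocation,
+    //                   // giving p an address; the local stays ByRef afterwards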
+
+    /// Ensures this `Value` is not a `ByRef`.
+    pub(super) fn follow_by_ref_value(
+        &self,
+        value: Value,
+        ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx, Value> {
+        match value {
+            Value::ByRef(PtrAndAlign { ptr, aligned }) => {
+                self.read_maybe_aligned(aligned, |ectx| ectx.read_value(ptr, ty))
+            }
+            other => Ok(other),
+        }
+    }
+
+    pub fn value_to_primval(
+        &self,
+        ValTy { value, ty }: ValTy<'tcx>,
+    ) -> EvalResult<'tcx, PrimVal> {
+        match self.follow_by_ref_value(value, ty)? {
+            Value::ByRef { .. } => bug!("follow_by_ref_value can't result in `ByRef`"),
+
+            Value::ByVal(primval) => {
+                // TODO: Do we really want insta-UB here?
+                self.ensure_valid_value(primval, ty)?;
+                Ok(primval)
+            }
+
+            Value::ByValPair(..) => bug!("value_to_primval can't work with fat pointers"),
+        }
+    }
+
+    pub fn write_null(&mut self, dest: Lvalue, dest_ty: Ty<'tcx>) -> EvalResult<'tcx> {
+        self.write_primval(dest, PrimVal::Bytes(0), dest_ty)
+    }
+
+    pub fn write_ptr(&mut self, dest: Lvalue, val: Pointer, dest_ty: Ty<'tcx>) -> EvalResult<'tcx> {
+        let valty = ValTy {
+            value: val.to_value(),
+            ty: dest_ty,
+        };
+        self.write_value(valty, dest)
+    }
+
+    pub fn write_primval(
+        &mut self,
+        dest: Lvalue,
+        val: PrimVal,
+        dest_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx> {
+        let valty = ValTy {
+            value: Value::ByVal(val),
+            ty: dest_ty,
+        };
+        self.write_value(valty, dest)
+    }
+
+    pub fn write_value(
+        &mut self,
+        ValTy { value: src_val, ty: dest_ty }: ValTy<'tcx>,
+        dest: Lvalue,
+    ) -> EvalResult<'tcx> {
+        //trace!("Writing {:?} to {:?} at type {:?}", src_val, dest, dest_ty);
+        // Note that it is really important that the type here is the right one, and matches the type things are read at.
+        // In case `src_val` is a `ByValPair`, we don't do any magic here to handle padding properly, which is only
+        // correct if we never look at this data with the wrong type.
+
+        match dest {
+            Lvalue::Ptr {
+                ptr: PtrAndAlign { ptr, aligned },
+                extra,
+            } => {
+                assert_eq!(extra, LvalueExtra::None);
+                self.write_maybe_aligned_mut(
+                    aligned,
+                    |ectx| ectx.write_value_to_ptr(src_val, ptr, dest_ty),
+                )
+            }
+
+            Lvalue::Local { frame, local } => {
+                let dest = self.stack[frame].get_local(local)?;
+                self.write_value_possibly_by_val(
+                    src_val,
+                    |this, val| this.stack[frame].set_local(local, val),
+                    dest,
+                    dest_ty,
+                )
+            }
+        }
+    }
+
+    // The cases here can be a bit subtle. Read carefully!
+    fn write_value_possibly_by_val<F: FnOnce(&mut Self, Value) -> EvalResult<'tcx>>(
+        &mut self,
+        src_val: Value,
+        write_dest: F,
+        old_dest_val: Value,
+        dest_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx> {
+        if let Value::ByRef(PtrAndAlign {
+                                ptr: dest_ptr,
+                                aligned,
+                            }) = old_dest_val
+        {
+            // If the value is already `ByRef` (that is, backed by an `Allocation`),
+            // then we must write the new value into this allocation, because there may be
+            // other pointers into the allocation. These other pointers are logically
+            // pointers into the local variable, and must be able to observe the change.
+            //
+            // Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we
+            // knew for certain that there were no outstanding pointers to this allocation.
+            self.write_maybe_aligned_mut(aligned, |ectx| {
+                ectx.write_value_to_ptr(src_val, dest_ptr, dest_ty)
+            })?;
+
+        } else if let Value::ByRef(PtrAndAlign {
+                                       ptr: src_ptr,
+                                       aligned,
+                                   }) = src_val
+        {
+            // If the value is not `ByRef`, then we know there are no pointers to it
+            // and we can simply overwrite the `Value` in the locals array directly.
+            //
+            // In this specific case, where the source value is `ByRef`, we must duplicate
+            // the allocation, because this is a by-value operation. It would be incorrect
+            // if they referred to the same allocation, since then a change to one would
+            // implicitly change the other.
+            //
+            // It is a valid optimization to attempt to read a primitive value out of the
+            // source and write it into the destination without making an allocation, so
+            // we do so here.
+            self.read_maybe_aligned_mut(aligned, |ectx| {
+                if let Ok(Some(src_val)) = ectx.try_read_value(src_ptr, dest_ty) {
+                    write_dest(ectx, src_val)?;
+                } else {
+                    let dest_ptr = ectx.alloc_ptr(dest_ty)?.into();
+                    ectx.copy(src_ptr, dest_ptr, dest_ty)?;
+                    write_dest(ectx, Value::by_ref(dest_ptr))?;
+                }
+                Ok(())
+            })?;
+
+        } else {
+            // Finally, we have the simple case where neither source nor destination is
+            // `ByRef`. We may simply copy the source value over the destination.
+            write_dest(self, src_val)?;
+        }
+        Ok(())
+    }
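+    // A hedged sketch of why a ByRef destination must be written in place:
+    //
+    //     let mut v = vec![0i32];
+    //     let r = &mut v;         // v now lives in an allocation (ByRef)
+    //     r.push(1);              // writes through r hit that allocation,
+    //     assert_eq!(v.len(), 2); // so reads of v observe them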
+
+    pub fn write_value_to_ptr(
+        &mut self,
+        value: Value,
+        dest: Pointer,
+        dest_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx> {
+        match value {
+            Value::ByRef(PtrAndAlign { ptr, aligned }) => {
+                self.read_maybe_aligned_mut(aligned, |ectx| ectx.copy(ptr, dest, dest_ty))
+            }
+            Value::ByVal(primval) => {
+                let size = self.type_size(dest_ty)?.expect("dest type must be sized");
+                if size == 0 {
+                    assert!(primval.is_undef());
+                    Ok(())
+                } else {
+                    // TODO: Do we need signedness?
+                    self.memory.write_primval(dest.to_ptr()?, primval, size, false)
+                }
+            }
+            Value::ByValPair(a, b) => self.write_pair_to_ptr(a, b, dest.to_ptr()?, dest_ty),
+        }
+    }
+
+    pub fn write_pair_to_ptr(
+        &mut self,
+        a: PrimVal,
+        b: PrimVal,
+        ptr: MemoryPointer,
+        mut ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx> {
+        let mut packed = false;
+        while self.get_field_count(ty)? == 1 {
+            let field = self.get_field_ty(ty, 0)?;
+            ty = field.ty;
+            packed = packed || field.packed;
+        }
+        assert_eq!(self.get_field_count(ty)?, 2);
+        let field_0 = self.get_field_offset(ty, 0)?;
+        let field_1 = self.get_field_offset(ty, 1)?;
+        let field_0_ty = self.get_field_ty(ty, 0)?;
+        let field_1_ty = self.get_field_ty(ty, 1)?;
+        assert_eq!(
+            field_0_ty.packed,
+            field_1_ty.packed,
+            "the two fields must agree on being packed"
+        );
+        packed = packed || field_0_ty.packed;
+        let field_0_size = self.type_size(field_0_ty.ty)?.expect(
+            "pair element type must be sized",
+        );
+        let field_1_size = self.type_size(field_1_ty.ty)?.expect(
+            "pair element type must be sized",
+        );
+        let field_0_ptr = ptr.offset(field_0.bytes(), &self)?.into();
+        let field_1_ptr = ptr.offset(field_1.bytes(), &self)?.into();
+        // TODO: What about signedness?
+        self.write_maybe_aligned_mut(!packed, |ectx| {
+            ectx.memory.write_primval(field_0_ptr, a, field_0_size, false)
+        })?;
+        self.write_maybe_aligned_mut(!packed, |ectx| {
+            ectx.memory.write_primval(field_1_ptr, b, field_1_size, false)
+        })?;
+        Ok(())
+    }
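+    // E.g. (hedged): a fat pointer like `&[u8]` is a ByValPair(data_ptr, len);
+    // writing it stores the data pointer at field 0 and the length at field 1:
+    //
+    //     let s: &[u8] = &[1, 2, 3];
+    //     // conceptually: write_pair_to_ptr(Ptr(data), Bytes(3), dest, ty)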
+
+    pub fn ty_to_primval_kind(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimValKind> {
+        use syntax::ast::FloatTy;
+
+        let kind = match ty.sty {
+            ty::TyBool => PrimValKind::Bool,
+            ty::TyChar => PrimValKind::Char,
+
+            ty::TyInt(int_ty) => {
+                use syntax::ast::IntTy::*;
+                let size = match int_ty {
+                    I8 => 1,
+                    I16 => 2,
+                    I32 => 4,
+                    I64 => 8,
+                    I128 => 16,
+                    Is => self.memory.pointer_size(),
+                };
+                PrimValKind::from_int_size(size)
+            }
+
+            ty::TyUint(uint_ty) => {
+                use syntax::ast::UintTy::*;
+                let size = match uint_ty {
+                    U8 => 1,
+                    U16 => 2,
+                    U32 => 4,
+                    U64 => 8,
+                    U128 => 16,
+                    Us => self.memory.pointer_size(),
+                };
+                PrimValKind::from_uint_size(size)
+            }
+
+            ty::TyFloat(FloatTy::F32) => PrimValKind::F32,
+            ty::TyFloat(FloatTy::F64) => PrimValKind::F64,
+
+            ty::TyFnPtr(_) => PrimValKind::FnPtr,
+
+            ty::TyRef(_, ref tam) |
+            ty::TyRawPtr(ref tam) if self.type_is_sized(tam.ty) => PrimValKind::Ptr,
+
+            ty::TyAdt(def, _) if def.is_box() => PrimValKind::Ptr,
+
+            ty::TyAdt(def, substs) => {
+                use rustc::ty::layout::Layout::*;
+                match *self.type_layout(ty)? {
+                    CEnum { discr, signed, .. } => {
+                        let size = discr.size().bytes();
+                        if signed {
+                            PrimValKind::from_int_size(size)
+                        } else {
+                            PrimValKind::from_uint_size(size)
+                        }
+                    }
+
+                    RawNullablePointer { value, .. } => {
+                        use rustc::ty::layout::Primitive::*;
+                        match value {
+                            // TODO(solson): Does signedness matter here? What should the sign be?
+                            Int(int) => PrimValKind::from_uint_size(int.size().bytes()),
+                            F32 => PrimValKind::F32,
+                            F64 => PrimValKind::F64,
+                            Pointer => PrimValKind::Ptr,
+                        }
+                    }
+
+                    // represent single field structs as their single field
+                    Univariant { .. } => {
+                        // enums with just one variant are no different, but `.struct_variant()` doesn't work for enums
+                        let variant = &def.variants[0];
+                        // FIXME: also allow structs with only a single non zst field
+                        if variant.fields.len() == 1 {
+                            return self.ty_to_primval_kind(variant.fields[0].ty(self.tcx, substs));
+                        } else {
+                            return err!(TypeNotPrimitive(ty));
+                        }
+                    }
+
+                    _ => return err!(TypeNotPrimitive(ty)),
+                }
+            }
+
+            _ => return err!(TypeNotPrimitive(ty)),
+        };
+
+        Ok(kind)
+    }
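+    // E.g. (hedged): a newtype such as `struct Meters(u32);` hits the
+    // single-field Univariant case above and is classified as its field,
+    // i.e. PrimValKind::U32.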
+
+    fn ensure_valid_value(&self, val: PrimVal, ty: Ty<'tcx>) -> EvalResult<'tcx> {
+        match ty.sty {
+            ty::TyBool if val.to_bytes()? > 1 => err!(InvalidBool),
+
+            ty::TyChar if ::std::char::from_u32(val.to_bytes()? as u32).is_none() => {
+                err!(InvalidChar(val.to_bytes()? as u32 as u128))
+            }
+
+            _ => Ok(()),
+        }
+    }
+
+    pub fn read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
+        if let Some(val) = self.try_read_value(ptr, ty)? {
+            Ok(val)
+        } else {
+            bug!("primitive read failed for type: {:?}", ty);
+        }
+    }
+
+    pub(crate) fn read_ptr(
+        &self,
+        ptr: MemoryPointer,
+        pointee_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx, Value> {
+        let ptr_size = self.memory.pointer_size();
+        let p: Pointer = self.memory.read_ptr_sized_unsigned(ptr)?.into();
+        if self.type_is_sized(pointee_ty) {
+            Ok(p.to_value())
+        } else {
+            trace!("reading fat pointer extra of type {}", pointee_ty);
+            let extra = ptr.offset(ptr_size, self)?;
+            match self.tcx.struct_tail(pointee_ty).sty {
+                ty::TyDynamic(..) => Ok(p.to_value_with_vtable(
+                    self.memory.read_ptr_sized_unsigned(extra)?.to_ptr()?,
+                )),
+                ty::TySlice(..) | ty::TyStr => Ok(
+                    p.to_value_with_len(self.memory.read_ptr_sized_unsigned(extra)?.to_bytes()? as u64),
+                ),
+                _ => bug!("unsized primval ptr read from {:?}", pointee_ty),
+            }
+        }
+    }
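+    // Hedged illustration: how a pointer reads back depends on the pointee:
+    //
+    //     &u8           -> one pointer-sized read (thin pointer)
+    //     &[u8] / &str  -> pointer plus an extra usize length
+    //     &Trait        -> pointer plus an extra vtable pointer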
+
+    fn try_read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<Value>> {
+        use syntax::ast::FloatTy;
+
+        let ptr = ptr.to_ptr()?;
+        let val = match ty.sty {
+            ty::TyBool => {
+                let val = self.memory.read_primval(ptr, 1, false)?;
+                let val = match val {
+                    PrimVal::Bytes(0) => false,
+                    PrimVal::Bytes(1) => true,
+                    // TODO: This seems a little overeager, should reading at bool type already be insta-UB?
+                    _ => return err!(InvalidBool),
+                };
+                PrimVal::from_bool(val)
+            }
+            ty::TyChar => {
+                let c = self.memory.read_primval(ptr, 4, false)?.to_bytes()? as u32;
+                match ::std::char::from_u32(c) {
+                    Some(ch) => PrimVal::from_char(ch),
+                    None => return err!(InvalidChar(c as u128)),
+                }
+            }
+
+            ty::TyInt(int_ty) => {
+                use syntax::ast::IntTy::*;
+                let size = match int_ty {
+                    I8 => 1,
+                    I16 => 2,
+                    I32 => 4,
+                    I64 => 8,
+                    I128 => 16,
+                    Is => self.memory.pointer_size(),
+                };
+                self.memory.read_primval(ptr, size, true)?
+            }
+
+            ty::TyUint(uint_ty) => {
+                use syntax::ast::UintTy::*;
+                let size = match uint_ty {
+                    U8 => 1,
+                    U16 => 2,
+                    U32 => 4,
+                    U64 => 8,
+                    U128 => 16,
+                    Us => self.memory.pointer_size(),
+                };
+                self.memory.read_primval(ptr, size, false)?
+            }
+
+            ty::TyFloat(FloatTy::F32) => PrimVal::Bytes(self.memory.read_primval(ptr, 4, false)?.to_bytes()?),
+            ty::TyFloat(FloatTy::F64) => PrimVal::Bytes(self.memory.read_primval(ptr, 8, false)?.to_bytes()?),
+
+            ty::TyFnPtr(_) => self.memory.read_ptr_sized_unsigned(ptr)?,
+            ty::TyRef(_, ref tam) |
+            ty::TyRawPtr(ref tam) => return self.read_ptr(ptr, tam.ty).map(Some),
+
+            ty::TyAdt(def, _) => {
+                if def.is_box() {
+                    return self.read_ptr(ptr, ty.boxed_ty()).map(Some);
+                }
+                use rustc::ty::layout::Layout::*;
+                if let CEnum { discr, signed, .. } = *self.type_layout(ty)? {
+                    let size = discr.size().bytes();
+                    self.memory.read_primval(ptr, size, signed)?
+                } else {
+                    return Ok(None);
+                }
+            }
+
+            _ => return Ok(None),
+        };
+
+        Ok(Some(Value::ByVal(val)))
+    }
+
+    pub fn frame(&self) -> &Frame<'tcx> {
+        self.stack.last().expect("no call frames exist")
+    }
+
+    pub(super) fn frame_mut(&mut self) -> &mut Frame<'tcx> {
+        self.stack.last_mut().expect("no call frames exist")
+    }
+
+    pub(super) fn mir(&self) -> &'tcx mir::Mir<'tcx> {
+        self.frame().mir
+    }
+
+    pub(super) fn substs(&self) -> &'tcx Substs<'tcx> {
+        self.frame().instance.substs
+    }
+
+    fn unsize_into_ptr(
+        &mut self,
+        src: Value,
+        src_ty: Ty<'tcx>,
+        dest: Lvalue,
+        dest_ty: Ty<'tcx>,
+        sty: Ty<'tcx>,
+        dty: Ty<'tcx>,
+    ) -> EvalResult<'tcx> {
+        // A<Struct> -> A<Trait> conversion
+        let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty);
+
+        match (&src_pointee_ty.sty, &dest_pointee_ty.sty) {
+            (&ty::TyArray(_, length), &ty::TySlice(_)) => {
+                let ptr = src.into_ptr(&self.memory)?;
+                // u64 cast is from usize to u64, which is always good
+                let valty = ValTy {
+                    value: ptr.to_value_with_len(length.val.to_const_int().unwrap().to_u64().unwrap()),
+                    ty: dest_ty,
+                };
+                self.write_value(valty, dest)
+            }
+            (&ty::TyDynamic(..), &ty::TyDynamic(..)) => {
+                // For now, upcasts are limited to changes in marker
+                // traits, and hence never require an actual change to the vtable.
+                let valty = ValTy {
+                    value: src,
+                    ty: dest_ty,
+                };
+                self.write_value(valty, dest)
+            }
+            (_, &ty::TyDynamic(ref data, _)) => {
+                let trait_ref = data.principal().unwrap().with_self_ty(
+                    self.tcx,
+                    src_pointee_ty,
+                );
+                let trait_ref = self.tcx.erase_regions(&trait_ref);
+                let vtable = self.get_vtable(src_pointee_ty, trait_ref)?;
+                let ptr = src.into_ptr(&self.memory)?;
+                let valty = ValTy {
+                    value: ptr.to_value_with_vtable(vtable),
+                    ty: dest_ty,
+                };
+                self.write_value(valty, dest)
+            }
+
+            _ => bug!("invalid unsizing {:?} -> {:?}", src_ty, dest_ty),
+        }
+    }
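+    // For illustration (hedged): the array -> slice arm above implements
+    // coercions such as
+    //
+    //     let a = [1u8, 2, 3, 4];
+    //     let s: &[u8] = &a; // thin &[u8; 4] becomes a fat (ptr, len == 4) value
+    //
+    // while the last arm attaches a vtable for `&T` -> `&Trait` coercions.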
+
+    fn unsize_into(
+        &mut self,
+        src: Value,
+        src_ty: Ty<'tcx>,
+        dest: Lvalue,
+        dest_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx> {
+        match (&src_ty.sty, &dest_ty.sty) {
+            (&ty::TyRef(_, ref s), &ty::TyRef(_, ref d)) |
+            (&ty::TyRef(_, ref s), &ty::TyRawPtr(ref d)) |
+            (&ty::TyRawPtr(ref s), &ty::TyRawPtr(ref d)) => {
+                self.unsize_into_ptr(src, src_ty, dest, dest_ty, s.ty, d.ty)
+            }
+            (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) => {
+                if def_a.is_box() || def_b.is_box() {
+                    if !def_a.is_box() || !def_b.is_box() {
+                        panic!("invalid unsizing between {:?} -> {:?}", src_ty, dest_ty);
+                    }
+                    return self.unsize_into_ptr(
+                        src,
+                        src_ty,
+                        dest,
+                        dest_ty,
+                        src_ty.boxed_ty(),
+                        dest_ty.boxed_ty(),
+                    );
+                }
+                if self.ty_to_primval_kind(src_ty).is_ok() {
+                    // TODO: We ignore the packed flag here
+                    let sty = self.get_field_ty(src_ty, 0)?.ty;
+                    let dty = self.get_field_ty(dest_ty, 0)?.ty;
+                    return self.unsize_into(src, sty, dest, dty);
+                }
+                // unsizing of generic struct with pointer fields
+                // Example: `Arc<T>` -> `Arc<Trait>`
+                // here we need to increase the size of every &T thin ptr field to a fat ptr
+
+                assert_eq!(def_a, def_b);
+
+                let src_fields = def_a.variants[0].fields.iter();
+                let dst_fields = def_b.variants[0].fields.iter();
+
+                //let src = adt::MaybeSizedValue::sized(src);
+                //let dst = adt::MaybeSizedValue::sized(dst);
+                let src_ptr = match src {
+                    Value::ByRef(PtrAndAlign { ptr, aligned: true }) => ptr,
+                    // TODO: Is it possible for unaligned pointers to occur here?
+                    _ => bug!("expected aligned pointer, got {:?}", src),
+                };
+
+                // FIXME(solson)
+                let dest = self.force_allocation(dest)?.to_ptr()?;
+                let iter = src_fields.zip(dst_fields).enumerate();
+                for (i, (src_f, dst_f)) in iter {
+                    let src_fty = self.field_ty(substs_a, src_f);
+                    let dst_fty = self.field_ty(substs_b, dst_f);
+                    if self.type_size(dst_fty)? == Some(0) {
+                        continue;
+                    }
+                    let src_field_offset = self.get_field_offset(src_ty, i)?.bytes();
+                    let dst_field_offset = self.get_field_offset(dest_ty, i)?.bytes();
+                    let src_f_ptr = src_ptr.offset(src_field_offset, &self)?;
+                    let dst_f_ptr = dest.offset(dst_field_offset, &self)?;
+                    if src_fty == dst_fty {
+                        self.copy(src_f_ptr, dst_f_ptr.into(), src_fty)?;
+                    } else {
+                        self.unsize_into(
+                            Value::by_ref(src_f_ptr),
+                            src_fty,
+                            Lvalue::from_ptr(dst_f_ptr),
+                            dst_fty,
+                        )?;
+                    }
+                }
+                Ok(())
+            }
+            _ => {
+                bug!(
+                    "unsize_into: invalid conversion: {:?} -> {:?}",
+                    src_ty,
+                    dest_ty
+                )
+            }
+        }
+    }
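+    // Hedged sketch of the ADT case above (the `Arc<T>` -> `Arc<Trait>` style):
+    //
+    //     use std::sync::Arc;
+    //     let a: Arc<[u8; 2]> = Arc::new([1, 2]);
+    //     let b: Arc<[u8]> = a; // same ADT on both sides; only the pointer
+    //                           // field is unsized into a fat pointer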
+
+    pub fn dump_local(&self, lvalue: Lvalue) {
+        // Debug output
+        match lvalue {
+            Lvalue::Local { frame, local } => {
+                let mut allocs = Vec::new();
+                let mut msg = format!("{:?}", local);
+                if frame != self.cur_frame() {
+                    write!(msg, " ({} frames up)", self.cur_frame() - frame).unwrap();
+                }
+                write!(msg, ":").unwrap();
+
+                match self.stack[frame].get_local(local) {
+                    Err(EvalError { kind: EvalErrorKind::DeadLocal, .. }) => {
+                        write!(msg, " is dead").unwrap();
+                    }
+                    Err(err) => {
+                        panic!("Failed to access local: {:?}", err);
+                    }
+                    Ok(Value::ByRef(PtrAndAlign { ptr, aligned })) => {
+                        match ptr.into_inner_primval() {
+                            PrimVal::Ptr(ptr) => {
+                                write!(msg, " by {}ref:", if aligned { "" } else { "unaligned " })
+                                    .unwrap();
+                                allocs.push(ptr.alloc_id);
+                            }
+                            ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(),
+                        }
+                    }
+                    Ok(Value::ByVal(val)) => {
+                        write!(msg, " {:?}", val).unwrap();
+                        if let PrimVal::Ptr(ptr) = val {
+                            allocs.push(ptr.alloc_id);
+                        }
+                    }
+                    Ok(Value::ByValPair(val1, val2)) => {
+                        write!(msg, " ({:?}, {:?})", val1, val2).unwrap();
+                        if let PrimVal::Ptr(ptr) = val1 {
+                            allocs.push(ptr.alloc_id);
+                        }
+                        if let PrimVal::Ptr(ptr) = val2 {
+                            allocs.push(ptr.alloc_id);
+                        }
+                    }
+                }
+
+                trace!("{}", msg);
+                self.memory.dump_allocs(allocs);
+            }
+            Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned }, .. } => {
+                match ptr.into_inner_primval() {
+                    PrimVal::Ptr(ptr) => {
+                        trace!("by {}ref:", if aligned { "" } else { "unaligned " });
+                        self.memory.dump_alloc(ptr.alloc_id);
+                    }
+                    ptr => trace!(" integral by ref: {:?}", ptr),
+                }
+            }
+        }
+    }
+
+    /// Convenience function to ensure correct usage of locals
+    pub fn modify_local<F>(&mut self, frame: usize, local: mir::Local, f: F) -> EvalResult<'tcx>
+    where
+        F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>,
+    {
+        let val = self.stack[frame].get_local(local)?;
+        let new_val = f(self, val)?;
+        self.stack[frame].set_local(local, new_val)?;
+        // FIXME(solson): Run this when setting to Undef? (See previous version of this code.)
+        // if let Value::ByRef(ptr) = self.stack[frame].get_local(local) {
+        //     self.memory.deallocate(ptr)?;
+        // }
+        Ok(())
+    }
+
+    pub fn report(&self, e: &mut EvalError) {
+        if let Some(ref mut backtrace) = e.backtrace {
+            let mut trace_text = "\n\nAn error occurred in miri:\n".to_string();
+            let mut skip_init = true;
+            backtrace.resolve();
+            'frames: for (i, frame) in backtrace.frames().iter().enumerate() {
+                for symbol in frame.symbols() {
+                    if let Some(name) = symbol.name() {
+                        // demangle the symbol via `to_string`
+                        let name = name.to_string();
+                        if name.starts_with("miri::after_analysis") {
+                            // don't report initialization gibberish
+                            break 'frames;
+                        } else if name.starts_with("backtrace::capture::Backtrace::new")
+                            // debug mode produces funky symbol names
+                            || name.starts_with("backtrace::capture::{{impl}}::new")
+                        {
+                            // don't report backtrace internals
+                            skip_init = false;
+                            continue 'frames;
+                        }
+                    }
+                }
+                if skip_init {
+                    continue;
+                }
+                for symbol in frame.symbols() {
+                    write!(trace_text, "{}: ", i).unwrap();
+                    if let Some(name) = symbol.name() {
+                        write!(trace_text, "{}\n", name).unwrap();
+                    } else {
+                        write!(trace_text, "<unknown>\n").unwrap();
+                    }
+                    write!(trace_text, "\tat ").unwrap();
+                    if let Some(file_path) = symbol.filename() {
+                        write!(trace_text, "{}", file_path.display()).unwrap();
+                    } else {
+                        write!(trace_text, "<unknown_file>").unwrap();
+                    }
+                    if let Some(line) = symbol.lineno() {
+                        write!(trace_text, ":{}\n", line).unwrap();
+                    } else {
+                        write!(trace_text, "\n").unwrap();
+                    }
+                }
+            }
+            error!("{}", trace_text);
+        }
+        if let Some(frame) = self.stack().last() {
+            let block = &frame.mir.basic_blocks()[frame.block];
+            let span = if frame.stmt < block.statements.len() {
+                block.statements[frame.stmt].source_info.span
+            } else {
+                block.terminator().source_info.span
+            };
+            let mut err = self.tcx.sess.struct_span_err(span, &e.to_string());
+            for &Frame { instance, span, .. } in self.stack().iter().rev() {
+                if self.tcx.def_key(instance.def_id()).disambiguated_data.data ==
+                    DefPathData::ClosureExpr
+                {
+                    err.span_note(span, "inside call to closure");
+                    continue;
+                }
+                err.span_note(span, &format!("inside call to {}", instance));
+            }
+            err.emit();
+        } else {
+            self.tcx.sess.err(&e.to_string());
+        }
+    }
+}
+
+impl<'tcx> Frame<'tcx> {
+    pub fn get_local(&self, local: mir::Local) -> EvalResult<'tcx, Value> {
+        // Subtract 1 because we don't store a value for the ReturnPointer, the local with index 0.
+        self.locals[local.index() - 1].ok_or(EvalErrorKind::DeadLocal.into())
+    }
+
+    fn set_local(&mut self, local: mir::Local, value: Value) -> EvalResult<'tcx> {
+        // Subtract 1 because we don't store a value for the ReturnPointer, the local with index 0.
+        match self.locals[local.index() - 1] {
+            None => err!(DeadLocal),
+            Some(ref mut local) => {
+                *local = value;
+                Ok(())
+            }
+        }
+    }
+
+    pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, Option<Value>> {
+        trace!("{:?} is now live", local);
+
+        let old = self.locals[local.index() - 1];
+        self.locals[local.index() - 1] = Some(Value::ByVal(PrimVal::Undef)); // StorageLive *always* kills the value that's currently stored
+        return Ok(old);
+    }
+
+    /// Returns the old value of the local
+    pub fn storage_dead(&mut self, local: mir::Local) -> EvalResult<'tcx, Option<Value>> {
+        trace!("{:?} is now dead", local);
+
+        let old = self.locals[local.index() - 1];
+        self.locals[local.index() - 1] = None;
+        return Ok(old);
+    }
+}
+
+// TODO(solson): Upstream these methods into rustc::ty::layout.
+
+pub(super) trait IntegerExt {
+    fn size(self) -> Size;
+}
+
+impl IntegerExt for layout::Integer {
+    fn size(self) -> Size {
+        use rustc::ty::layout::Integer::*;
+        match self {
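+            // `I1` (a 1-bit value, e.g. a `bool` discriminant) still occupies a whole byte in memory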
+            I1 | I8 => Size::from_bits(8),
+            I16 => Size::from_bits(16),
+            I32 => Size::from_bits(32),
+            I64 => Size::from_bits(64),
+            I128 => Size::from_bits(128),
+        }
+    }
+}
+
+/// FIXME: expose trans::monomorphize::resolve_closure
+pub fn resolve_closure<'a, 'tcx>(
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    def_id: DefId,
+    substs: ty::ClosureSubsts<'tcx>,
+    requested_kind: ty::ClosureKind,
+) -> ty::Instance<'tcx> {
+    let actual_kind = tcx.closure_kind(def_id);
+    match needs_fn_once_adapter_shim(actual_kind, requested_kind) {
+        Ok(true) => fn_once_adapter_instance(tcx, def_id, substs),
+        _ => ty::Instance::new(def_id, substs.substs),
+    }
+}
+
+fn fn_once_adapter_instance<'a, 'tcx>(
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    closure_did: DefId,
+    substs: ty::ClosureSubsts<'tcx>,
+) -> ty::Instance<'tcx> {
+    debug!("fn_once_adapter_shim({:?}, {:?})", closure_did, substs);
+    let fn_once = tcx.lang_items().fn_once_trait().unwrap();
+    let call_once = tcx.associated_items(fn_once)
+        .find(|it| it.kind == ty::AssociatedKind::Method)
+        .unwrap()
+        .def_id;
+    let def = ty::InstanceDef::ClosureOnceShim { call_once };
+
+    let self_ty = tcx.mk_closure_from_closure_substs(closure_did, substs);
+
+    let sig = tcx.fn_sig(closure_did).subst(tcx, substs.substs);
+    let sig = tcx.erase_late_bound_regions_and_normalize(&sig);
+    assert_eq!(sig.inputs().len(), 1);
+    let substs = tcx.mk_substs(
+        [Kind::from(self_ty), Kind::from(sig.inputs()[0])]
+            .iter()
+            .cloned(),
+    );
+
+    debug!("fn_once_adapter_shim: self_ty={:?} sig={:?}", self_ty, sig);
+    ty::Instance { def, substs }
+}
+
+fn needs_fn_once_adapter_shim(
+    actual_closure_kind: ty::ClosureKind,
+    trait_closure_kind: ty::ClosureKind,
+) -> Result<bool, ()> {
+    match (actual_closure_kind, trait_closure_kind) {
+        (ty::ClosureKind::Fn, ty::ClosureKind::Fn) |
+        (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) |
+        (ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce) => {
+            // No adapter needed.
+            Ok(false)
+        }
+        (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => {
+            // The closure fn `llfn` is a `fn(&self, ...)`.  We want a
+            // `fn(&mut self, ...)`. In fact, at trans time, these are
+            // basically the same thing, so we can just return llfn.
+            Ok(false)
+        }
+        (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) |
+        (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => {
+            // The closure fn `llfn` is a `fn(&self, ...)` or `fn(&mut
+            // self, ...)`.  We want a `fn(self, ...)`. We can produce
+            // this by doing something like:
+            //
+            //     fn call_once(self, ...) { call_mut(&self, ...) }
+            //     fn call_once(mut self, ...) { call_mut(&mut self, ...) }
+            //
+            // These are both the same at trans time.
+            Ok(true)
+        }
+        _ => Err(()),
+    }
+}
+
+/// The point where linking happens. Resolve a (def_id, substs)
+/// pair to an instance.
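+///
+/// For example, resolving the `DefId` of `Clone::clone` with substs
+/// `[Vec<u8>]` dispatches through `resolve_associated_item` and yields the
+/// `Instance` of the `impl Clone for Vec<u8>` method rather than the trait
+/// item itself (the concrete types here are illustrative).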
+pub fn resolve<'a, 'tcx>(
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    def_id: DefId,
+    substs: &'tcx Substs<'tcx>,
+) -> ty::Instance<'tcx> {
+    debug!("resolve(def_id={:?}, substs={:?})", def_id, substs);
+    let result = if let Some(trait_def_id) = tcx.trait_of_item(def_id) {
+        debug!(" => associated item, attempting to find impl");
+        let item = tcx.associated_item(def_id);
+        resolve_associated_item(tcx, &item, trait_def_id, substs)
+    } else {
+        let item_type = def_ty(tcx, def_id, substs);
+        let def = match item_type.sty {
+            ty::TyFnDef(..)
+                if {
+                       let f = item_type.fn_sig(tcx);
+                       f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic
+                   } => {
+                debug!(" => intrinsic");
+                ty::InstanceDef::Intrinsic(def_id)
+            }
+            _ => {
+                if Some(def_id) == tcx.lang_items().drop_in_place_fn() {
+                    let ty = substs.type_at(0);
+                    if needs_drop_glue(tcx, ty) {
+                        debug!(" => nontrivial drop glue");
+                        ty::InstanceDef::DropGlue(def_id, Some(ty))
+                    } else {
+                        debug!(" => trivial drop glue");
+                        ty::InstanceDef::DropGlue(def_id, None)
+                    }
+                } else {
+                    debug!(" => free item");
+                    ty::InstanceDef::Item(def_id)
+                }
+            }
+        };
+        ty::Instance { def, substs }
+    };
+    debug!(
+        "resolve(def_id={:?}, substs={:?}) = {}",
+        def_id,
+        substs,
+        result
+    );
+    result
+}
+
+pub fn needs_drop_glue<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, t: Ty<'tcx>) -> bool {
+    assert!(t.is_normalized_for_trans());
+
+    let t = tcx.erase_regions(&t);
+
+    // FIXME (#22815): note that type_needs_drop conservatively
+    // approximates in some cases and may say a type expression
+    // requires drop glue when it actually does not.
+    //
+    // (In this case it is not clear whether any harm is done, i.e.
+    // erroneously returning `true` in some cases where we could have
+    // returned `false` does not appear unsound. The impact on
+    // code quality is unknown at this time.)
+
+    let env = ty::ParamEnv::empty(Reveal::All);
+    if !t.needs_drop(tcx, env) {
+        return false;
+    }
+    match t.sty {
+        ty::TyAdt(def, _) if def.is_box() => {
+            let typ = t.boxed_ty();
+            if !typ.needs_drop(tcx, env) && type_is_sized(tcx, typ) {
+                let layout = t.layout(tcx, ty::ParamEnv::empty(Reveal::All)).unwrap();
+                // `Box<ZeroSizeType>` does not allocate.
+                layout.size(&tcx.data_layout).bytes() != 0
+            } else {
+                true
+            }
+        }
+        _ => true,
+    }
+}
+
+fn resolve_associated_item<'a, 'tcx>(
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    trait_item: &ty::AssociatedItem,
+    trait_id: DefId,
+    rcvr_substs: &'tcx Substs<'tcx>,
+) -> ty::Instance<'tcx> {
+    let def_id = trait_item.def_id;
+    debug!(
+        "resolve_associated_item(trait_item={:?}, \
+                                    trait_id={:?}, \
+                                    rcvr_substs={:?})",
+        def_id,
+        trait_id,
+        rcvr_substs
+    );
+
+    let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs);
+    let vtbl = tcx.trans_fulfill_obligation(DUMMY_SP, ty::Binder(trait_ref));
+
+    // Now that we know which impl is being used, we can dispatch to
+    // the actual function:
+    match vtbl {
+        ::rustc::traits::VtableImpl(impl_data) => {
+            let (def_id, substs) =
+                ::rustc::traits::find_associated_item(tcx, trait_item, rcvr_substs, &impl_data);
+            let substs = tcx.erase_regions(&substs);
+            ty::Instance::new(def_id, substs)
+        }
+        ::rustc::traits::VtableGenerator(closure_data) => {
+            ty::Instance {
+                def: ty::InstanceDef::Item(closure_data.closure_def_id),
+                substs: closure_data.substs.substs
+            }
+        }
+        ::rustc::traits::VtableClosure(closure_data) => {
+            let trait_closure_kind = tcx.lang_items().fn_trait_kind(trait_id).unwrap();
+            resolve_closure(
+                tcx,
+                closure_data.closure_def_id,
+                closure_data.substs,
+                trait_closure_kind,
+            )
+        }
+        ::rustc::traits::VtableFnPointer(ref data) => {
+            ty::Instance {
+                def: ty::InstanceDef::FnPtrShim(trait_item.def_id, data.fn_ty),
+                substs: rcvr_substs,
+            }
+        }
+        ::rustc::traits::VtableObject(ref data) => {
+            let index = tcx.get_vtable_index_of_object_method(data, def_id);
+            ty::Instance {
+                def: ty::InstanceDef::Virtual(def_id, index),
+                substs: rcvr_substs,
+            }
+        }
+        ::rustc::traits::VtableBuiltin(..) if Some(trait_id) == tcx.lang_items().clone_trait() => {
+            ty::Instance {
+                def: ty::InstanceDef::CloneShim(def_id, trait_ref.self_ty()),
+                substs: rcvr_substs
+            }
+        }
+        _ => bug!("static call to invalid vtable: {:?}", vtbl),
+    }
+}
+
+pub fn def_ty<'a, 'tcx>(
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    def_id: DefId,
+    substs: &'tcx Substs<'tcx>,
+) -> Ty<'tcx> {
+    let ty = tcx.type_of(def_id);
+    apply_param_substs(tcx, substs, &ty)
+}
+
+/// Monomorphizes a type from the AST by first applying the in-scope
+/// substitutions and then normalizing any associated types.
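+///
+/// For example, with `param_substs` mapping `T => u32`, a value of type
+/// `<Vec<T> as IntoIterator>::Item` is first substituted to
+/// `<Vec<u32> as IntoIterator>::Item` and then normalized to `u32`.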
+pub fn apply_param_substs<'a, 'tcx, T>(
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    param_substs: &Substs<'tcx>,
+    value: &T,
+) -> T
+where
+    T: ::rustc::infer::TransNormalize<'tcx>,
+{
+    debug!(
+        "apply_param_substs(param_substs={:?}, value={:?})",
+        param_substs,
+        value
+    );
+    let substituted = value.subst(tcx, param_substs);
+    let substituted = tcx.erase_regions(&substituted);
+    AssociatedTypeNormalizer { tcx }.fold(&substituted)
+}
+
+
+struct AssociatedTypeNormalizer<'a, 'tcx: 'a> {
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
+}
+
+impl<'a, 'tcx> AssociatedTypeNormalizer<'a, 'tcx> {
+    fn fold<T: TypeFoldable<'tcx>>(&mut self, value: &T) -> T {
+        if !value.has_projections() {
+            value.clone()
+        } else {
+            value.fold_with(self)
+        }
+    }
+}
+
+impl<'a, 'tcx> ::rustc::ty::fold::TypeFolder<'tcx, 'tcx> for AssociatedTypeNormalizer<'a, 'tcx> {
+    fn tcx<'c>(&'c self) -> TyCtxt<'c, 'tcx, 'tcx> {
+        self.tcx
+    }
+
+    fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+        if !ty.has_projections() {
+            ty
+        } else {
+            self.tcx.normalize_associated_type(&ty)
+        }
+    }
+}
+
+fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
+    // generics are weird, don't run this function on a generic
+    assert!(!ty.needs_subst());
+    ty.is_sized(tcx, ty::ParamEnv::empty(Reveal::All), DUMMY_SP)
+}
+
+pub fn resolve_drop_in_place<'a, 'tcx>(
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    ty: Ty<'tcx>,
+) -> ty::Instance<'tcx> {
+    let def_id = tcx.require_lang_item(::rustc::middle::lang_items::DropInPlaceFnLangItem);
+    let substs = tcx.intern_substs(&[Kind::from(ty)]);
+    resolve(tcx, def_id, substs)
+}
diff --git a/src/librustc/mir/interpret/lvalue.rs b/src/librustc/mir/interpret/lvalue.rs
new file mode 100644 (file)
index 0000000..36b396a
--- /dev/null
@@ -0,0 +1,506 @@
+use rustc::mir;
+use rustc::ty::layout::{Size, Align};
+use rustc::ty::{self, Ty};
+use rustc_data_structures::indexed_vec::Idx;
+
+use super::{EvalResult, EvalContext, MemoryPointer, PrimVal, Value, Pointer, Machine, PtrAndAlign, ValTy};
+
+#[derive(Copy, Clone, Debug)]
+pub enum Lvalue {
+    /// An lvalue referring to a value allocated in the `Memory` system.
+    Ptr {
+        /// An lvalue may have an invalid (integral or undef) pointer,
+        /// since it might be turned back into a reference
+        /// before ever being dereferenced.
+        ptr: PtrAndAlign,
+        extra: LvalueExtra,
+    },
+
+    /// An lvalue referring to a value on the stack. Represented by a stack frame index paired with
+    /// a Mir local index.
+    Local { frame: usize, local: mir::Local },
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum LvalueExtra {
+    None,
+    Length(u64),
+    Vtable(MemoryPointer),
+    DowncastVariant(usize),
+}
+
+/// Uniquely identifies a specific constant or static.
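+///
+/// For example, a `static FOO: u32 = 5;` is identified by
+/// `GlobalId { instance: Instance::mono(tcx, foo_def_id), promoted: None }`
+/// (`foo_def_id` standing in for the static's `DefId`), while a promoted
+/// rvalue inside a function also records its `mir::Promoted` index.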
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
+pub struct GlobalId<'tcx> {
+    /// For a constant or static, the `Instance` of the item itself.
+    /// For a promoted global, the `Instance` of the function they belong to.
+    pub instance: ty::Instance<'tcx>,
+
+    /// The index for promoted globals within their function's `Mir`.
+    pub promoted: Option<mir::Promoted>,
+}
+
+impl<'tcx> Lvalue {
+    /// Produces an Lvalue that will error if attempted to be read from
+    pub fn undef() -> Self {
+        Self::from_primval_ptr(PrimVal::Undef.into())
+    }
+
+    pub fn from_primval_ptr(ptr: Pointer) -> Self {
+        Lvalue::Ptr {
+            ptr: PtrAndAlign { ptr, aligned: true },
+            extra: LvalueExtra::None,
+        }
+    }
+
+    pub fn from_ptr(ptr: MemoryPointer) -> Self {
+        Self::from_primval_ptr(ptr.into())
+    }
+
+    pub(super) fn to_ptr_extra_aligned(self) -> (PtrAndAlign, LvalueExtra) {
+        match self {
+            Lvalue::Ptr { ptr, extra } => (ptr, extra),
+            _ => bug!("to_ptr_extra_aligned: expected Lvalue::Ptr, got {:?}", self),
+        }
+    }
+
+    pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
+        let (ptr, extra) = self.to_ptr_extra_aligned();
+        // At this point, we forget about the alignment information -- the lvalue has been turned into a reference,
+        // and no matter where it came from, it now must be aligned.
+        assert_eq!(extra, LvalueExtra::None);
+        ptr.to_ptr()
+    }
+
+    pub(super) fn elem_ty_and_len(self, ty: Ty<'tcx>) -> (Ty<'tcx>, u64) {
+        match ty.sty {
+            ty::TyArray(elem, n) => (elem, n.val.to_const_int().unwrap().to_u64().unwrap() as u64),
+
+            ty::TySlice(elem) => {
+                match self {
+                    Lvalue::Ptr { extra: LvalueExtra::Length(len), .. } => (elem, len),
+                    _ => {
+                        bug!(
+                            "elem_ty_and_len of a TySlice given non-slice lvalue: {:?}",
+                            self
+                        )
+                    }
+                }
+            }
+
+            _ => bug!("elem_ty_and_len expected array or slice, got {:?}", ty),
+        }
+    }
+}
+
+impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
+    /// Reads a value from the lvalue without going through the intermediate step of obtaining
+    /// a `miri::Lvalue`
+    pub fn try_read_lvalue(
+        &mut self,
+        lvalue: &mir::Lvalue<'tcx>,
+    ) -> EvalResult<'tcx, Option<Value>> {
+        use rustc::mir::Lvalue::*;
+        match *lvalue {
+            // Might allow this in the future; right now there's no way to do this from Rust code anyway
+            Local(mir::RETURN_POINTER) => err!(ReadFromReturnPointer),
+            // Directly reading a local will always succeed
+            Local(local) => self.frame().get_local(local).map(Some),
+            // Directly reading a static will always succeed
+            Static(ref static_) => {
+                let instance = ty::Instance::mono(self.tcx, static_.def_id);
+                let cid = GlobalId {
+                    instance,
+                    promoted: None,
+                };
+                Ok(Some(Value::ByRef(
+                    *self.globals.get(&cid).expect("global not cached"),
+                )))
+            }
+            Projection(ref proj) => self.try_read_lvalue_projection(proj),
+        }
+    }
+
+    fn try_read_lvalue_projection(
+        &mut self,
+        proj: &mir::LvalueProjection<'tcx>,
+    ) -> EvalResult<'tcx, Option<Value>> {
+        use rustc::mir::ProjectionElem::*;
+        let base = match self.try_read_lvalue(&proj.base)? {
+            Some(base) => base,
+            None => return Ok(None),
+        };
+        let base_ty = self.lvalue_ty(&proj.base);
+        match proj.elem {
+            Field(field, _) => match (field.index(), base) {
+                // the only field of a struct
+                (0, Value::ByVal(val)) => Ok(Some(Value::ByVal(val))),
+                // split fat pointers, 2 element tuples, ...
+                (0...1, Value::ByValPair(a, b)) if self.get_field_count(base_ty)? == 2 => {
+                    let val = [a, b][field.index()];
+                    Ok(Some(Value::ByVal(val)))
+                },
+                // the only field of a struct is a fat pointer
+                (0, Value::ByValPair(..)) => Ok(Some(base)),
+                _ => Ok(None),
+            },
+            // The NullablePointer cases should work fine; we'd need to take care with normal enums
+            Downcast(..) |
+            Subslice { .. } |
+            // reading index 0 or index 1 from a ByVal or ByVal pair could be optimized
+            ConstantIndex { .. } | Index(_) |
+            // No way to optimize this projection any better than the normal lvalue path
+            Deref => Ok(None),
+        }
+    }
+
+    /// Returns a value and (in case of a ByRef) if we are supposed to use aligned accesses.
+    pub(super) fn eval_and_read_lvalue(
+        &mut self,
+        lvalue: &mir::Lvalue<'tcx>,
+    ) -> EvalResult<'tcx, Value> {
+        // Shortcut for things like accessing a fat pointer's field,
+        // which would otherwise (in the `eval_lvalue` path) require moving a `ByValPair` to memory
+        // and returning an `Lvalue::Ptr` to it
+        if let Some(val) = self.try_read_lvalue(lvalue)? {
+            return Ok(val);
+        }
+        let lvalue = self.eval_lvalue(lvalue)?;
+        self.read_lvalue(lvalue)
+    }
+
+    pub fn read_lvalue(&self, lvalue: Lvalue) -> EvalResult<'tcx, Value> {
+        match lvalue {
+            Lvalue::Ptr { ptr, extra } => {
+                assert_eq!(extra, LvalueExtra::None);
+                Ok(Value::ByRef(ptr))
+            }
+            Lvalue::Local { frame, local } => self.stack[frame].get_local(local),
+        }
+    }
+
+    pub fn eval_lvalue(&mut self, mir_lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Lvalue> {
+        use rustc::mir::Lvalue::*;
+        let lvalue = match *mir_lvalue {
+            Local(mir::RETURN_POINTER) => self.frame().return_lvalue,
+            Local(local) => Lvalue::Local {
+                frame: self.cur_frame(),
+                local,
+            },
+
+            Static(ref static_) => {
+                let instance = ty::Instance::mono(self.tcx, static_.def_id);
+                let gid = GlobalId {
+                    instance,
+                    promoted: None,
+                };
+                Lvalue::Ptr {
+                    ptr: *self.globals.get(&gid).expect("uncached global"),
+                    extra: LvalueExtra::None,
+                }
+            }
+
+            Projection(ref proj) => {
+                let ty = self.lvalue_ty(&proj.base);
+                let lvalue = self.eval_lvalue(&proj.base)?;
+                return self.eval_lvalue_projection(lvalue, ty, &proj.elem);
+            }
+        };
+
+        if log_enabled!(::log::LogLevel::Trace) {
+            self.dump_local(lvalue);
+        }
+
+        Ok(lvalue)
+    }
+
+    pub fn lvalue_field(
+        &mut self,
+        base: Lvalue,
+        field: mir::Field,
+        base_ty: Ty<'tcx>,
+        field_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx, Lvalue> {
+        use rustc::ty::layout::Layout::*;
+
+        let base_layout = self.type_layout(base_ty)?;
+        let field_index = field.index();
+        let (offset, packed) = match *base_layout {
+            Univariant { ref variant, .. } => (variant.offsets[field_index], variant.packed),
+
+            // mir optimizations treat single variant enums as structs
+            General { ref variants, .. } if variants.len() == 1 => {
+                (variants[0].offsets[field_index], variants[0].packed)
+            }
+
+            General { ref variants, .. } => {
+                let (_, base_extra) = base.to_ptr_extra_aligned();
+                if let LvalueExtra::DowncastVariant(variant_idx) = base_extra {
+                    // +1 for the discriminant, which is field 0
+                    assert!(!variants[variant_idx].packed);
+                    (variants[variant_idx].offsets[field_index + 1], false)
+                } else {
+                    bug!("field access on enum had no variant index");
+                }
+            }
+
+            RawNullablePointer { .. } => {
+                assert_eq!(field_index, 0);
+                return Ok(base);
+            }
+
+            StructWrappedNullablePointer { ref nonnull, .. } => {
+                (nonnull.offsets[field_index], nonnull.packed)
+            }
+
+            UntaggedUnion { .. } => return Ok(base),
+
+            Vector { element, count } => {
+                let field = field_index as u64;
+                assert!(field < count);
+                let elem_size = element.size(&self.tcx.data_layout).bytes();
+                (Size::from_bytes(field * elem_size), false)
+            }
+
+            // We treat arrays + fixed sized indexing like field accesses
+            Array { .. } => {
+                let field = field_index as u64;
+                let elem_size = match base_ty.sty {
+                    ty::TyArray(elem_ty, n) => {
+                        assert!(field < n.val.to_const_int().unwrap().to_u64().unwrap() as u64);
+                        self.type_size(elem_ty)?.expect("array elements are sized") as u64
+                    }
+                    _ => {
+                        bug!(
+                            "lvalue_field: got Array layout but non-array type {:?}",
+                            base_ty
+                        )
+                    }
+                };
+                (Size::from_bytes(field * elem_size), false)
+            }
+
+            FatPointer { .. } => {
+                let bytes = field_index as u64 * self.memory.pointer_size();
+                let offset = Size::from_bytes(bytes);
+                (offset, false)
+            }
+
+            _ => bug!("field access on non-product type: {:?}", base_layout),
+        };
+
+        // Do not allocate in trivial cases
+        let (base_ptr, base_extra) = match base {
+            Lvalue::Ptr { ptr, extra } => (ptr, extra),
+            Lvalue::Local { frame, local } => {
+                match self.stack[frame].get_local(local)? {
+                    // in case the type has a single field, just return the value
+                    Value::ByVal(_)
+                        if self.get_field_count(base_ty).map(|c| c == 1).unwrap_or(false) => {
+                        assert_eq!(
+                            offset.bytes(),
+                            0,
+                            "ByVal can only have 1 non zst field with offset 0"
+                        );
+                        return Ok(base);
+                    }
+                    Value::ByRef { .. } |
+                    Value::ByValPair(..) |
+                    Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(),
+                }
+            }
+        };
+
+        let offset = match base_extra {
+            LvalueExtra::Vtable(tab) => {
+                let (_, align) = self.size_and_align_of_dst(
+                    base_ty,
+                    base_ptr.ptr.to_value_with_vtable(tab),
+                )?;
+                offset
+                    .abi_align(Align::from_bytes(align, align).unwrap())
+                    .bytes()
+            }
+            _ => offset.bytes(),
+        };
+
+        let mut ptr = base_ptr.offset(offset, &self)?;
+        // if we were unaligned, stay unaligned
+        // no matter what we were, if we are packed, we must not be aligned anymore
+        ptr.aligned &= !packed;
+
+        let field_ty = self.monomorphize(field_ty, self.substs());
+
+        let extra = if self.type_is_sized(field_ty) {
+            LvalueExtra::None
+        } else {
+            match base_extra {
+                LvalueExtra::None => bug!("expected fat pointer"),
+                LvalueExtra::DowncastVariant(..) => {
+                    bug!("Rust doesn't support unsized fields in enum variants")
+                }
+                LvalueExtra::Vtable(_) |
+                LvalueExtra::Length(_) => {}
+            }
+            base_extra
+        };
+
+        Ok(Lvalue::Ptr { ptr, extra })
+    }
+
+    pub(super) fn val_to_lvalue(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Lvalue> {
+        Ok(match self.tcx.struct_tail(ty).sty {
+            ty::TyDynamic(..) => {
+                let (ptr, vtable) = val.into_ptr_vtable_pair(&self.memory)?;
+                Lvalue::Ptr {
+                    ptr: PtrAndAlign { ptr, aligned: true },
+                    extra: LvalueExtra::Vtable(vtable),
+                }
+            }
+            ty::TyStr | ty::TySlice(_) => {
+                let (ptr, len) = val.into_slice(&self.memory)?;
+                Lvalue::Ptr {
+                    ptr: PtrAndAlign { ptr, aligned: true },
+                    extra: LvalueExtra::Length(len),
+                }
+            }
+            _ => Lvalue::from_primval_ptr(val.into_ptr(&self.memory)?),
+        })
+    }
+
+    pub(super) fn lvalue_index(
+        &mut self,
+        base: Lvalue,
+        outer_ty: Ty<'tcx>,
+        n: u64,
+    ) -> EvalResult<'tcx, Lvalue> {
+        // Taking the outer type here may seem odd; it's needed because for array types, the outer type gives away the length.
+        let base = self.force_allocation(base)?;
+        let (base_ptr, _) = base.to_ptr_extra_aligned();
+
+        let (elem_ty, len) = base.elem_ty_and_len(outer_ty);
+        let elem_size = self.type_size(elem_ty)?.expect(
+            "slice element must be sized",
+        );
+        assert!(
+            n < len,
+            "Tried to access element {} of array/slice with length {}",
+            n,
+            len
+        );
+        let ptr = base_ptr.offset(n * elem_size, self.memory.layout)?;
+        Ok(Lvalue::Ptr {
+            ptr,
+            extra: LvalueExtra::None,
+        })
+    }
+
+    pub(super) fn eval_lvalue_projection(
+        &mut self,
+        base: Lvalue,
+        base_ty: Ty<'tcx>,
+        proj_elem: &mir::ProjectionElem<'tcx, mir::Local, Ty<'tcx>>,
+    ) -> EvalResult<'tcx, Lvalue> {
+        use rustc::mir::ProjectionElem::*;
+        let (ptr, extra) = match *proj_elem {
+            Field(field, field_ty) => {
+                return self.lvalue_field(base, field, base_ty, field_ty);
+            }
+
+            Downcast(_, variant) => {
+                let base_layout = self.type_layout(base_ty)?;
+                // FIXME(solson)
+                let base = self.force_allocation(base)?;
+                let (base_ptr, base_extra) = base.to_ptr_extra_aligned();
+
+                use rustc::ty::layout::Layout::*;
+                let extra = match *base_layout {
+                    General { .. } => LvalueExtra::DowncastVariant(variant),
+                    RawNullablePointer { .. } |
+                    StructWrappedNullablePointer { .. } => base_extra,
+                    _ => bug!("variant downcast on non-aggregate: {:?}", base_layout),
+                };
+                (base_ptr, extra)
+            }
+
+            Deref => {
+                let val = self.read_lvalue(base)?;
+
+                let pointee_type = match base_ty.sty {
+                    ty::TyRawPtr(ref tam) |
+                    ty::TyRef(_, ref tam) => tam.ty,
+                    ty::TyAdt(def, _) if def.is_box() => base_ty.boxed_ty(),
+                    _ => bug!("can only deref pointer types"),
+                };
+
+                trace!("deref to {} on {:?}", pointee_type, val);
+
+                return self.val_to_lvalue(val, pointee_type);
+            }
+
+            Index(local) => {
+                let value = self.frame().get_local(local)?;
+                let ty = self.tcx.types.usize;
+                let n = self.value_to_primval(ValTy { value, ty })?.to_u64()?;
+                return self.lvalue_index(base, base_ty, n);
+            }
+
+            ConstantIndex {
+                offset,
+                min_length,
+                from_end,
+            } => {
+                // FIXME(solson)
+                let base = self.force_allocation(base)?;
+                let (base_ptr, _) = base.to_ptr_extra_aligned();
+
+                let (elem_ty, n) = base.elem_ty_and_len(base_ty);
+                let elem_size = self.type_size(elem_ty)?.expect(
+                    "sequence element must be sized",
+                );
+                assert!(n >= min_length as u64);
+
+                let index = if from_end {
+                    n - u64::from(offset)
+                } else {
+                    u64::from(offset)
+                };
+
+                let ptr = base_ptr.offset(index * elem_size, &self)?;
+                (ptr, LvalueExtra::None)
+            }
+
+            Subslice { from, to } => {
+                // FIXME(solson)
+                let base = self.force_allocation(base)?;
+                let (base_ptr, _) = base.to_ptr_extra_aligned();
+
+                let (elem_ty, n) = base.elem_ty_and_len(base_ty);
+                let elem_size = self.type_size(elem_ty)?.expect(
+                    "slice element must be sized",
+                );
+                assert!(u64::from(from) <= n - u64::from(to));
+                let ptr = base_ptr.offset(u64::from(from) * elem_size, &self)?;
+                // subslicing arrays produces arrays
+                let extra = if self.type_is_sized(base_ty) {
+                    LvalueExtra::None
+                } else {
+                    LvalueExtra::Length(n - u64::from(to) - u64::from(from))
+                };
+                (ptr, extra)
+            }
+        };
+
+        Ok(Lvalue::Ptr { ptr, extra })
+    }
+
+    pub fn lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> {
+        self.monomorphize(
+            lvalue.ty(self.mir(), self.tcx).to_ty(self.tcx),
+            self.substs(),
+        )
+    }
+}
diff --git a/src/librustc/mir/interpret/machine.rs b/src/librustc/mir/interpret/machine.rs
new file mode 100644 (file)
index 0000000..3df5d1b
--- /dev/null
@@ -0,0 +1,82 @@
+//! This module contains everything needed to instantiate an interpreter.
+//! This separation exists to ensure that no fancy miri features like
+//! interpreting common C functions leak into CTFE.
+
+use super::{EvalResult, EvalContext, Lvalue, PrimVal, ValTy};
+
+use rustc::{mir, ty};
+use syntax::codemap::Span;
+use syntax::ast::Mutability;
+
+/// Each method of this trait signifies a point where CTFE evaluation would fail,
+/// and where some use-case-dependent behaviour can be applied instead.
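+///
+/// A machine that adds nothing on top of plain MIR evaluation might look
+/// like the following sketch (`PlainCtfe` and its erroring hooks are
+/// illustrative, not a real implementation):
+///
+/// ```ignore
+/// struct PlainCtfe;
+/// impl<'tcx> Machine<'tcx> for PlainCtfe {
+///     type Data = ();
+///     type MemoryData = ();
+///     type MemoryKinds = ();
+///     // Every hook (`eval_fn_call`, `call_intrinsic`, `try_ptr_op`, ...)
+///     // either defers to ordinary MIR evaluation or returns an error.
+/// }
+/// ```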
+pub trait Machine<'tcx>: Sized {
+    /// Additional data that can be accessed via the EvalContext
+    type Data;
+
+    /// Additional data that can be accessed via the Memory
+    type MemoryData;
+
+    /// Additional memory kinds a machine wishes to distinguish from the builtin ones
+    type MemoryKinds: ::std::fmt::Debug + PartialEq + Copy + Clone;
+
+    /// Entry point to all function calls.
+    ///
+    /// Returns Ok(true) when the function was handled completely,
+    /// e.g. because its MIR is missing
+    ///
+    /// Returns Ok(false) if a new stack frame was pushed
+    fn eval_fn_call<'a>(
+        ecx: &mut EvalContext<'a, 'tcx, Self>,
+        instance: ty::Instance<'tcx>,
+        destination: Option<(Lvalue, mir::BasicBlock)>,
+        args: &[ValTy<'tcx>],
+        span: Span,
+        sig: ty::FnSig<'tcx>,
+    ) -> EvalResult<'tcx, bool>;
+
+    /// Directly process an intrinsic without pushing a stack frame.
+    fn call_intrinsic<'a>(
+        ecx: &mut EvalContext<'a, 'tcx, Self>,
+        instance: ty::Instance<'tcx>,
+        args: &[ValTy<'tcx>],
+        dest: Lvalue,
+        dest_ty: ty::Ty<'tcx>,
+        dest_layout: &'tcx ty::layout::Layout,
+        target: mir::BasicBlock,
+    ) -> EvalResult<'tcx>;
+
+    /// Called for all binary operations except on float types.
+    ///
+    /// Returns `None` if the operation should be handled by the integer
+    /// op code in order to share more code between machines
+    ///
+    /// Returns a (value, overflowed) pair if the operation succeeded
+    fn try_ptr_op<'a>(
+        ecx: &EvalContext<'a, 'tcx, Self>,
+        bin_op: mir::BinOp,
+        left: PrimVal,
+        left_ty: ty::Ty<'tcx>,
+        right: PrimVal,
+        right_ty: ty::Ty<'tcx>,
+    ) -> EvalResult<'tcx, Option<(PrimVal, bool)>>;
+
+    /// Called when trying to mark machine-defined `MemoryKinds` as static
+    fn mark_static_initialized(m: Self::MemoryKinds) -> EvalResult<'tcx>;
+
+    /// Heap allocations via the `box` keyword
+    ///
+    /// Returns a pointer to the allocated memory
+    fn box_alloc<'a>(
+        ecx: &mut EvalContext<'a, 'tcx, Self>,
+        ty: ty::Ty<'tcx>,
+        dest: Lvalue,
+    ) -> EvalResult<'tcx>;
+
+    /// Called when trying to access a global declared with a `linkage` attribute
+    fn global_item_with_linkage<'a>(
+        ecx: &mut EvalContext<'a, 'tcx, Self>,
+        instance: ty::Instance<'tcx>,
+        mutability: Mutability,
+    ) -> EvalResult<'tcx>;
+}
diff --git a/src/librustc/mir/interpret/memory.rs b/src/librustc/mir/interpret/memory.rs
new file mode 100644 (file)
index 0000000..bde7929
--- /dev/null
@@ -0,0 +1,1700 @@
+use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian, BigEndian};
+use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque};
+use std::{fmt, iter, ptr, mem, io};
+use std::cell::Cell;
+
+use rustc::ty::Instance;
+use rustc::ty::layout::{self, TargetDataLayout, HasDataLayout};
+use syntax::ast::Mutability;
+use rustc::middle::region;
+
+use super::{EvalResult, EvalErrorKind, PrimVal, Pointer, EvalContext, DynamicLifetime, Machine,
+            RangeMap, AbsLvalue};
+
+////////////////////////////////////////////////////////////////////////////////
+// Locks
+////////////////////////////////////////////////////////////////////////////////
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum AccessKind {
+    Read,
+    Write,
+}
+
+/// Information about a lock that is currently held.
+#[derive(Clone, Debug)]
+struct LockInfo<'tcx> {
+    /// Stores for which lifetimes (of the original write lock) we got
+    /// which suspensions.
+    suspended: HashMap<WriteLockId<'tcx>, Vec<region::Scope>>,
+    /// The current state of the lock that's actually effective.
+    active: Lock,
+}
+
+/// Write locks are identified by a stack frame and an "abstract" (untyped) lvalue.
+/// It may be tempting to use the lifetime as identifier, but that does not work
+/// for two reasons:
+/// * First of all, due to subtyping, the same lock may be referred to with different
+///   lifetimes.
+/// * Secondly, different write locks may actually have the same lifetime.  See `test2`
+///   in `run-pass/many_shr_bor.rs`.
+/// The Id is "captured" when the lock is first suspended; at that point, the borrow checker
+/// considers the path frozen and hence the Id remains stable.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct WriteLockId<'tcx> {
+    frame: usize,
+    path: AbsLvalue<'tcx>,
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub enum Lock {
+    NoLock,
+    WriteLock(DynamicLifetime),
+    ReadLock(Vec<DynamicLifetime>), // This should never be empty -- that would be a read lock held with nobody around to release it...
+}
+use self::Lock::*;
+
+impl<'tcx> Default for LockInfo<'tcx> {
+    fn default() -> Self {
+        LockInfo::new(NoLock)
+    }
+}
+
+impl<'tcx> LockInfo<'tcx> {
+    fn new(lock: Lock) -> LockInfo<'tcx> {
+        LockInfo {
+            suspended: HashMap::new(),
+            active: lock,
+        }
+    }
+
+    fn access_permitted(&self, frame: Option<usize>, access: AccessKind) -> bool {
+        use self::AccessKind::*;
+        match (&self.active, access) {
+            (&NoLock, _) => true,
+            (&ReadLock(ref lfts), Read) => {
+                assert!(!lfts.is_empty(), "Someone left an empty read lock behind.");
+                // Read access to read-locked region is okay, no matter who's holding the read lock.
+                true
+            }
+            (&WriteLock(ref lft), _) => {
+                // All access is okay if we are the ones holding it
+                Some(lft.frame) == frame
+            }
+            _ => false, // Nothing else is okay.
+        }
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Allocations and pointers
+////////////////////////////////////////////////////////////////////////////////
+
+#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct AllocId(u64);
+
+#[derive(Debug)]
+pub enum AllocIdKind {
+    /// We can't ever have more than `usize::max_value` functions at the same time
+    /// since we never "deallocate" functions
+    Function(usize),
+    /// Locals and heap allocations (also statics for now, but those will get their
+    /// own variant soonish).
+    Runtime(u64),
+}
+
+impl AllocIdKind {
+    pub fn into_alloc_id(self) -> AllocId {
+        match self {
+            AllocIdKind::Function(n) => AllocId(n as u64),
+            AllocIdKind::Runtime(n) => AllocId((1 << 63) | n),
+        }
+    }
+}
+
+impl AllocId {
+    /// Currently uses the top bit to discriminate between the `AllocIdKind`s
+    fn discriminant(self) -> u64 {
+        self.0 >> 63
+    }
+    /// Yields everything but the discriminant bits
+    pub fn index(self) -> u64 {
+        self.0 & ((1 << 63) - 1)
+    }
+    pub fn into_alloc_id_kind(self) -> AllocIdKind {
+        match self.discriminant() {
+            0 => AllocIdKind::Function(self.index() as usize),
+            1 => AllocIdKind::Runtime(self.index()),
+            n => bug!("got discriminant {} for AllocId", n),
+        }
+    }
+}
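+
+// Sanity check for the bit-packing above: the top bit selects the kind and
+// the low 63 bits carry the index, so the conversions round-trip (an
+// illustrative test module):
+#[cfg(test)]
+mod alloc_id_encoding_tests {
+    use super::AllocIdKind;
+
+    #[test]
+    fn alloc_id_round_trips() {
+        match AllocIdKind::Function(7).into_alloc_id().into_alloc_id_kind() {
+            AllocIdKind::Function(7) => {}
+            other => panic!("bad function round-trip: {:?}", other),
+        }
+        match AllocIdKind::Runtime(42).into_alloc_id().into_alloc_id_kind() {
+            AllocIdKind::Runtime(42) => {}
+            other => panic!("bad runtime round-trip: {:?}", other),
+        }
+    }
+}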
+
+impl fmt::Display for AllocId {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{:?}", self.into_alloc_id_kind())
+    }
+}
+
+impl fmt::Debug for AllocId {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{:?}", self.into_alloc_id_kind())
+    }
+}
+
+#[derive(Debug)]
+pub struct Allocation<'tcx, M> {
+    /// The actual bytes of the allocation.
+    /// Note that the bytes of a pointer represent the offset of the pointer
+    pub bytes: Vec<u8>,
+    /// Maps from byte addresses to allocations.
+    /// Only the first byte of a pointer is inserted into the map.
+    pub relocations: BTreeMap<u64, AllocId>,
+    /// Denotes undefined memory. Reading from undefined memory is forbidden in miri
+    pub undef_mask: UndefMask,
+    /// The alignment of the allocation to detect unaligned reads.
+    pub align: u64,
+    /// Whether the allocation may be modified.
+    pub mutable: Mutability,
+    /// Use the `mark_static_initalized` method of `Memory` to ensure that an error occurs if the memory of this
+    /// allocation is modified or deallocated in the future.
+    /// Helps guarantee that stack allocations aren't deallocated via `rust_deallocate`
+    pub kind: MemoryKind<M>,
+    /// Memory regions that are locked by some function
+    locks: RangeMap<LockInfo<'tcx>>,
+}
+
+impl<'tcx, M> Allocation<'tcx, M> {
+    fn check_locks(
+        &self,
+        frame: Option<usize>,
+        offset: u64,
+        len: u64,
+        access: AccessKind,
+    ) -> Result<(), LockInfo<'tcx>> {
+        if len == 0 {
+            return Ok(());
+        }
+        for lock in self.locks.iter(offset, len) {
+            // Check if the lock is in conflict with the access.
+            if !lock.access_permitted(frame, access) {
+                return Err(lock.clone());
+            }
+        }
+        Ok(())
+    }
+}
+
+#[derive(Debug, PartialEq, Copy, Clone)]
+pub enum MemoryKind<T> {
+    /// Error if deallocated except during a stack pop
+    Stack,
+    /// Static in the process of being initialized.
+    /// The difference is important: An immutable static referring to a
+    /// mutable initialized static will freeze immutably and would not
+    /// be able to distinguish already initialized statics from uninitialized ones
+    UninitializedStatic,
+    /// May never be deallocated
+    Static,
+    /// Additional memory kinds a machine wishes to distinguish from the builtin ones
+    Machine(T),
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub struct MemoryPointer {
+    pub alloc_id: AllocId,
+    pub offset: u64,
+}
+
+impl<'tcx> MemoryPointer {
+    pub fn new(alloc_id: AllocId, offset: u64) -> Self {
+        MemoryPointer { alloc_id, offset }
+    }
+
+    pub(crate) fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
+        MemoryPointer::new(
+            self.alloc_id,
+            cx.data_layout().wrapping_signed_offset(self.offset, i),
+        )
+    }
+
+    pub fn overflowing_signed_offset<C: HasDataLayout>(self, i: i128, cx: C) -> (Self, bool) {
+        let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset, i);
+        (MemoryPointer::new(self.alloc_id, res), over)
+    }
+
+    pub(crate) fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
+        Ok(MemoryPointer::new(
+            self.alloc_id,
+            cx.data_layout().signed_offset(self.offset, i)?,
+        ))
+    }
+
+    pub fn overflowing_offset<C: HasDataLayout>(self, i: u64, cx: C) -> (Self, bool) {
+        let (res, over) = cx.data_layout().overflowing_offset(self.offset, i);
+        (MemoryPointer::new(self.alloc_id, res), over)
+    }
+
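+    /// Checked pointer arithmetic: advance the pointer by `i` bytes within
+    /// its allocation, erroring on overflow (unlike the `wrapping_*` and
+    /// `overflowing_*` variants above).  For example,
+    /// `ptr.offset(4, self.layout)` (as the `Memory` code below does) yields
+    /// a pointer 4 bytes further into the same allocation.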
+    pub fn offset<C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
+        Ok(MemoryPointer::new(
+            self.alloc_id,
+            cx.data_layout().offset(self.offset, i)?,
+        ))
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Top-level interpreter memory
+////////////////////////////////////////////////////////////////////////////////
+
+pub struct Memory<'a, 'tcx, M: Machine<'tcx>> {
+    /// Additional data required by the Machine
+    pub data: M::MemoryData,
+
+    /// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations).
+    alloc_map: HashMap<u64, Allocation<'tcx, M::MemoryKinds>>,
+
+    /// The AllocId to assign to the next new regular allocation. Always incremented, never gets smaller.
+    next_alloc_id: u64,
+
+    /// Number of virtual bytes allocated.
+    memory_usage: u64,
+
+    /// Maximum number of virtual bytes that may be allocated.
+    memory_size: u64,
+
+    /// Function "allocations". They exist solely so pointers have something to point to, and
+    /// we can figure out what they point to.
+    functions: Vec<Instance<'tcx>>,
+
+    /// Inverse map of `functions` so we don't allocate a new pointer every time we need one
+    function_alloc_cache: HashMap<Instance<'tcx>, AllocId>,
+
+    /// Target machine data layout to emulate.
+    pub layout: &'a TargetDataLayout,
+
+    /// A cache for basic byte allocations keyed by their contents. This is used to deduplicate
+    /// allocations for string and bytestring literals.
+    literal_alloc_cache: HashMap<Vec<u8>, AllocId>,
+
+    /// To avoid having to pass flags to every single memory access, we have some global state saying whether
+    /// alignment checking is currently enforced for read and/or write accesses.
+    reads_are_aligned: Cell<bool>,
+    writes_are_aligned: Cell<bool>,
+
+    /// The current stack frame.  Used to check accesses against locks.
+    pub(super) cur_frame: usize,
+}
+
+impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
+    pub fn new(layout: &'a TargetDataLayout, max_memory: u64, data: M::MemoryData) -> Self {
+        Memory {
+            data,
+            alloc_map: HashMap::new(),
+            functions: Vec::new(),
+            function_alloc_cache: HashMap::new(),
+            next_alloc_id: 0,
+            layout,
+            memory_size: max_memory,
+            memory_usage: 0,
+            literal_alloc_cache: HashMap::new(),
+            reads_are_aligned: Cell::new(true),
+            writes_are_aligned: Cell::new(true),
+            cur_frame: usize::max_value(),
+        }
+    }
+
+    pub fn allocations<'x>(
+        &'x self,
+    ) -> impl Iterator<Item = (AllocId, &'x Allocation<M::MemoryKinds>)> {
+        self.alloc_map.iter().map(|(&id, alloc)| {
+            (AllocIdKind::Runtime(id).into_alloc_id(), alloc)
+        })
+    }
+
+    pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> MemoryPointer {
+        if let Some(&alloc_id) = self.function_alloc_cache.get(&instance) {
+            return MemoryPointer::new(alloc_id, 0);
+        }
+        let id = self.functions.len();
+        debug!("creating fn ptr: {}", id);
+        self.functions.push(instance);
+        let alloc_id = AllocIdKind::Function(id).into_alloc_id();
+        self.function_alloc_cache.insert(instance, alloc_id);
+        MemoryPointer::new(alloc_id, 0)
+    }
+
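+    /// Allocates a deduplicated, immutable byte allocation: identical byte
+    /// strings (e.g. two occurrences of the same string literal) share a
+    /// single allocation via `literal_alloc_cache`.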
+    pub fn allocate_cached(&mut self, bytes: &[u8]) -> EvalResult<'tcx, MemoryPointer> {
+        if let Some(&alloc_id) = self.literal_alloc_cache.get(bytes) {
+            return Ok(MemoryPointer::new(alloc_id, 0));
+        }
+
+        let ptr = self.allocate(
+            bytes.len() as u64,
+            1,
+            MemoryKind::UninitializedStatic,
+        )?;
+        self.write_bytes(ptr.into(), bytes)?;
+        self.mark_static_initalized(
+            ptr.alloc_id,
+            Mutability::Immutable,
+        )?;
+        self.literal_alloc_cache.insert(
+            bytes.to_vec(),
+            ptr.alloc_id,
+        );
+        Ok(ptr)
+    }
+
+    pub fn allocate(
+        &mut self,
+        size: u64,
+        align: u64,
+        kind: MemoryKind<M::MemoryKinds>,
+    ) -> EvalResult<'tcx, MemoryPointer> {
+        assert_ne!(align, 0);
+        assert!(align.is_power_of_two());
+
+        if self.memory_size - self.memory_usage < size {
+            return err!(OutOfMemory {
+                allocation_size: size,
+                memory_size: self.memory_size,
+                memory_usage: self.memory_usage,
+            });
+        }
+        self.memory_usage += size;
+        assert_eq!(size as usize as u64, size);
+        let alloc = Allocation {
+            bytes: vec![0; size as usize],
+            relocations: BTreeMap::new(),
+            undef_mask: UndefMask::new(size),
+            align,
+            kind,
+            mutable: Mutability::Mutable,
+            locks: RangeMap::new(),
+        };
+        let id = self.next_alloc_id;
+        self.next_alloc_id += 1;
+        self.alloc_map.insert(id, alloc);
+        Ok(MemoryPointer::new(
+            AllocIdKind::Runtime(id).into_alloc_id(),
+            0,
+        ))
+    }
+
+    pub fn reallocate(
+        &mut self,
+        ptr: MemoryPointer,
+        old_size: u64,
+        old_align: u64,
+        new_size: u64,
+        new_align: u64,
+        kind: MemoryKind<M::MemoryKinds>,
+    ) -> EvalResult<'tcx, MemoryPointer> {
+        use std::cmp::min;
+
+        if ptr.offset != 0 {
+            return err!(ReallocateNonBasePtr);
+        }
+        if let Ok(alloc) = self.get(ptr.alloc_id) {
+            if alloc.kind != kind {
+                return err!(ReallocatedWrongMemoryKind(
+                    format!("{:?}", alloc.kind),
+                    format!("{:?}", kind),
+                ));
+            }
+        }
+
+        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc"
+        let new_ptr = self.allocate(new_size, new_align, kind)?;
+        self.copy(
+            ptr.into(),
+            new_ptr.into(),
+            min(old_size, new_size),
+            min(old_align, new_align),
+            /*nonoverlapping*/
+            true,
+        )?;
+        self.deallocate(ptr, Some((old_size, old_align)), kind)?;
+
+        Ok(new_ptr)
+    }
+
+    pub fn deallocate(
+        &mut self,
+        ptr: MemoryPointer,
+        size_and_align: Option<(u64, u64)>,
+        kind: MemoryKind<M::MemoryKinds>,
+    ) -> EvalResult<'tcx> {
+        if ptr.offset != 0 {
+            return err!(DeallocateNonBasePtr);
+        }
+
+        let alloc_id = match ptr.alloc_id.into_alloc_id_kind() {
+            AllocIdKind::Function(_) => {
+                return err!(DeallocatedWrongMemoryKind(
+                    "function".to_string(),
+                    format!("{:?}", kind),
+                ))
+            }
+            AllocIdKind::Runtime(id) => id,
+        };
+
+        let alloc = match self.alloc_map.remove(&alloc_id) {
+            Some(alloc) => alloc,
+            None => return err!(DoubleFree),
+        };
+
+        // It is okay for us to still hold locks on deallocation -- for example, we could store data we own
+        // in a local, and the local could be deallocated (from StorageDead) before the function returns.
+        // However, we should check *something*.  For now, we make sure that there is no conflicting write
+        // lock by another frame.  We *have* to permit deallocation if we hold a read lock.
+        // TODO: Figure out the exact rules here.
+        alloc
+            .check_locks(
+                Some(self.cur_frame),
+                0,
+                alloc.bytes.len() as u64,
+                AccessKind::Read,
+            )
+            .map_err(|lock| {
+                EvalErrorKind::DeallocatedLockedMemory {
+                    ptr,
+                    lock: lock.active,
+                }
+            })?;
+
+        if alloc.kind != kind {
+            return err!(DeallocatedWrongMemoryKind(
+                format!("{:?}", alloc.kind),
+                format!("{:?}", kind),
+            ));
+        }
+        if let Some((size, align)) = size_and_align {
+            if size != alloc.bytes.len() as u64 || align != alloc.align {
+                return err!(IncorrectAllocationInformation);
+            }
+        }
+
+        self.memory_usage -= alloc.bytes.len() as u64;
+        debug!("deallocated : {}", ptr.alloc_id);
+
+        Ok(())
+    }
+
+    pub fn pointer_size(&self) -> u64 {
+        self.layout.pointer_size.bytes()
+    }
+
+    pub fn endianess(&self) -> layout::Endian {
+        self.layout.endian
+    }
+
+    /// Check that the pointer is aligned AND non-NULL.
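+    /// For example, with `align == 4` an integer "pointer" `0x1004` passes
+    /// (its offset from the zero base is a multiple of 4), while `0x1003`
+    /// fails with `AlignmentCheckFailed`.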
+    pub fn check_align(&self, ptr: Pointer, align: u64, access: Option<AccessKind>) -> EvalResult<'tcx> {
+        // Check non-NULL/Undef, extract offset
+        let (offset, alloc_align) = match ptr.into_inner_primval() {
+            PrimVal::Ptr(ptr) => {
+                let alloc = self.get(ptr.alloc_id)?;
+                (ptr.offset, alloc.align)
+            }
+            PrimVal::Bytes(bytes) => {
+                // Truncate to the target's pointer width; `pointer_size()` is in bytes,
+                // so the modulus is `2^(pointer_size * 8)`.
+                let v = ((bytes as u128) % (1 << (self.pointer_size() * 8))) as u64;
+                if v == 0 {
+                    return err!(InvalidNullPointerUsage);
+                }
+                (v, align) // the base address of the "integer allocation" is 0 and hence always aligned
+            }
+            PrimVal::Undef => return err!(ReadUndefBytes),
+        };
+        // See if alignment checking is disabled
+        let enforce_alignment = match access {
+            Some(AccessKind::Read) => self.reads_are_aligned.get(),
+            Some(AccessKind::Write) => self.writes_are_aligned.get(),
+            None => true,
+        };
+        if !enforce_alignment {
+            return Ok(());
+        }
+        // Check alignment
+        if alloc_align < align {
+            return err!(AlignmentCheckFailed {
+                has: alloc_align,
+                required: align,
+            });
+        }
+        if offset % align == 0 {
+            Ok(())
+        } else {
+            err!(AlignmentCheckFailed {
+                has: offset % align,
+                required: align,
+            })
+        }
+    }
+
+    pub fn check_bounds(&self, ptr: MemoryPointer, access: bool) -> EvalResult<'tcx> {
+        let alloc = self.get(ptr.alloc_id)?;
+        let allocation_size = alloc.bytes.len() as u64;
+        if ptr.offset > allocation_size {
+            return err!(PointerOutOfBounds {
+                ptr,
+                access,
+                allocation_size,
+            });
+        }
+        Ok(())
+    }
+}
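+
+// A minimal sketch of the alignment arithmetic in `check_align` above, on
+// plain numbers with no interpreter state: an access at `offset` with
+// required alignment `align` passes iff `offset % align == 0`, and the
+// remainder is what gets reported as `has` on failure.
+#[cfg(test)]
+mod align_sketch {
+    #[test]
+    fn offset_alignment() {
+        let align = 8u64;
+        assert_eq!(24u64 % align, 0); // 24 is 8-aligned
+        assert_eq!(26u64 % align, 2); // 26 is misaligned; `has` would be 2
+    }
+}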
+
+/// Locking
+impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
+    pub(crate) fn check_locks(
+        &self,
+        ptr: MemoryPointer,
+        len: u64,
+        access: AccessKind,
+    ) -> EvalResult<'tcx> {
+        if len == 0 {
+            return Ok(());
+        }
+        let alloc = self.get(ptr.alloc_id)?;
+        let frame = self.cur_frame;
+        alloc
+            .check_locks(Some(frame), ptr.offset, len, access)
+            .map_err(|lock| {
+                EvalErrorKind::MemoryLockViolation {
+                    ptr,
+                    len,
+                    frame,
+                    access,
+                    lock: lock.active,
+                }.into()
+            })
+    }
+
+    /// Acquire the lock for the given lifetime
+    pub(crate) fn acquire_lock(
+        &mut self,
+        ptr: MemoryPointer,
+        len: u64,
+        region: Option<region::Scope>,
+        kind: AccessKind,
+    ) -> EvalResult<'tcx> {
+        let frame = self.cur_frame;
+        assert!(len > 0);
+        trace!(
+            "Frame {} acquiring {:?} lock at {:?}, size {} for region {:?}",
+            frame,
+            kind,
+            ptr,
+            len,
+            region
+        );
+        self.check_bounds(ptr.offset(len, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
+        let alloc = self.get_mut_unchecked(ptr.alloc_id)?;
+
+        // Iterate over our range and acquire the lock.  If the range is already split into pieces,
+        // we have to manipulate all of them.
+        let lifetime = DynamicLifetime { frame, region };
+        for lock in alloc.locks.iter_mut(ptr.offset, len) {
+            if !lock.access_permitted(None, kind) {
+                return err!(MemoryAcquireConflict {
+                    ptr,
+                    len,
+                    kind,
+                    lock: lock.active.clone(),
+                });
+            }
+            // See what we have to do
+            match (&mut lock.active, kind) {
+                (active @ &mut NoLock, AccessKind::Write) => {
+                    *active = WriteLock(lifetime);
+                }
+                (active @ &mut NoLock, AccessKind::Read) => {
+                    *active = ReadLock(vec![lifetime]);
+                }
+                (&mut ReadLock(ref mut lifetimes), AccessKind::Read) => {
+                    lifetimes.push(lifetime);
+                }
+                _ => bug!("We already checked that there is no conflicting lock"),
+            }
+        }
+        Ok(())
+    }
+
+    /// Release or suspend a write lock of the given lifetime prematurely.
+    /// When releasing, if there is a read lock or someone else's write lock, that's an error.
+    /// If no lock is held, that's fine.  This can happen when e.g. a local is initialized
+    /// from a constant, and then suspended.
+    /// When suspending, the same cases are fine; we just register an additional suspension.
+    pub(crate) fn suspend_write_lock(
+        &mut self,
+        ptr: MemoryPointer,
+        len: u64,
+        lock_path: &AbsLvalue<'tcx>,
+        suspend: Option<region::Scope>,
+    ) -> EvalResult<'tcx> {
+        assert!(len > 0);
+        let cur_frame = self.cur_frame;
+        let alloc = self.get_mut_unchecked(ptr.alloc_id)?;
+
+        'locks: for lock in alloc.locks.iter_mut(ptr.offset, len) {
+            let is_our_lock = match lock.active {
+                WriteLock(lft) =>
+                    // Double-check that we are holding the lock.
+                    // (Due to subtyping, checking the region would not make any sense.)
+                    lft.frame == cur_frame,
+                ReadLock(_) | NoLock => false,
+            };
+            if is_our_lock {
+                trace!("Releasing {:?}", lock.active);
+                // Disable the lock
+                lock.active = NoLock;
+            } else {
+                trace!(
+                    "Not touching {:?} as it is not our lock",
+                    lock.active,
+                );
+            }
+            // Check if we want to register a suspension
+            if let Some(suspend_region) = suspend {
+                let lock_id = WriteLockId {
+                    frame: cur_frame,
+                    path: lock_path.clone(),
+                };
+                trace!("Adding suspension to {:?}", lock_id);
+                let mut new_suspension = false;
+                lock.suspended
+                    .entry(lock_id)
+                    // Remember whether we added a new suspension or not
+                    .or_insert_with(|| { new_suspension = true; Vec::new() })
+                    .push(suspend_region);
+                // If the suspension is new, we should have owned this.
+                // If there already was a suspension, we should NOT have owned this.
+                if new_suspension == is_our_lock {
+                    // All is well
+                    continue 'locks;
+                }
+            } else {
+                if !is_our_lock {
+                    // All is well.
+                    continue 'locks;
+                }
+            }
+            // If we get here, releasing this is an error except for NoLock.
+            if lock.active != NoLock {
+                return err!(InvalidMemoryLockRelease {
+                    ptr,
+                    len,
+                    frame: cur_frame,
+                    lock: lock.active.clone(),
+                });
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Release a suspension from the write lock.  If this is the last suspension or if there is no suspension, acquire the lock.
+    pub(crate) fn recover_write_lock(
+        &mut self,
+        ptr: MemoryPointer,
+        len: u64,
+        lock_path: &AbsLvalue<'tcx>,
+        lock_region: Option<region::Scope>,
+        suspended_region: region::Scope,
+    ) -> EvalResult<'tcx> {
+        assert!(len > 0);
+        let cur_frame = self.cur_frame;
+        let lock_id = WriteLockId {
+            frame: cur_frame,
+            path: lock_path.clone(),
+        };
+        let alloc = self.get_mut_unchecked(ptr.alloc_id)?;
+
+        for lock in alloc.locks.iter_mut(ptr.offset, len) {
+            // Check if we have a suspension here
+            let (got_the_lock, remove_suspension) = match lock.suspended.get_mut(&lock_id) {
+                None => {
+                    trace!("No suspension around, we can just acquire");
+                    (true, false)
+                }
+                Some(suspensions) => {
+                    trace!("Found suspension of {:?}, removing it", lock_id);
+                    // That's us!  Remove suspension (it should be in there).  The same suspension can
+                    // occur multiple times (when there are multiple shared borrows of this that have the same
+                    // lifetime); only remove one of them.
+                    let idx = match suspensions.iter().enumerate().find(|&(_, re)| re == &suspended_region) {
+                        None => // TODO: Can the user trigger this?
+                            bug!("We have this lock suspended, but not for the given region."),
+                        Some((idx, _)) => idx
+                    };
+                    suspensions.remove(idx);
+                    let got_lock = suspensions.is_empty();
+                    if got_lock {
+                        trace!("All suspensions are gone, we can have the lock again");
+                    }
+                    (got_lock, got_lock)
+                }
+            };
+            if remove_suspension {
+                // with NLL, we could do that up in the match above...
+                assert!(got_the_lock);
+                lock.suspended.remove(&lock_id);
+            }
+            if got_the_lock {
+                match lock.active {
+                    ref mut active @ NoLock => {
+                        *active = WriteLock(
+                            DynamicLifetime {
+                                frame: cur_frame,
+                                region: lock_region,
+                            }
+                        );
+                    }
+                    _ => {
+                        return err!(MemoryAcquireConflict {
+                            ptr,
+                            len,
+                            kind: AccessKind::Write,
+                            lock: lock.active.clone(),
+                        })
+                    }
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    pub(crate) fn locks_lifetime_ended(&mut self, ending_region: Option<region::Scope>) {
+        let cur_frame = self.cur_frame;
+        trace!(
+            "Releasing frame {} locks that expire at {:?}",
+            cur_frame,
+            ending_region
+        );
+        let has_ended = |lifetime: &DynamicLifetime| -> bool {
+            if lifetime.frame != cur_frame {
+                return false;
+            }
+            match ending_region {
+                // When a function ends, we end *all* its locks. It's okay for a function to
+                // still have lifetime-related locks when it returns: with NLL, a lifetime can,
+                // but does not have to, extend beyond the end of a function. The same goes for
+                // a function that still has suspended locks to recover.
+                None => true,
+                Some(ending_region) => lifetime.region == Some(ending_region),
+            }
+        };
+
+        for alloc in self.alloc_map.values_mut() {
+            for lock in alloc.locks.iter_mut_all() {
+                // Delete everything that ends now -- i.e., keep only all the other lifetimes.
+                let lock_ended = match lock.active {
+                    WriteLock(ref lft) => has_ended(lft),
+                    ReadLock(ref mut lfts) => {
+                        lfts.retain(|lft| !has_ended(lft));
+                        lfts.is_empty()
+                    }
+                    NoLock => false,
+                };
+                if lock_ended {
+                    lock.active = NoLock;
+                }
+                // Also clean up suspended write locks when the function returns
+                if ending_region.is_none() {
+                    lock.suspended.retain(|id, _suspensions| id.frame != cur_frame);
+                }
+            }
+            // Clean up the map
+            alloc.locks.retain(|lock| match lock.active {
+                NoLock => lock.suspended.len() > 0,
+                _ => true,
+            });
+        }
+    }
+}
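+
+// A distilled sketch of the lock state machine used above, ignoring the
+// frame and lifetime bookkeeping the real `access_permitted` does: a write
+// lock is exclusive, read locks stack, so a fresh acquisition is permitted
+// only on `NoLock`, or on `ReadLock` when the new access is also a read.
+#[cfg(test)]
+mod lock_state_sketch {
+    enum Lock { NoLock, ReadLock(usize), WriteLock } // usize counts readers
+
+    fn access_permitted(lock: &Lock, write: bool) -> bool {
+        match (lock, write) {
+            (&Lock::NoLock, _) => true,
+            (&Lock::ReadLock(_), false) => true,
+            _ => false,
+        }
+    }
+
+    #[test]
+    fn exclusivity() {
+        assert!(access_permitted(&Lock::NoLock, true));
+        assert!(access_permitted(&Lock::ReadLock(1), false)); // readers stack
+        assert!(!access_permitted(&Lock::ReadLock(1), true)); // no write over a read
+        assert!(!access_permitted(&Lock::WriteLock, false));
+    }
+}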
+
+/// Allocation accessors
+impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
+    pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation<'tcx, M::MemoryKinds>> {
+        match id.into_alloc_id_kind() {
+            AllocIdKind::Function(_) => err!(DerefFunctionPointer),
+            AllocIdKind::Runtime(id) => {
+                match self.alloc_map.get(&id) {
+                    Some(alloc) => Ok(alloc),
+                    None => err!(DanglingPointerDeref),
+                }
+            }
+        }
+    }
+
+    fn get_mut_unchecked(
+        &mut self,
+        id: AllocId,
+    ) -> EvalResult<'tcx, &mut Allocation<'tcx, M::MemoryKinds>> {
+        match id.into_alloc_id_kind() {
+            AllocIdKind::Function(_) => err!(DerefFunctionPointer),
+            AllocIdKind::Runtime(id) => {
+                match self.alloc_map.get_mut(&id) {
+                    Some(alloc) => Ok(alloc),
+                    None => err!(DanglingPointerDeref),
+                }
+            }
+        }
+    }
+
+    fn get_mut(&mut self, id: AllocId) -> EvalResult<'tcx, &mut Allocation<'tcx, M::MemoryKinds>> {
+        let alloc = self.get_mut_unchecked(id)?;
+        if alloc.mutable == Mutability::Mutable {
+            Ok(alloc)
+        } else {
+            err!(ModifiedConstantMemory)
+        }
+    }
+
+    pub fn get_fn(&self, ptr: MemoryPointer) -> EvalResult<'tcx, Instance<'tcx>> {
+        if ptr.offset != 0 {
+            return err!(InvalidFunctionPointer);
+        }
+        debug!("reading fn ptr: {}", ptr.alloc_id);
+        match ptr.alloc_id.into_alloc_id_kind() {
+            AllocIdKind::Function(id) => Ok(self.functions[id]),
+            AllocIdKind::Runtime(_) => err!(ExecuteMemory),
+        }
+    }
+
+    /// For debugging, print an allocation and all allocations it points to, recursively.
+    pub fn dump_alloc(&self, id: AllocId) {
+        self.dump_allocs(vec![id]);
+    }
+
+    /// For debugging, print a list of allocations and all allocations they point to, recursively.
+    pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
+        use std::fmt::Write;
+        allocs.sort();
+        allocs.dedup();
+        let mut allocs_to_print = VecDeque::from(allocs);
+        let mut allocs_seen = HashSet::new();
+
+        while let Some(id) = allocs_to_print.pop_front() {
+            let mut msg = format!("Alloc {:<5} ", format!("{}:", id));
+            let prefix_len = msg.len();
+            let mut relocations = vec![];
+
+            let alloc = match id.into_alloc_id_kind() {
+                AllocIdKind::Function(id) => {
+                    trace!("{} {}", msg, self.functions[id]);
+                    continue;
+                }
+                AllocIdKind::Runtime(id) => {
+                    match self.alloc_map.get(&id) {
+                        Some(a) => a,
+                        None => {
+                            trace!("{} (deallocated)", msg);
+                            continue;
+                        }
+                    }
+                }
+            };
+
+            for i in 0..(alloc.bytes.len() as u64) {
+                if let Some(&target_id) = alloc.relocations.get(&i) {
+                    if allocs_seen.insert(target_id) {
+                        allocs_to_print.push_back(target_id);
+                    }
+                    relocations.push((i, target_id));
+                }
+                if alloc.undef_mask.is_range_defined(i, i + 1) {
+                    // this `as usize` is fine, since `i` came from a `usize`
+                    write!(msg, "{:02x} ", alloc.bytes[i as usize]).unwrap();
+                } else {
+                    msg.push_str("__ ");
+                }
+            }
+
+            let immutable = match (alloc.kind, alloc.mutable) {
+                (MemoryKind::UninitializedStatic, _) => {
+                    " (static in the process of initialization)".to_owned()
+                }
+                (MemoryKind::Static, Mutability::Mutable) => " (static mut)".to_owned(),
+                (MemoryKind::Static, Mutability::Immutable) => " (immutable)".to_owned(),
+                (MemoryKind::Machine(m), _) => format!(" ({:?})", m),
+                (MemoryKind::Stack, _) => " (stack)".to_owned(),
+            };
+            trace!(
+                "{}({} bytes, alignment {}){}",
+                msg,
+                alloc.bytes.len(),
+                alloc.align,
+                immutable
+            );
+
+            if !relocations.is_empty() {
+                msg.clear();
+                write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
+                let mut pos = 0;
+                let relocation_width = (self.pointer_size() - 1) * 3;
+                for (i, target_id) in relocations {
+                    // this `as usize` is fine, since we can't print more chars than `usize::MAX`
+                    write!(msg, "{:1$}", "", ((i - pos) * 3) as usize).unwrap();
+                    let target = format!("({})", target_id);
+                    // this `as usize` is fine, since we can't print more chars than `usize::MAX`
+                    write!(msg, "â””{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
+                    pos = i + self.pointer_size();
+                }
+                trace!("{}", msg);
+            }
+        }
+    }
+
+    pub fn leak_report(&self) -> usize {
+        trace!("### LEAK REPORT ###");
+        let leaks: Vec<_> = self.alloc_map
+            .iter()
+            .filter_map(|(&key, val)| if val.kind != MemoryKind::Static {
+                Some(AllocIdKind::Runtime(key).into_alloc_id())
+            } else {
+                None
+            })
+            .collect();
+        let n = leaks.len();
+        self.dump_allocs(leaks);
+        n
+    }
+}
+
+/// Byte accessors
+impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
+    fn get_bytes_unchecked(
+        &self,
+        ptr: MemoryPointer,
+        size: u64,
+        align: u64,
+    ) -> EvalResult<'tcx, &[u8]> {
+        // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
+        self.check_align(ptr.into(), align, Some(AccessKind::Read))?;
+        if size == 0 {
+            return Ok(&[]);
+        }
+        self.check_locks(ptr, size, AccessKind::Read)?;
+        self.check_bounds(ptr.offset(size, self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
+        let alloc = self.get(ptr.alloc_id)?;
+        assert_eq!(ptr.offset as usize as u64, ptr.offset);
+        assert_eq!(size as usize as u64, size);
+        let offset = ptr.offset as usize;
+        Ok(&alloc.bytes[offset..offset + size as usize])
+    }
+
+    fn get_bytes_unchecked_mut(
+        &mut self,
+        ptr: MemoryPointer,
+        size: u64,
+        align: u64,
+    ) -> EvalResult<'tcx, &mut [u8]> {
+        // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
+        self.check_align(ptr.into(), align, Some(AccessKind::Write))?;
+        if size == 0 {
+            return Ok(&mut []);
+        }
+        self.check_locks(ptr, size, AccessKind::Write)?;
+        self.check_bounds(ptr.offset(size, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
+        let alloc = self.get_mut(ptr.alloc_id)?;
+        assert_eq!(ptr.offset as usize as u64, ptr.offset);
+        assert_eq!(size as usize as u64, size);
+        let offset = ptr.offset as usize;
+        Ok(&mut alloc.bytes[offset..offset + size as usize])
+    }
+
+    fn get_bytes(&self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> {
+        assert_ne!(size, 0);
+        if self.relocations(ptr, size)?.count() != 0 {
+            return err!(ReadPointerAsBytes);
+        }
+        self.check_defined(ptr, size)?;
+        self.get_bytes_unchecked(ptr, size, align)
+    }
+
+    fn get_bytes_mut(
+        &mut self,
+        ptr: MemoryPointer,
+        size: u64,
+        align: u64,
+    ) -> EvalResult<'tcx, &mut [u8]> {
+        assert_ne!(size, 0);
+        self.clear_relocations(ptr, size)?;
+        self.mark_definedness(ptr.into(), size, true)?;
+        self.get_bytes_unchecked_mut(ptr, size, align)
+    }
+}
+
+/// Reading and writing
+impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
+    /// Mark an allocation pointed to by a static as static and initialized.
+    fn mark_inner_allocation_initialized(
+        &mut self,
+        alloc: AllocId,
+        mutability: Mutability,
+    ) -> EvalResult<'tcx> {
+        // relocations into other statics are not "inner allocations"
+        if self.get(alloc).ok().map_or(false, |alloc| {
+            alloc.kind != MemoryKind::UninitializedStatic
+        })
+        {
+            self.mark_static_initalized(alloc, mutability)?;
+        }
+        Ok(())
+    }
+
+    /// Mark an allocation as static and initialized, either mutable or not.
+    pub fn mark_static_initalized(
+        &mut self,
+        alloc_id: AllocId,
+        mutability: Mutability,
+    ) -> EvalResult<'tcx> {
+        trace!(
+            "mark_static_initalized {:?}, mutability: {:?}",
+            alloc_id,
+            mutability
+        );
+        // do not use `self.get_mut(alloc_id)` here, because we might have already marked a
+        // sub-element or have circular pointers (e.g. `Rc`-cycles)
+        let alloc_id = match alloc_id.into_alloc_id_kind() {
+            AllocIdKind::Function(_) => return Ok(()),
+            AllocIdKind::Runtime(id) => id,
+        };
+        let relocations = match self.alloc_map.get_mut(&alloc_id) {
+            Some(&mut Allocation {
+                     ref mut relocations,
+                     ref mut kind,
+                     ref mut mutable,
+                     ..
+                 }) => {
+                match *kind {
+                    // const eval results can refer to "locals".
+                    // E.g. `const Foo: &u32 = &1;` refers to the temp local that stores the `1`
+                    MemoryKind::Stack |
+                    // The entire point of this function
+                    MemoryKind::UninitializedStatic => {},
+                    MemoryKind::Machine(m) => M::mark_static_initialized(m)?,
+                    MemoryKind::Static => {
+                        trace!("mark_static_initalized: skipping already initialized static referred to by static currently being initialized");
+                        return Ok(());
+                    },
+                }
+                *kind = MemoryKind::Static;
+                *mutable = mutability;
+                // take out the relocations vector to free the borrow on self, so we can call
+                // mark recursively
+                mem::replace(relocations, Default::default())
+            }
+            None => return err!(DanglingPointerDeref),
+        };
+        // recurse into inner allocations
+        for &alloc in relocations.values() {
+            self.mark_inner_allocation_initialized(alloc, mutability)?;
+        }
+        // put back the relocations
+        self.alloc_map
+            .get_mut(&alloc_id)
+            .expect("checked above")
+            .relocations = relocations;
+        Ok(())
+    }
+
+    pub fn copy(
+        &mut self,
+        src: Pointer,
+        dest: Pointer,
+        size: u64,
+        align: u64,
+        nonoverlapping: bool,
+    ) -> EvalResult<'tcx> {
+        // Empty accesses don't need to be valid pointers, but they should still be aligned
+        self.check_align(src, align, Some(AccessKind::Read))?;
+        self.check_align(dest, align, Some(AccessKind::Write))?;
+        if size == 0 {
+            return Ok(());
+        }
+        let src = src.to_ptr()?;
+        let dest = dest.to_ptr()?;
+        self.check_relocation_edges(src, size)?;
+
+        // first copy the relocations to a temporary buffer, because
+        // `get_bytes_mut` will clear the relocations, which is correct,
+        // since we don't want to keep any relocations at the target.
+
+        let relocations: Vec<_> = self.relocations(src, size)?
+            .map(|(&offset, &alloc_id)| {
+                // Update relocation offsets for the new positions in the destination allocation.
+                (offset + dest.offset - src.offset, alloc_id)
+            })
+            .collect();
+
+        let src_bytes = self.get_bytes_unchecked(src, size, align)?.as_ptr();
+        let dest_bytes = self.get_bytes_mut(dest, size, align)?.as_mut_ptr();
+
+        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
+        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
+        // `dest` could possibly overlap.
+        unsafe {
+            assert_eq!(size as usize as u64, size);
+            if src.alloc_id == dest.alloc_id {
+                if nonoverlapping {
+                    if (src.offset <= dest.offset && src.offset + size > dest.offset) ||
+                        (dest.offset <= src.offset && dest.offset + size > src.offset)
+                    {
+                        return err!(Intrinsic(
+                            format!("copy_nonoverlapping called on overlapping ranges"),
+                        ));
+                    }
+                }
+                ptr::copy(src_bytes, dest_bytes, size as usize);
+            } else {
+                ptr::copy_nonoverlapping(src_bytes, dest_bytes, size as usize);
+            }
+        }
+
+        self.copy_undef_mask(src, dest, size)?;
+        // copy back the relocations
+        self.get_mut(dest.alloc_id)?.relocations.extend(relocations);
+
+        Ok(())
+    }
+
+    pub fn read_c_str(&self, ptr: MemoryPointer) -> EvalResult<'tcx, &[u8]> {
+        let alloc = self.get(ptr.alloc_id)?;
+        assert_eq!(ptr.offset as usize as u64, ptr.offset);
+        let offset = ptr.offset as usize;
+        match alloc.bytes[offset..].iter().position(|&c| c == 0) {
+            Some(size) => {
+                if self.relocations(ptr, (size + 1) as u64)?.count() != 0 {
+                    return err!(ReadPointerAsBytes);
+                }
+                self.check_defined(ptr, (size + 1) as u64)?;
+                self.check_locks(ptr, (size + 1) as u64, AccessKind::Read)?;
+                Ok(&alloc.bytes[offset..offset + size])
+            }
+            None => err!(UnterminatedCString(ptr)),
+        }
+    }
+
+    pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> {
+        // Empty accesses don't need to be valid pointers, but they should still be non-NULL
+        self.check_align(ptr, 1, Some(AccessKind::Read))?;
+        if size == 0 {
+            return Ok(&[]);
+        }
+        self.get_bytes(ptr.to_ptr()?, size, 1)
+    }
+
+    pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<'tcx> {
+        // Empty accesses don't need to be valid pointers, but they should still be non-NULL
+        self.check_align(ptr, 1, Some(AccessKind::Write))?;
+        if src.is_empty() {
+            return Ok(());
+        }
+        let bytes = self.get_bytes_mut(ptr.to_ptr()?, src.len() as u64, 1)?;
+        bytes.clone_from_slice(src);
+        Ok(())
+    }
+
+    pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx> {
+        // Empty accesses don't need to be valid pointers, but they should still be non-NULL
+        self.check_align(ptr, 1, Some(AccessKind::Write))?;
+        if count == 0 {
+            return Ok(());
+        }
+        let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, 1)?;
+        for b in bytes {
+            *b = val;
+        }
+        Ok(())
+    }
+
+    pub fn read_primval(&self, ptr: MemoryPointer, size: u64, signed: bool) -> EvalResult<'tcx, PrimVal> {
+        self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer
+        let endianess = self.endianess();
+        let bytes = self.get_bytes_unchecked(ptr, size, self.int_align(size))?;
+        // Undef check happens *after* we established that the alignment is correct.
+        // We must not return Ok() for unaligned pointers!
+        if self.check_defined(ptr, size).is_err() {
+            return Ok(PrimVal::Undef);
+        }
+        // Now we do the actual reading
+        let bytes = if signed {
+            read_target_int(endianess, bytes).unwrap() as u128
+        } else {
+            read_target_uint(endianess, bytes).unwrap()
+        };
+        // See if we got a pointer
+        if size != self.pointer_size() {
+            if self.relocations(ptr, size)?.count() != 0 {
+                return err!(ReadPointerAsBytes);
+            }
+        } else {
+            let alloc = self.get(ptr.alloc_id)?;
+            match alloc.relocations.get(&ptr.offset) {
+                Some(&alloc_id) => return Ok(PrimVal::Ptr(MemoryPointer::new(alloc_id, bytes as u64))),
+                None => {},
+            }
+        }
+        // We don't. Just return the bytes.
+        Ok(PrimVal::Bytes(bytes))
+    }
+
+    pub fn read_ptr_sized_unsigned(&self, ptr: MemoryPointer) -> EvalResult<'tcx, PrimVal> {
+        self.read_primval(ptr, self.pointer_size(), false)
+    }
+
+    pub fn write_primval(&mut self, ptr: MemoryPointer, val: PrimVal, size: u64, signed: bool) -> EvalResult<'tcx> {
+        let endianess = self.endianess();
+
+        let bytes = match val {
+            PrimVal::Ptr(val) => {
+                assert_eq!(size, self.pointer_size());
+                val.offset as u128
+            }
+
+            PrimVal::Bytes(bytes) => {
+                // We need to mask here, or the byteorder crate will panic when given a value
+                // that does not fit in an integer of the requested size.
+                let mask = match size {
+                    1 => !0u8 as u128,
+                    2 => !0u16 as u128,
+                    4 => !0u32 as u128,
+                    8 => !0u64 as u128,
+                    16 => !0,
+                    n => bug!("unexpected PrimVal::Bytes size: {}", n),
+                };
+                bytes & mask
+            }
+
+            PrimVal::Undef => {
+                self.mark_definedness(PrimVal::Ptr(ptr).into(), size, false)?;
+                return Ok(());
+            }
+        };
+
+        {
+            let align = self.int_align(size);
+            let dst = self.get_bytes_mut(ptr, size, align)?;
+            if signed {
+                write_target_int(endianess, dst, bytes as i128).unwrap();
+            } else {
+                write_target_uint(endianess, dst, bytes).unwrap();
+            }
+        }
+
+        // See if we have to also write a relocation
+        match val {
+            PrimVal::Ptr(val) => {
+                self.get_mut(ptr.alloc_id)?.relocations.insert(
+                    ptr.offset,
+                    val.alloc_id,
+                );
+            }
+            _ => {}
+        }
+
+        Ok(())
+    }
+
+    pub fn write_ptr_sized_unsigned(&mut self, ptr: MemoryPointer, val: PrimVal) -> EvalResult<'tcx> {
+        let ptr_size = self.pointer_size();
+        self.write_primval(ptr, val, ptr_size, false)
+    }
+
+    fn int_align(&self, size: u64) -> u64 {
+        // We assume pointer-sized integers have the same alignment as pointers.
+        // We also assume signed and unsigned integers of the same size have the same alignment.
+        match size {
+            1 => self.layout.i8_align.abi(),
+            2 => self.layout.i16_align.abi(),
+            4 => self.layout.i32_align.abi(),
+            8 => self.layout.i64_align.abi(),
+            16 => self.layout.i128_align.abi(),
+            _ => bug!("bad integer size: {}", size),
+        }
+    }
+}
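+
+// Two helpers from the impl above, sketched in isolation. `ranges_overlap`
+// mirrors the collision check in `copy`: two ranges of length `size` overlap
+// iff each one starts before the other ends. `mask_to_size` mirrors the
+// truncation in `write_primval`, which cuts a wide value down to the
+// requested byte width before handing it to the byteorder writer.
+#[cfg(test)]
+mod copy_and_mask_sketch {
+    fn ranges_overlap(a: u64, b: u64, size: u64) -> bool {
+        (a <= b && a + size > b) || (b <= a && b + size > a)
+    }
+
+    fn mask_to_size(bytes: u128, size: u64) -> u128 {
+        let mask = match size {
+            1 => !0u8 as u128,
+            2 => !0u16 as u128,
+            4 => !0u32 as u128,
+            8 => !0u64 as u128,
+            16 => !0,
+            n => panic!("unexpected size: {}", n),
+        };
+        bytes & mask
+    }
+
+    #[test]
+    fn overlap_detection() {
+        assert!(ranges_overlap(0, 3, 4)); // [0,4) and [3,7) share byte 3
+        assert!(!ranges_overlap(0, 4, 4)); // [0,4) and [4,8) merely touch
+    }
+
+    #[test]
+    fn mask_truncates_high_bits() {
+        assert_eq!(mask_to_size(0x1_00ff, 1), 0xff);
+        assert_eq!(mask_to_size(!0u128, 8), !0u64 as u128);
+    }
+}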
+
+/// Relocations
+impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
+    fn relocations(
+        &self,
+        ptr: MemoryPointer,
+        size: u64,
+    ) -> EvalResult<'tcx, btree_map::Range<u64, AllocId>> {
+        let start = ptr.offset.saturating_sub(self.pointer_size() - 1);
+        let end = ptr.offset + size;
+        Ok(self.get(ptr.alloc_id)?.relocations.range(start..end))
+    }
+
+    fn clear_relocations(&mut self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
+        // Find all relocations overlapping the given range.
+        let keys: Vec<_> = self.relocations(ptr, size)?.map(|(&k, _)| k).collect();
+        if keys.is_empty() {
+            return Ok(());
+        }
+
+        // Find the start and end of the given range and its outermost relocations.
+        let start = ptr.offset;
+        let end = start + size;
+        let first = *keys.first().unwrap();
+        let last = *keys.last().unwrap() + self.pointer_size();
+
+        let alloc = self.get_mut(ptr.alloc_id)?;
+
+        // Mark parts of the outermost relocations as undefined if they partially fall outside the
+        // given range.
+        if first < start {
+            alloc.undef_mask.set_range(first, start, false);
+        }
+        if last > end {
+            alloc.undef_mask.set_range(end, last, false);
+        }
+
+        // Forget all the relocations.
+        for k in keys {
+            alloc.relocations.remove(&k);
+        }
+
+        Ok(())
+    }
+
+    fn check_relocation_edges(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
+        let overlapping_start = self.relocations(ptr, 0)?.count();
+        let overlapping_end = self.relocations(ptr.offset(size, self.layout)?, 0)?.count();
+        if overlapping_start + overlapping_end != 0 {
+            return err!(ReadPointerAsBytes);
+        }
+        Ok(())
+    }
+}
+
+/// Undefined bytes
+impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
+    // FIXME(solson): This is a very naive, slow version.
+    fn copy_undef_mask(
+        &mut self,
+        src: MemoryPointer,
+        dest: MemoryPointer,
+        size: u64,
+    ) -> EvalResult<'tcx> {
+        // The bits have to be saved locally before writing to dest in case src and dest overlap.
+        assert_eq!(size as usize as u64, size);
+        let mut v = Vec::with_capacity(size as usize);
+        for i in 0..size {
+            let defined = self.get(src.alloc_id)?.undef_mask.get(src.offset + i);
+            v.push(defined);
+        }
+        for (i, defined) in v.into_iter().enumerate() {
+            self.get_mut(dest.alloc_id)?.undef_mask.set(
+                dest.offset +
+                    i as u64,
+                defined,
+            );
+        }
+        Ok(())
+    }
+
+    fn check_defined(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
+        let alloc = self.get(ptr.alloc_id)?;
+        if !alloc.undef_mask.is_range_defined(
+            ptr.offset,
+            ptr.offset + size,
+        )
+        {
+            return err!(ReadUndefBytes);
+        }
+        Ok(())
+    }
+
+    pub fn mark_definedness(
+        &mut self,
+        ptr: Pointer,
+        size: u64,
+        new_state: bool,
+    ) -> EvalResult<'tcx> {
+        if size == 0 {
+            return Ok(());
+        }
+        let ptr = ptr.to_ptr()?;
+        let alloc = self.get_mut(ptr.alloc_id)?;
+        alloc.undef_mask.set_range(
+            ptr.offset,
+            ptr.offset + size,
+            new_state,
+        );
+        Ok(())
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Methods to access integers in the target endianness
+////////////////////////////////////////////////////////////////////////////////
+
+fn write_target_uint(
+    endianess: layout::Endian,
+    mut target: &mut [u8],
+    data: u128,
+) -> Result<(), io::Error> {
+    let len = target.len();
+    match endianess {
+        layout::Endian::Little => target.write_uint128::<LittleEndian>(data, len),
+        layout::Endian::Big => target.write_uint128::<BigEndian>(data, len),
+    }
+}
+fn write_target_int(
+    endianess: layout::Endian,
+    mut target: &mut [u8],
+    data: i128,
+) -> Result<(), io::Error> {
+    let len = target.len();
+    match endianess {
+        layout::Endian::Little => target.write_int128::<LittleEndian>(data, len),
+        layout::Endian::Big => target.write_int128::<BigEndian>(data, len),
+    }
+}
+
+fn read_target_uint(endianess: layout::Endian, mut source: &[u8]) -> Result<u128, io::Error> {
+    match endianess {
+        layout::Endian::Little => source.read_uint128::<LittleEndian>(source.len()),
+        layout::Endian::Big => source.read_uint128::<BigEndian>(source.len()),
+    }
+}
+
+fn read_target_int(endianess: layout::Endian, mut source: &[u8]) -> Result<i128, io::Error> {
+    match endianess {
+        layout::Endian::Little => source.read_int128::<LittleEndian>(source.len()),
+        layout::Endian::Big => source.read_int128::<BigEndian>(source.len()),
+    }
+}
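+
+// A round-trip sketch of the helpers above, assuming the `byteorder` crate
+// (which supplies the `write_uint128`/`read_uint128` extension methods used
+// here): a u32-sized value written little-endian lands lowest byte first.
+#[cfg(test)]
+mod endian_sketch {
+    use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
+
+    #[test]
+    fn roundtrip_u32_le() {
+        let mut buf = [0u8; 4];
+        (&mut buf[..]).write_uint128::<LittleEndian>(0xdead_beef, 4).unwrap();
+        assert_eq!(buf, [0xef, 0xbe, 0xad, 0xde]);
+        assert_eq!((&buf[..]).read_uint128::<LittleEndian>(4).unwrap(), 0xdead_beef);
+    }
+}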
+
+////////////////////////////////////////////////////////////////////////////////
+// Undefined byte tracking
+////////////////////////////////////////////////////////////////////////////////
+
+type Block = u64;
+const BLOCK_SIZE: u64 = 64;
+
+#[derive(Clone, Debug)]
+pub struct UndefMask {
+    blocks: Vec<Block>,
+    len: u64,
+}
+
+impl UndefMask {
+    fn new(size: u64) -> Self {
+        let mut m = UndefMask {
+            blocks: vec![],
+            len: 0,
+        };
+        m.grow(size, false);
+        m
+    }
+
+    /// Check whether the range `start..end` (end-exclusive) is entirely defined.
+    pub fn is_range_defined(&self, start: u64, end: u64) -> bool {
+        if end > self.len {
+            return false;
+        }
+        for i in start..end {
+            if !self.get(i) {
+                return false;
+            }
+        }
+        true
+    }
+
+    fn set_range(&mut self, start: u64, end: u64, new_state: bool) {
+        let len = self.len;
+        if end > len {
+            self.grow(end - len, new_state);
+        }
+        self.set_range_inbounds(start, end, new_state);
+    }
+
+    fn set_range_inbounds(&mut self, start: u64, end: u64, new_state: bool) {
+        for i in start..end {
+            self.set(i, new_state);
+        }
+    }
+
+    fn get(&self, i: u64) -> bool {
+        let (block, bit) = bit_index(i);
+        (self.blocks[block] & 1 << bit) != 0
+    }
+
+    fn set(&mut self, i: u64, new_state: bool) {
+        let (block, bit) = bit_index(i);
+        if new_state {
+            self.blocks[block] |= 1 << bit;
+        } else {
+            self.blocks[block] &= !(1 << bit);
+        }
+    }
+
+    fn grow(&mut self, amount: u64, new_state: bool) {
+        let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len;
+        if amount > unused_trailing_bits {
+            let additional_blocks = amount / BLOCK_SIZE + 1;
+            assert_eq!(additional_blocks as usize as u64, additional_blocks);
+            self.blocks.extend(
+                iter::repeat(0).take(additional_blocks as usize),
+            );
+        }
+        let start = self.len;
+        self.len += amount;
+        self.set_range_inbounds(start, start + amount, new_state);
+    }
+}
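+
+// An in-module sketch of how the mask behaves (it has to live here, since
+// `new` and `set_range` are private to this module): bytes start out
+// undefined, and marking a sub-range defined leaves its neighbors untouched.
+#[cfg(test)]
+mod undef_mask_sketch {
+    use super::UndefMask;
+
+    #[test]
+    fn grow_and_query() {
+        let mut m = UndefMask::new(10);      // 10 fresh, undefined bytes
+        assert!(!m.is_range_defined(0, 10)); // nothing defined yet
+        m.set_range(2, 5, true);             // mark bytes 2..5 as defined
+        assert!(m.is_range_defined(2, 5));
+        assert!(!m.is_range_defined(1, 5));  // byte 1 is still undefined
+    }
+}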
+
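+// For example, with BLOCK_SIZE = 64, bit 70 lives in block 1 at bit 6,
+// since 70 / 64 == 1 and 70 % 64 == 6.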
+fn bit_index(bits: u64) -> (usize, usize) {
+    let a = bits / BLOCK_SIZE;
+    let b = bits % BLOCK_SIZE;
+    assert_eq!(a as usize as u64, a);
+    assert_eq!(b as usize as u64, b);
+    (a as usize, b as usize)
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Unaligned accesses
+////////////////////////////////////////////////////////////////////////////////
+
+pub trait HasMemory<'a, 'tcx, M: Machine<'tcx>> {
+    fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M>;
+    fn memory(&self) -> &Memory<'a, 'tcx, M>;
+
+    // These are not supposed to be overridden.
+    fn read_maybe_aligned<F, T>(&self, aligned: bool, f: F) -> EvalResult<'tcx, T>
+    where
+        F: FnOnce(&Self) -> EvalResult<'tcx, T>,
+    {
+        let old = self.memory().reads_are_aligned.get();
+        // Do alignment checking if *all* nested calls say it has to be aligned.
+        self.memory().reads_are_aligned.set(old && aligned);
+        let t = f(self);
+        self.memory().reads_are_aligned.set(old);
+        t
+    }
+
+    fn read_maybe_aligned_mut<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
+    where
+        F: FnOnce(&mut Self) -> EvalResult<'tcx, T>,
+    {
+        let old = self.memory().reads_are_aligned.get();
+        // Do alignment checking if *all* nested calls say it has to be aligned.
+        self.memory().reads_are_aligned.set(old && aligned);
+        let t = f(self);
+        self.memory().reads_are_aligned.set(old);
+        t
+    }
+
+    fn write_maybe_aligned_mut<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
+    where
+        F: FnOnce(&mut Self) -> EvalResult<'tcx, T>,
+    {
+        let old = self.memory().writes_are_aligned.get();
+        // Do alignment checking if *all* nested calls say it has to be aligned.
+        self.memory().writes_are_aligned.set(old && aligned);
+        let t = f(self);
+        self.memory().writes_are_aligned.set(old);
+        t
+    }
+}
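+
+// The save/restore pattern above, in isolation: a Cell-based flag that is
+// only ever weakened (never strengthened) for the duration of a closure, so
+// nested calls enforce alignment only if *all* of them ask for it.
+#[cfg(test)]
+mod aligned_flag_sketch {
+    use std::cell::Cell;
+
+    fn with_flag<T, F: FnOnce() -> T>(flag: &Cell<bool>, aligned: bool, f: F) -> T {
+        let old = flag.get();
+        flag.set(old && aligned);
+        let t = f();
+        flag.set(old);
+        t
+    }
+
+    #[test]
+    fn nesting_weakens() {
+        let flag = Cell::new(true);
+        with_flag(&flag, false, || {
+            assert!(!flag.get()); // inner accesses may be unaligned
+            with_flag(&flag, true, || assert!(!flag.get())); // still weakened
+        });
+        assert!(flag.get()); // restored afterwards
+    }
+}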
+
+impl<'a, 'tcx, M: Machine<'tcx>> HasMemory<'a, 'tcx, M> for Memory<'a, 'tcx, M> {
+    #[inline]
+    fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M> {
+        self
+    }
+
+    #[inline]
+    fn memory(&self) -> &Memory<'a, 'tcx, M> {
+        self
+    }
+}
+
+impl<'a, 'tcx, M: Machine<'tcx>> HasMemory<'a, 'tcx, M> for EvalContext<'a, 'tcx, M> {
+    #[inline]
+    fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M> {
+        &mut self.memory
+    }
+
+    #[inline]
+    fn memory(&self) -> &Memory<'a, 'tcx, M> {
+        &self.memory
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Pointer arithmetic
+////////////////////////////////////////////////////////////////////////////////
+
+pub trait PointerArithmetic: layout::HasDataLayout {
+    // These are not supposed to be overridden.
+
+    /// Truncate the given value to the pointer size; also return whether there was an overflow.
+    fn truncate_to_ptr(self, val: u128) -> (u64, bool) {
+        let max_ptr_plus_1 = 1u128 << self.data_layout().pointer_size.bits();
+        ((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1)
+    }
+
+    // Overflow checking only works properly on the range from -u64 to +u64.
+    fn overflowing_signed_offset(self, val: u64, i: i128) -> (u64, bool) {
+        // FIXME: is it possible to over/underflow here?
+        if i < 0 {
+            // trickery to ensure that i64::min_value() works fine
+            // this formula only works for true negative values, it panics for zero!
+            let n = u64::max_value() - (i as u64) + 1;
+            val.overflowing_sub(n)
+        } else {
+            self.overflowing_offset(val, i as u64)
+        }
+    }
+
+    fn overflowing_offset(self, val: u64, i: u64) -> (u64, bool) {
+        let (res, over1) = val.overflowing_add(i);
+        let (res, over2) = self.truncate_to_ptr(res as u128);
+        (res, over1 || over2)
+    }
+
+    fn signed_offset<'tcx>(self, val: u64, i: i64) -> EvalResult<'tcx, u64> {
+        let (res, over) = self.overflowing_signed_offset(val, i as i128);
+        if over { err!(OverflowingMath) } else { Ok(res) }
+    }
+
+    fn offset<'tcx>(self, val: u64, i: u64) -> EvalResult<'tcx, u64> {
+        let (res, over) = self.overflowing_offset(val, i);
+        if over { err!(OverflowingMath) } else { Ok(res) }
+    }
+
+    fn wrapping_signed_offset(self, val: u64, i: i64) -> u64 {
+        self.overflowing_signed_offset(val, i as i128).0
+    }
+}
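+
+// A self-contained sketch of the signed-offset trick above, assuming a
+// 64-bit address space (so `truncate_to_ptr` is the identity): for a
+// negative `i`, `u64::max_value() - (i as u64) + 1` is exactly `-i`
+// reinterpreted in u64, so an overflowing subtraction wraps the same way a
+// signed addition would.
+#[cfg(test)]
+mod signed_offset_sketch {
+    fn overflowing_signed_offset_64(val: u64, i: i128) -> (u64, bool) {
+        if i < 0 {
+            let n = u64::max_value() - (i as u64) + 1;
+            val.overflowing_sub(n)
+        } else {
+            val.overflowing_add(i as u64)
+        }
+    }
+
+    #[test]
+    fn wraps_like_signed_math() {
+        assert_eq!(overflowing_signed_offset_64(100, -3), (97, false));
+        assert_eq!(overflowing_signed_offset_64(1, -2), (u64::max_value(), true));
+    }
+}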
+
+impl<T: layout::HasDataLayout> PointerArithmetic for T {}
+
+impl<'a, 'tcx, M: Machine<'tcx>> layout::HasDataLayout for &'a Memory<'a, 'tcx, M> {
+    #[inline]
+    fn data_layout(&self) -> &TargetDataLayout {
+        self.layout
+    }
+}
+impl<'a, 'tcx, M: Machine<'tcx>> layout::HasDataLayout for &'a EvalContext<'a, 'tcx, M> {
+    #[inline]
+    fn data_layout(&self) -> &TargetDataLayout {
+        self.memory().layout
+    }
+}
+
+impl<'c, 'b, 'a, 'tcx, M: Machine<'tcx>> layout::HasDataLayout
+    for &'c &'b mut EvalContext<'a, 'tcx, M> {
+    #[inline]
+    fn data_layout(&self) -> &TargetDataLayout {
+        self.memory().layout
+    }
+}
diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs
new file mode 100644 (file)
index 0000000..08837c4
--- /dev/null
@@ -0,0 +1,42 @@
+//! An interpreter for MIR used in CTFE and by miri
+
+#[macro_export]
+macro_rules! err {
+    ($($tt:tt)*) => { Err($crate::mir::interpret::EvalErrorKind::$($tt)*.into()) };
+}
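+// For example, `err!(InvalidNullPointerUsage)` expands to
+// `Err(EvalErrorKind::InvalidNullPointerUsage.into())`.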
+
+mod cast;
+mod const_eval;
+mod error;
+mod eval_context;
+mod lvalue;
+mod validation;
+mod machine;
+mod memory;
+mod operator;
+mod range_map;
+mod step;
+mod terminator;
+mod traits;
+mod value;
+
+pub use self::error::{EvalError, EvalResult, EvalErrorKind};
+
+pub use self::eval_context::{EvalContext, Frame, ResourceLimits, StackPopCleanup, DynamicLifetime,
+                             TyAndPacked, PtrAndAlign, ValTy};
+
+pub use self::lvalue::{Lvalue, LvalueExtra, GlobalId};
+
+pub use self::memory::{AllocId, Memory, MemoryPointer, MemoryKind, HasMemory, AccessKind, AllocIdKind};
+
+use self::memory::{PointerArithmetic, Lock};
+
+use self::range_map::RangeMap;
+
+pub use self::value::{PrimVal, PrimValKind, Value, Pointer};
+
+pub use self::const_eval::{eval_body_as_integer, eval_body_as_primval};
+
+pub use self::machine::Machine;
+
+pub use self::validation::{ValidationQuery, AbsLvalue};
diff --git a/src/librustc/mir/interpret/operator.rs b/src/librustc/mir/interpret/operator.rs
new file mode 100644 (file)
index 0000000..7fe4691
--- /dev/null
@@ -0,0 +1,268 @@
+use rustc::mir;
+use rustc::ty::Ty;
+use rustc_const_math::ConstFloat;
+use syntax::ast::FloatTy;
+use std::cmp::Ordering;
+
+use super::{EvalResult, EvalContext, Lvalue, Machine, ValTy};
+
+use super::value::{PrimVal, PrimValKind, Value, bytes_to_f32, bytes_to_f64, f32_to_bytes,
+                   f64_to_bytes};
+
+impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
+    fn binop_with_overflow(
+        &mut self,
+        op: mir::BinOp,
+        left: ValTy<'tcx>,
+        right: ValTy<'tcx>,
+    ) -> EvalResult<'tcx, (PrimVal, bool)> {
+        let left_val = self.value_to_primval(left)?;
+        let right_val = self.value_to_primval(right)?;
+        self.binary_op(op, left_val, left.ty, right_val, right.ty)
+    }
+
+    /// Applies the binary operation `op` to the two operands and writes a tuple of the result
+    /// and a boolean signifying the potential overflow to the destination.
+    pub fn intrinsic_with_overflow(
+        &mut self,
+        op: mir::BinOp,
+        left: ValTy<'tcx>,
+        right: ValTy<'tcx>,
+        dest: Lvalue,
+        dest_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx> {
+        let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
+        let val = Value::ByValPair(val, PrimVal::from_bool(overflowed));
+        let valty = ValTy {
+            value: val,
+            ty: dest_ty,
+        };
+        self.write_value(valty, dest)
+    }
+
+    /// Applies the binary operation `op` to the arguments and writes the result to the
+    /// destination. Returns `true` if the operation overflowed.
+    pub fn intrinsic_overflowing(
+        &mut self,
+        op: mir::BinOp,
+        left: ValTy<'tcx>,
+        right: ValTy<'tcx>,
+        dest: Lvalue,
+        dest_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx, bool> {
+        let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
+        self.write_primval(dest, val, dest_ty)?;
+        Ok(overflowed)
+    }
+}
+
+macro_rules! overflow {
+    ($op:ident, $l:expr, $r:expr) => ({
+        let (val, overflowed) = $l.$op($r);
+        let primval = PrimVal::Bytes(val as u128);
+        Ok((primval, overflowed))
+    })
+}
+
+macro_rules! int_arithmetic {
+    ($kind:expr, $int_op:ident, $l:expr, $r:expr) => ({
+        let l = $l;
+        let r = $r;
+        use super::PrimValKind::*;
+        match $kind {
+            I8  => overflow!($int_op, l as i8,  r as i8),
+            I16 => overflow!($int_op, l as i16, r as i16),
+            I32 => overflow!($int_op, l as i32, r as i32),
+            I64 => overflow!($int_op, l as i64, r as i64),
+            I128 => overflow!($int_op, l as i128, r as i128),
+            U8  => overflow!($int_op, l as u8,  r as u8),
+            U16 => overflow!($int_op, l as u16, r as u16),
+            U32 => overflow!($int_op, l as u32, r as u32),
+            U64 => overflow!($int_op, l as u64, r as u64),
+            U128 => overflow!($int_op, l as u128, r as u128),
+            _ => bug!("int_arithmetic should only be called on int primvals"),
+        }
+    })
+}
+
+macro_rules! int_shift {
+    ($kind:expr, $int_op:ident, $l:expr, $r:expr) => ({
+        let l = $l;
+        let r = $r;
+        let r_wrapped = r as u32;
+        match $kind {
+            I8  => overflow!($int_op, l as i8,  r_wrapped),
+            I16 => overflow!($int_op, l as i16, r_wrapped),
+            I32 => overflow!($int_op, l as i32, r_wrapped),
+            I64 => overflow!($int_op, l as i64, r_wrapped),
+            I128 => overflow!($int_op, l as i128, r_wrapped),
+            U8  => overflow!($int_op, l as u8,  r_wrapped),
+            U16 => overflow!($int_op, l as u16, r_wrapped),
+            U32 => overflow!($int_op, l as u32, r_wrapped),
+            U64 => overflow!($int_op, l as u64, r_wrapped),
+            U128 => overflow!($int_op, l as u128, r_wrapped),
+            _ => bug!("int_shift should only be called on int primvals"),
+        }.map(|(val, over)| (val, over || r != r_wrapped as u128))
+    })
+}
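+
+// A sketch of the shift-overflow rule encoded above: the RHS is truncated
+// to u32 for the primitive `overflowing_shl`/`overflowing_shr`, and any
+// bits lost in that truncation are counted as overflow as well.
+#[cfg(test)]
+mod shift_sketch {
+    #[test]
+    fn oversized_rhs_counts_as_overflow() {
+        let r: u128 = 1 << 32; // does not fit in a u32
+        let r_wrapped = r as u32; // truncates to 0
+        let (val, over) = 1u8.overflowing_shl(r_wrapped);
+        assert_eq!((val, over || r != r_wrapped as u128), (1, true));
+    }
+}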
+
+impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
+    /// Returns the result of the specified operation and whether it overflowed.
+    pub fn binary_op(
+        &self,
+        bin_op: mir::BinOp,
+        left: PrimVal,
+        left_ty: Ty<'tcx>,
+        right: PrimVal,
+        right_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx, (PrimVal, bool)> {
+        use rustc::mir::BinOp::*;
+        use super::PrimValKind::*;
+
+        let left_kind = self.ty_to_primval_kind(left_ty)?;
+        let right_kind = self.ty_to_primval_kind(right_ty)?;
+        //trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind);
+
+        // I: Handle operations that support pointers
+        if !left_kind.is_float() && !right_kind.is_float() {
+            if let Some(handled) = M::try_ptr_op(self, bin_op, left, left_ty, right, right_ty)? {
+                return Ok(handled);
+            }
+        }
+
+        // II: From now on, everything must be bytes, no pointers
+        let l = left.to_bytes()?;
+        let r = right.to_bytes()?;
+
+        // These ops can have an RHS with a different numeric type.
+        if right_kind.is_int() && (bin_op == Shl || bin_op == Shr) {
+            return match bin_op {
+                Shl => int_shift!(left_kind, overflowing_shl, l, r),
+                Shr => int_shift!(left_kind, overflowing_shr, l, r),
+                _ => bug!("it has already been checked that this is a shift op"),
+            };
+        }
+
+        if left_kind != right_kind {
+            let msg = format!(
+                "unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})",
+                bin_op,
+                left,
+                left_kind,
+                right,
+                right_kind
+            );
+            return err!(Unimplemented(msg));
+        }
+
+        let float_op = |op, l, r, ty| {
+            let l = ConstFloat {
+                bits: l,
+                ty,
+            };
+            let r = ConstFloat {
+                bits: r,
+                ty,
+            };
+            match op {
+                Eq => PrimVal::from_bool(l.try_cmp(r).unwrap() == Ordering::Equal),
+                Ne => PrimVal::from_bool(l.try_cmp(r).unwrap() != Ordering::Equal),
+                Lt => PrimVal::from_bool(l.try_cmp(r).unwrap() == Ordering::Less),
+                Le => PrimVal::from_bool(l.try_cmp(r).unwrap() != Ordering::Greater),
+                Gt => PrimVal::from_bool(l.try_cmp(r).unwrap() == Ordering::Greater),
+                Ge => PrimVal::from_bool(l.try_cmp(r).unwrap() != Ordering::Less),
+                Add => PrimVal::Bytes((l + r).unwrap().bits),
+                Sub => PrimVal::Bytes((l - r).unwrap().bits),
+                Mul => PrimVal::Bytes((l * r).unwrap().bits),
+                Div => PrimVal::Bytes((l / r).unwrap().bits),
+                Rem => PrimVal::Bytes((l % r).unwrap().bits),
+                _ => bug!("invalid float op: `{:?}`", op),
+            }
+        };
+
+        let val = match (bin_op, left_kind) {
+            (_, F32) => float_op(bin_op, l, r, FloatTy::F32),
+            (_, F64) => float_op(bin_op, l, r, FloatTy::F64),
+
+            (Eq, _) => PrimVal::from_bool(l == r),
+            (Ne, _) => PrimVal::from_bool(l != r),
+
+            (Lt, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) < (r as i128)),
+            (Lt, _) => PrimVal::from_bool(l < r),
+            (Le, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) <= (r as i128)),
+            (Le, _) => PrimVal::from_bool(l <= r),
+            (Gt, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) > (r as i128)),
+            (Gt, _) => PrimVal::from_bool(l > r),
+            (Ge, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) >= (r as i128)),
+            (Ge, _) => PrimVal::from_bool(l >= r),
+
+            (BitOr, _) => PrimVal::Bytes(l | r),
+            (BitAnd, _) => PrimVal::Bytes(l & r),
+            (BitXor, _) => PrimVal::Bytes(l ^ r),
+
+            (Add, k) if k.is_int() => return int_arithmetic!(k, overflowing_add, l, r),
+            (Sub, k) if k.is_int() => return int_arithmetic!(k, overflowing_sub, l, r),
+            (Mul, k) if k.is_int() => return int_arithmetic!(k, overflowing_mul, l, r),
+            (Div, k) if k.is_int() => return int_arithmetic!(k, overflowing_div, l, r),
+            (Rem, k) if k.is_int() => return int_arithmetic!(k, overflowing_rem, l, r),
+
+            _ => {
+                let msg = format!(
+                    "unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})",
+                    bin_op,
+                    left,
+                    left_kind,
+                    right,
+                    right_kind
+                );
+                return err!(Unimplemented(msg));
+            }
+        };
+
+        Ok((val, false))
+    }
+}
+
+pub fn unary_op<'tcx>(
+    un_op: mir::UnOp,
+    val: PrimVal,
+    val_kind: PrimValKind,
+) -> EvalResult<'tcx, PrimVal> {
+    use rustc::mir::UnOp::*;
+    use super::PrimValKind::*;
+
+    let bytes = val.to_bytes()?;
+
+    let result_bytes = match (un_op, val_kind) {
+        (Not, Bool) => !val.to_bool()? as u128,
+
+        (Not, U8) => !(bytes as u8) as u128,
+        (Not, U16) => !(bytes as u16) as u128,
+        (Not, U32) => !(bytes as u32) as u128,
+        (Not, U64) => !(bytes as u64) as u128,
+        (Not, U128) => !bytes,
+
+        (Not, I8) => !(bytes as i8) as u128,
+        (Not, I16) => !(bytes as i16) as u128,
+        (Not, I32) => !(bytes as i32) as u128,
+        (Not, I64) => !(bytes as i64) as u128,
+        (Not, I128) => !(bytes as i128) as u128,
+
+        (Neg, I8) => -(bytes as i8) as u128,
+        (Neg, I16) => -(bytes as i16) as u128,
+        (Neg, I32) => -(bytes as i32) as u128,
+        (Neg, I64) => -(bytes as i64) as u128,
+        (Neg, I128) => -(bytes as i128) as u128,
+
+        (Neg, F32) => f32_to_bytes(-bytes_to_f32(bytes)),
+        (Neg, F64) => f64_to_bytes(-bytes_to_f64(bytes)),
+
+        _ => {
+            let msg = format!("unimplemented unary op: {:?}, {:?}", un_op, val);
+            return err!(Unimplemented(msg));
+        }
+    };
+
+    Ok(PrimVal::Bytes(result_bytes))
+}
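+
+// A quick sketch of the double casts above: the operation runs at the
+// value's own width, and the cast back to u128 sign-extends, which is fine
+// because `write_primval` masks the value down to size again on write.
+#[cfg(test)]
+mod unop_cast_sketch {
+    #[test]
+    fn neg_i8_roundtrip() {
+        let bytes: u128 = 1; // the `PrimVal::Bytes` encoding of 1i8
+        let result = -(bytes as i8) as u128; // negate at i8 width
+        assert_eq!(result as u8, 0xff); // the low byte encodes -1i8
+        assert_eq!(result, !0u128); // sign-extended through the cast
+    }
+}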
diff --git a/src/librustc/mir/interpret/range_map.rs b/src/librustc/mir/interpret/range_map.rs
new file mode 100644 (file)
index 0000000..5cdcbe3
--- /dev/null
@@ -0,0 +1,250 @@
+//! Implements a map from integer indices to data.
+//! Rather than storing data for every index, internally, this maps entire ranges to the data.
+//! To this end, the APIs all work on ranges, not on individual integers. Ranges are split as
+//! necessary (e.g. when [0,5) is first associated with X, and then [1,2) is mutated).
+//! Users must not depend on whether a range is coalesced or not, even though this is observable
+//! via the iteration APIs.
+use std::collections::BTreeMap;
+use std::ops;
+
+#[derive(Clone, Debug)]
+pub struct RangeMap<T> {
+    map: BTreeMap<Range, T>,
+}
+
+// The derived `Ord` impl sorts first by the first field and then, if those are equal,
+// by the second field.
+// This is exactly what we need for our purposes: a range query on a BTreeSet/BTreeMap will
+// give us all `MemoryRange`s whose `start` is <= the one we're looking for, but not > the
+// end of the range we're checking.
+// At the same time, the `end` is irrelevant for the sorting and range searching, but used for the check.
+// This kind of search breaks if `end < start`, so don't do that!
+#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
+struct Range {
+    start: u64,
+    end: u64, // Invariant: end > start
+}
+
+impl Range {
+    fn range(offset: u64, len: u64) -> ops::Range<Range> {
+        assert!(len > 0);
+        // We select all elements that are within the range given by the offset into the
+        // allocation and the length.
+        // This is sound if every range that intersects the argument range is contained in
+        // the resulting range of ranges.
+        let left = Range {
+            // lowest range to include `offset`
+            start: 0,
+            end: offset + 1,
+        };
+        let right = Range {
+            // lowest (valid) range not to include `offset+len`
+            start: offset + len,
+            end: offset + len + 1,
+        };
+        left..right
+    }
+
+    /// Tests if any part of [offset, offset+len) overlaps with this range.
+    fn overlaps(&self, offset: u64, len: u64) -> bool {
+        assert!(len > 0);
+        offset < self.end && offset + len >= self.start
+    }
+}
+
+impl<T> RangeMap<T> {
+    pub fn new() -> RangeMap<T> {
+        RangeMap { map: BTreeMap::new() }
+    }
+
+    fn iter_with_range<'a>(
+        &'a self,
+        offset: u64,
+        len: u64,
+    ) -> impl Iterator<Item = (&'a Range, &'a T)> + 'a {
+        assert!(len > 0);
+        self.map.range(Range::range(offset, len)).filter_map(
+            move |(range,
+                   data)| {
+                if range.overlaps(offset, len) {
+                    Some((range, data))
+                } else {
+                    None
+                }
+            },
+        )
+    }
+
+    pub fn iter<'a>(&'a self, offset: u64, len: u64) -> impl Iterator<Item = &'a T> + 'a {
+        self.iter_with_range(offset, len).map(|(_, data)| data)
+    }
+
+    fn split_entry_at(&mut self, offset: u64)
+    where
+        T: Clone,
+    {
+        let range = match self.iter_with_range(offset, 1).next() {
+            Some((&range, _)) => range,
+            None => return,
+        };
+        assert!(
+            range.start <= offset && range.end > offset,
+            "We got a range that doesn't even contain what we asked for."
+        );
+        // There is an entry overlapping this position, see if we have to split it
+        if range.start < offset {
+            let data = self.map.remove(&range).unwrap();
+            let old = self.map.insert(
+                Range {
+                    start: range.start,
+                    end: offset,
+                },
+                data.clone(),
+            );
+            assert!(old.is_none());
+            let old = self.map.insert(
+                Range {
+                    start: offset,
+                    end: range.end,
+                },
+                data,
+            );
+            assert!(old.is_none());
+        }
+    }
+
+    pub fn iter_mut_all<'a>(&'a mut self) -> impl Iterator<Item = &'a mut T> + 'a {
+        self.map.values_mut()
+    }
+
+    /// Provide mutable iteration over everything in the given range.  As a side-effect,
+    /// this will split entries in the map that are only partially hit by the given range,
+    /// to make sure that when they are mutated, the effect is constrained to the given range.
+    pub fn iter_mut_with_gaps<'a>(
+        &'a mut self,
+        offset: u64,
+        len: u64,
+    ) -> impl Iterator<Item = &'a mut T> + 'a
+    where
+        T: Clone,
+    {
+        assert!(len > 0);
+        // Preparation: Split first and last entry as needed.
+        self.split_entry_at(offset);
+        self.split_entry_at(offset + len);
+        // Now we can provide a mutable iterator
+        self.map.range_mut(Range::range(offset, len)).filter_map(
+            move |(&range, data)| {
+                if range.overlaps(offset, len) {
+                    assert!(
+                        offset <= range.start && offset + len >= range.end,
+                        "The splitting went wrong"
+                    );
+                    Some(data)
+                } else {
+                    // Skip this one
+                    None
+                }
+            },
+        )
+    }
+
+    /// Provide a mutable iterator over everything in the given range, with the same side-effects as
+    /// `iter_mut_with_gaps`.  Furthermore, if there are gaps between ranges, fill them with `T::default()`.
+    /// This is also how you insert.
+    pub fn iter_mut<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator<Item = &'a mut T> + 'a
+    where
+        T: Clone + Default,
+    {
+        // Do a first iteration to collect the gaps
+        let mut gaps = Vec::new();
+        let mut last_end = offset;
+        for (range, _) in self.iter_with_range(offset, len) {
+            if last_end < range.start {
+                gaps.push(Range {
+                    start: last_end,
+                    end: range.start,
+                });
+            }
+            last_end = range.end;
+        }
+        if last_end < offset + len {
+            gaps.push(Range {
+                start: last_end,
+                end: offset + len,
+            });
+        }
+
+        // Add default for all gaps
+        for gap in gaps {
+            let old = self.map.insert(gap, Default::default());
+            assert!(old.is_none());
+        }
+
+        // Now provide mutable iteration
+        self.iter_mut_with_gaps(offset, len)
+    }
+
+    pub fn retain<F>(&mut self, mut f: F)
+    where
+        F: FnMut(&T) -> bool,
+    {
+        let mut remove = Vec::new();
+        for (range, data) in self.map.iter() {
+            if !f(data) {
+                remove.push(*range);
+            }
+        }
+
+        for range in remove {
+            self.map.remove(&range);
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /// Query the map at every offset in the range and collect the results.
+    fn to_vec<T: Copy>(map: &RangeMap<T>, offset: u64, len: u64) -> Vec<T> {
+        (offset..offset + len)
+            .into_iter()
+            .map(|i| *map.iter(i, 1).next().unwrap())
+            .collect()
+    }
+
+    #[test]
+    fn basic_insert() {
+        let mut map = RangeMap::<i32>::new();
+        // Insert
+        for x in map.iter_mut(10, 1) {
+            *x = 42;
+        }
+        // Check
+        assert_eq!(to_vec(&map, 10, 1), vec![42]);
+    }
+
+    #[test]
+    fn gaps() {
+        let mut map = RangeMap::<i32>::new();
+        for x in map.iter_mut(11, 1) {
+            *x = 42;
+        }
+        for x in map.iter_mut(15, 1) {
+            *x = 42;
+        }
+
+        // Now request a range that needs three gaps filled
+        for x in map.iter_mut(10, 10) {
+            if *x != 42 {
+                *x = 23;
+            }
+        }
+
+        assert_eq!(
+            to_vec(&map, 10, 10),
+            vec![23, 42, 23, 23, 23, 42, 23, 23, 23, 23]
+        );
+        assert_eq!(to_vec(&map, 13, 5), vec![23, 23, 42, 23, 23]);
+    }
+}
diff --git a/src/librustc/mir/interpret/step.rs b/src/librustc/mir/interpret/step.rs
new file mode 100644 (file)
index 0000000..c701ebf
--- /dev/null
@@ -0,0 +1,402 @@
+//! This module contains the `EvalContext` methods for executing a single step of the interpreter.
+//!
+//! The main entry point is the `step` method.
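+//!
+//! A typical driver loop (a sketch; `ecx` here stands for an `EvalContext` that
+//! already has an initial stack frame pushed) simply pumps `step` until it
+//! reports that the stack is empty:
+//!
+//! ```ignore
+//! while ecx.step()? {
+//!     // each iteration executes one statement or terminator, or pushes a
+//!     // stack frame for a constant that still needs to be evaluated
+//! }
+//! ```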
+
+use rustc::hir::def_id::DefId;
+use rustc::hir;
+use rustc::mir::visit::{Visitor, LvalueContext};
+use rustc::mir;
+use rustc::traits::Reveal;
+use rustc::ty;
+use rustc::ty::layout::Layout;
+use rustc::ty::subst::Substs;
+use rustc::middle::const_val::ConstVal;
+
+use super::{EvalResult, EvalContext, StackPopCleanup, PtrAndAlign, GlobalId, Lvalue,
+            MemoryKind, Machine, PrimVal};
+
+use syntax::codemap::Span;
+use syntax::ast::Mutability;
+
+impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
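+    /// Decrements the remaining step budget by `n` (saturating at zero) and fails with
+    /// `ExecutionTimeLimitReached` once the budget is exhausted.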
+    pub fn inc_step_counter_and_check_limit(&mut self, n: u64) -> EvalResult<'tcx> {
+        self.steps_remaining = self.steps_remaining.saturating_sub(n);
+        if self.steps_remaining > 0 {
+            Ok(())
+        } else {
+            err!(ExecutionTimeLimitReached)
+        }
+    }
+
+    /// Returns true as long as there are more things to do.
+    pub fn step(&mut self) -> EvalResult<'tcx, bool> {
+        self.inc_step_counter_and_check_limit(1)?;
+        if self.stack.is_empty() {
+            return Ok(false);
+        }
+
+        let block = self.frame().block;
+        let stmt_id = self.frame().stmt;
+        let mir = self.mir();
+        let basic_block = &mir.basic_blocks()[block];
+
+        if let Some(stmt) = basic_block.statements.get(stmt_id) {
+            let mut new = Ok(0);
+            ConstantExtractor {
+                span: stmt.source_info.span,
+                instance: self.frame().instance,
+                ecx: self,
+                mir,
+                new_constants: &mut new,
+            }.visit_statement(
+                block,
+                stmt,
+                mir::Location {
+                    block,
+                    statement_index: stmt_id,
+                },
+            );
+            // if ConstantExtractor added new frames, we don't execute anything here
+            // but await the next call to step
+            if new? == 0 {
+                self.statement(stmt)?;
+            }
+            return Ok(true);
+        }
+
+        let terminator = basic_block.terminator();
+        let mut new = Ok(0);
+        ConstantExtractor {
+            span: terminator.source_info.span,
+            instance: self.frame().instance,
+            ecx: self,
+            mir,
+            new_constants: &mut new,
+        }.visit_terminator(
+            block,
+            terminator,
+            mir::Location {
+                block,
+                statement_index: stmt_id,
+            },
+        );
+        // if ConstantExtractor added new frames, we don't execute anything here
+        // but await the next call to step
+        if new? == 0 {
+            self.terminator(terminator)?;
+        }
+        Ok(true)
+    }
+
+    fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> {
+        trace!("{:?}", stmt);
+
+        use rustc::mir::StatementKind::*;
+
+        // Some statements (e.g. box) push new stack frames.  We have to record the stack frame number
+        // *before* executing the statement.
+        let frame_idx = self.cur_frame();
+
+        match stmt.kind {
+            Assign(ref lvalue, ref rvalue) => self.eval_rvalue_into_lvalue(rvalue, lvalue)?,
+
+            SetDiscriminant {
+                ref lvalue,
+                variant_index,
+            } => {
+                let dest = self.eval_lvalue(lvalue)?;
+                let dest_ty = self.lvalue_ty(lvalue);
+                let dest_layout = self.type_layout(dest_ty)?;
+
+                match *dest_layout {
+                    Layout::General { discr, .. } => {
+                        let discr_size = discr.size().bytes();
+                        let dest_ptr = self.force_allocation(dest)?.to_ptr()?;
+                        self.memory.write_primval(
+                            dest_ptr,
+                            PrimVal::Bytes(variant_index as u128),
+                            discr_size,
+                            false
+                        )?
+                    }
+
+                    Layout::RawNullablePointer { nndiscr, .. } => {
+                        if variant_index as u64 != nndiscr {
+                            self.write_null(dest, dest_ty)?;
+                        }
+                    }
+
+                    Layout::StructWrappedNullablePointer {
+                        nndiscr,
+                        ref discrfield_source,
+                        ..
+                    } => {
+                        if variant_index as u64 != nndiscr {
+                            self.write_struct_wrapped_null_pointer(
+                                dest_ty,
+                                nndiscr,
+                                discrfield_source,
+                                dest,
+                            )?;
+                        }
+                    }
+
+                    _ => {
+                        bug!(
+                            "SetDiscriminant on {} represented as {:#?}",
+                            dest_ty,
+                            dest_layout
+                        )
+                    }
+                }
+            }
+
+            // Mark locals as alive
+            StorageLive(local) => {
+                let old_val = self.frame_mut().storage_live(local)?;
+                self.deallocate_local(old_val)?;
+            }
+
+            // Mark locals as dead
+            StorageDead(local) => {
+                let old_val = self.frame_mut().storage_dead(local)?;
+                self.deallocate_local(old_val)?;
+            }
+
+            // Validity checks.
+            Validate(op, ref lvalues) => {
+                for operand in lvalues {
+                    self.validation_op(op, operand)?;
+                }
+            }
+            EndRegion(ce) => {
+                self.end_region(Some(ce))?;
+            }
+
+            // Defined to do nothing. These are added by optimization passes, to avoid changing the
+            // size of MIR constantly.
+            Nop => {}
+
+            InlineAsm { .. } => return err!(InlineAsm),
+        }
+
+        self.stack[frame_idx].stmt += 1;
+        Ok(())
+    }
+
+    fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> EvalResult<'tcx> {
+        trace!("{:?}", terminator.kind);
+        self.eval_terminator(terminator)?;
+        if !self.stack.is_empty() {
+            trace!("// {:?}", self.frame().block);
+        }
+        Ok(())
+    }
+
+    /// Returns `true` if a stack frame was pushed.
+    fn global_item(
+        &mut self,
+        def_id: DefId,
+        substs: &'tcx Substs<'tcx>,
+        span: Span,
+        mutability: Mutability,
+    ) -> EvalResult<'tcx, bool> {
+        let instance = self.resolve_associated_const(def_id, substs);
+        let cid = GlobalId {
+            instance,
+            promoted: None,
+        };
+        if self.globals.contains_key(&cid) {
+            return Ok(false);
+        }
+        if self.tcx.has_attr(def_id, "linkage") {
+            M::global_item_with_linkage(self, cid.instance, mutability)?;
+            return Ok(false);
+        }
+        let mir = self.load_mir(instance.def)?;
+        let size = self.type_size_with_substs(mir.return_ty, substs)?.expect(
+            "unsized global",
+        );
+        let align = self.type_align_with_substs(mir.return_ty, substs)?;
+        let ptr = self.memory.allocate(
+            size,
+            align,
+            MemoryKind::UninitializedStatic,
+        )?;
+        let aligned = !self.is_packed(mir.return_ty)?;
+        self.globals.insert(
+            cid,
+            PtrAndAlign {
+                ptr: ptr.into(),
+                aligned,
+            },
+        );
+        let internally_mutable = !mir.return_ty.is_freeze(
+            self.tcx,
+            ty::ParamEnv::empty(Reveal::All),
+            span,
+        );
+        let mutability = if mutability == Mutability::Mutable || internally_mutable {
+            Mutability::Mutable
+        } else {
+            Mutability::Immutable
+        };
+        let cleanup = StackPopCleanup::MarkStatic(mutability);
+        let name = ty::tls::with(|tcx| tcx.item_path_str(def_id));
+        trace!("pushing stack frame for global: {}", name);
+        self.push_stack_frame(
+            instance,
+            span,
+            mir,
+            Lvalue::from_ptr(ptr),
+            cleanup,
+        )?;
+        Ok(true)
+    }
+}
+
+// WARNING: This code pushes new stack frames.  Make sure that any methods implemented on this
+// type don't ever access ecx.stack[ecx.cur_frame()], as that will change. This includes, e.g.,
+// using the current stack frame's substitution.
+// Basically don't call anything other than `load_mir`, `alloc_ptr`, `push_stack_frame`.
+struct ConstantExtractor<'a, 'b: 'a, 'tcx: 'b, M: Machine<'tcx> + 'a> {
+    span: Span,
+    ecx: &'a mut EvalContext<'b, 'tcx, M>,
+    mir: &'tcx mir::Mir<'tcx>,
+    instance: ty::Instance<'tcx>,
+    new_constants: &'a mut EvalResult<'tcx, u64>,
+}
+
+impl<'a, 'b, 'tcx, M: Machine<'tcx>> ConstantExtractor<'a, 'b, 'tcx, M> {
+    fn try<F: FnOnce(&mut Self) -> EvalResult<'tcx, bool>>(&mut self, f: F) {
+        let n = match *self.new_constants {
+            Ok(n) => n,
+            // a previous constant errored, so don't evaluate any further ones
+            Err(_) => return,
+        };
+        match f(self) {
+            // everything ok + a new stackframe
+            Ok(true) => *self.new_constants = Ok(n + 1),
+            // constant correctly evaluated, but no new stackframe
+            Ok(false) => {}
+            // constant eval errored
+            Err(err) => *self.new_constants = Err(err),
+        }
+    }
+}
+
+impl<'a, 'b, 'tcx, M: Machine<'tcx>> Visitor<'tcx> for ConstantExtractor<'a, 'b, 'tcx, M> {
+    fn visit_constant(&mut self, constant: &mir::Constant<'tcx>, location: mir::Location) {
+        self.super_constant(constant, location);
+        match constant.literal {
+            // constants that rustc has not evaluated yet
+            mir::Literal::Value { value: &ty::Const { val: ConstVal::Unevaluated(def_id, substs), .. } } => {
+                self.try(|this| {
+                    this.ecx.global_item(
+                        def_id,
+                        substs,
+                        constant.span,
+                        Mutability::Immutable,
+                    )
+                });
+            }
+            // already computed by rustc
+            mir::Literal::Value { .. } => {}
+            mir::Literal::Promoted { index } => {
+                let cid = GlobalId {
+                    instance: self.instance,
+                    promoted: Some(index),
+                };
+                if self.ecx.globals.contains_key(&cid) {
+                    return;
+                }
+                let mir = &self.mir.promoted[index];
+                self.try(|this| {
+                    let size = this.ecx
+                        .type_size_with_substs(mir.return_ty, this.instance.substs)?
+                        .expect("unsized global");
+                    let align = this.ecx.type_align_with_substs(
+                        mir.return_ty,
+                        this.instance.substs,
+                    )?;
+                    let ptr = this.ecx.memory.allocate(
+                        size,
+                        align,
+                        MemoryKind::UninitializedStatic,
+                    )?;
+                    let aligned = !this.ecx.is_packed(mir.return_ty)?;
+                    this.ecx.globals.insert(
+                        cid,
+                        PtrAndAlign {
+                            ptr: ptr.into(),
+                            aligned,
+                        },
+                    );
+                    trace!("pushing stack frame for {:?}", index);
+                    this.ecx.push_stack_frame(
+                        this.instance,
+                        constant.span,
+                        mir,
+                        Lvalue::from_ptr(ptr),
+                        StackPopCleanup::MarkStatic(Mutability::Immutable),
+                    )?;
+                    Ok(true)
+                });
+            }
+        }
+    }
+
+    fn visit_lvalue(
+        &mut self,
+        lvalue: &mir::Lvalue<'tcx>,
+        context: LvalueContext<'tcx>,
+        location: mir::Location,
+    ) {
+        self.super_lvalue(lvalue, context, location);
+        if let mir::Lvalue::Static(ref static_) = *lvalue {
+            let def_id = static_.def_id;
+            let substs = self.ecx.tcx.intern_substs(&[]);
+            let span = self.span;
+            if let Some(node_item) = self.ecx.tcx.hir.get_if_local(def_id) {
+                if let hir::map::Node::NodeItem(&hir::Item { ref node, .. }) = node_item {
+                    if let hir::ItemStatic(_, m, _) = *node {
+                        self.try(|this| {
+                            this.ecx.global_item(
+                                def_id,
+                                substs,
+                                span,
+                                if m == hir::MutMutable {
+                                    Mutability::Mutable
+                                } else {
+                                    Mutability::Immutable
+                                },
+                            )
+                        });
+                        return;
+                    } else {
+                        bug!("static def id doesn't point to static");
+                    }
+                } else {
+                    bug!("static def id doesn't point to item");
+                }
+            } else {
+                let def = self.ecx.tcx.describe_def(def_id).expect("static not found");
+                if let hir::def::Def::Static(_, mutable) = def {
+                    self.try(|this| {
+                        this.ecx.global_item(
+                            def_id,
+                            substs,
+                            span,
+                            if mutable {
+                                Mutability::Mutable
+                            } else {
+                                Mutability::Immutable
+                            },
+                        )
+                    });
+                } else {
+                    bug!("static found but isn't a static: {:?}", def);
+                }
+            }
+        }
+    }
+}
diff --git a/src/librustc/mir/interpret/terminator/drop.rs b/src/librustc/mir/interpret/terminator/drop.rs
new file mode 100644 (file)
index 0000000..6596cf9
--- /dev/null
@@ -0,0 +1,83 @@
+use rustc::mir::BasicBlock;
+use rustc::ty::{self, Ty};
+use syntax::codemap::Span;
+
+use interpret::{EvalResult, EvalContext, Lvalue, LvalueExtra, PrimVal, Value,
+                Machine, ValTy};
+
+impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
+    pub(crate) fn drop_lvalue(
+        &mut self,
+        lval: Lvalue,
+        instance: ty::Instance<'tcx>,
+        ty: Ty<'tcx>,
+        span: Span,
+        target: BasicBlock,
+    ) -> EvalResult<'tcx> {
+        trace!("drop_lvalue: {:#?}", lval);
+        // We take the address of the object.  This may well be unaligned, which is fine for us here.
+        // However, unaligned accesses will probably make the actual drop implementation fail -- a problem shared
+        // by rustc.
+        let val = match self.force_allocation(lval)? {
+            Lvalue::Ptr {
+                ptr,
+                extra: LvalueExtra::Vtable(vtable),
+            } => ptr.ptr.to_value_with_vtable(vtable),
+            Lvalue::Ptr {
+                ptr,
+                extra: LvalueExtra::Length(len),
+            } => ptr.ptr.to_value_with_len(len),
+            Lvalue::Ptr {
+                ptr,
+                extra: LvalueExtra::None,
+            } => ptr.ptr.to_value(),
+            _ => bug!("force_allocation broken"),
+        };
+        self.drop(val, instance, ty, span, target)
+    }
+
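+    /// Drops a value: for trait objects, the concrete drop function is read from the
+    /// vtable (and if there is none, we just jump to the target block); otherwise the
+    /// provided `instance` is used directly.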
+    fn drop(
+        &mut self,
+        arg: Value,
+        instance: ty::Instance<'tcx>,
+        ty: Ty<'tcx>,
+        span: Span,
+        target: BasicBlock,
+    ) -> EvalResult<'tcx> {
+        trace!("drop: {:#?}, {:?}, {:?}", arg, ty.sty, instance.def);
+
+        let instance = match ty.sty {
+            ty::TyDynamic(..) => {
+                let vtable = match arg {
+                    Value::ByValPair(_, PrimVal::Ptr(vtable)) => vtable,
+                    _ => bug!("expected fat ptr, got {:?}", arg),
+                };
+                match self.read_drop_type_from_vtable(vtable)? {
+                    Some(func) => func,
+                    // no drop fn -> bail out
+                    None => {
+                        self.goto_block(target);
+                        return Ok(())
+                    },
+                }
+            }
+            _ => instance,
+        };
+
+        // the drop function expects a reference to the value
+        let valty = ValTy {
+            value: arg,
+            ty: self.tcx.mk_mut_ptr(ty),
+        };
+
+        let fn_sig = self.tcx.fn_sig(instance.def_id()).skip_binder().clone();
+
+        self.eval_fn_call(
+            instance,
+            Some((Lvalue::undef(), target)),
+            &[valty],
+            span,
+            fn_sig,
+        )
+    }
+}
diff --git a/src/librustc/mir/interpret/terminator/mod.rs b/src/librustc/mir/interpret/terminator/mod.rs
new file mode 100644 (file)
index 0000000..e01777c
--- /dev/null
@@ -0,0 +1,411 @@
+use rustc::mir;
+use rustc::ty::{self, TypeVariants};
+use rustc::ty::layout::Layout;
+use syntax::codemap::Span;
+use syntax::abi::Abi;
+
+use super::{EvalResult, EvalContext, eval_context,
+            PtrAndAlign, Lvalue, PrimVal, Value, Machine, ValTy};
+
+use rustc_data_structures::indexed_vec::Idx;
+
+mod drop;
+
+impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
+    pub fn goto_block(&mut self, target: mir::BasicBlock) {
+        self.frame_mut().block = target;
+        self.frame_mut().stmt = 0;
+    }
+
+    pub(super) fn eval_terminator(
+        &mut self,
+        terminator: &mir::Terminator<'tcx>,
+    ) -> EvalResult<'tcx> {
+        use rustc::mir::TerminatorKind::*;
+        match terminator.kind {
+            Return => {
+                self.dump_local(self.frame().return_lvalue);
+                self.pop_stack_frame()?
+            }
+
+            Goto { target } => self.goto_block(target),
+
+            SwitchInt {
+                ref discr,
+                ref values,
+                ref targets,
+                ..
+            } => {
+                // FIXME(CTFE): forbid branching
+                let discr_val = self.eval_operand(discr)?;
+                let discr_prim = self.value_to_primval(discr_val)?;
+
+                // Branch to the `otherwise` case by default, if no match is found.
+                let mut target_block = targets[targets.len() - 1];
+
+                for (index, const_int) in values.iter().enumerate() {
+                    let prim = PrimVal::Bytes(const_int.to_u128_unchecked());
+                    if discr_prim.to_bytes()? == prim.to_bytes()? {
+                        target_block = targets[index];
+                        break;
+                    }
+                }
+
+                self.goto_block(target_block);
+            }
+
+            Call {
+                ref func,
+                ref args,
+                ref destination,
+                ..
+            } => {
+                let destination = match *destination {
+                    Some((ref lv, target)) => Some((self.eval_lvalue(lv)?, target)),
+                    None => None,
+                };
+
+                let func_ty = self.operand_ty(func);
+                let (fn_def, sig) = match func_ty.sty {
+                    ty::TyFnPtr(sig) => {
+                        let fn_ptr = self.eval_operand_to_primval(func)?.to_ptr()?;
+                        let instance = self.memory.get_fn(fn_ptr)?;
+                        let instance_ty = instance.def.def_ty(self.tcx);
+                        let instance_ty = self.monomorphize(instance_ty, instance.substs);
+                        match instance_ty.sty {
+                            ty::TyFnDef(..) => {
+                                let real_sig = instance_ty.fn_sig(self.tcx);
+                                let sig = self.tcx.erase_late_bound_regions_and_normalize(&sig);
+                                let real_sig = self.tcx.erase_late_bound_regions_and_normalize(&real_sig);
+                                if !self.check_sig_compat(sig, real_sig)? {
+                                    return err!(FunctionPointerTyMismatch(real_sig, sig));
+                                }
+                            }
+                            ref other => bug!("instance def ty: {:?}", other),
+                        }
+                        (instance, sig)
+                    }
+                    ty::TyFnDef(def_id, substs) => (
+                        eval_context::resolve(self.tcx, def_id, substs),
+                        func_ty.fn_sig(self.tcx),
+                    ),
+                    _ => {
+                        let msg = format!("can't handle callee of type {:?}", func_ty);
+                        return err!(Unimplemented(msg));
+                    }
+                };
+                let args = self.operands_to_args(args)?;
+                let sig = self.tcx.erase_late_bound_regions_and_normalize(&sig);
+                self.eval_fn_call(
+                    fn_def,
+                    destination,
+                    &args,
+                    terminator.source_info.span,
+                    sig,
+                )?;
+            }
+
+            Drop {
+                ref location,
+                target,
+                ..
+            } => {
+                // FIXME(CTFE): forbid drop in const eval
+                let lval = self.eval_lvalue(location)?;
+                let ty = self.lvalue_ty(location);
+                let ty = eval_context::apply_param_substs(self.tcx, self.substs(), &ty);
+                trace!("TerminatorKind::drop: {:?}, type {}", location, ty);
+
+                let instance = eval_context::resolve_drop_in_place(self.tcx, ty);
+                self.drop_lvalue(
+                    lval,
+                    instance,
+                    ty,
+                    terminator.source_info.span,
+                    target,
+                )?;
+            }
+
+            Assert {
+                ref cond,
+                expected,
+                ref msg,
+                target,
+                ..
+            } => {
+                let cond_val = self.eval_operand_to_primval(cond)?.to_bool()?;
+                if expected == cond_val {
+                    self.goto_block(target);
+                } else {
+                    use rustc::mir::AssertMessage::*;
+                    return match *msg {
+                        BoundsCheck { ref len, ref index } => {
+                            let span = terminator.source_info.span;
+                            let len = self.eval_operand_to_primval(len)
+                                .expect("can't eval len")
+                                .to_u64()?;
+                            let index = self.eval_operand_to_primval(index)
+                                .expect("can't eval index")
+                                .to_u64()?;
+                            err!(ArrayIndexOutOfBounds(span, len, index))
+                        }
+                        Math(ref err) => {
+                            err!(Math(terminator.source_info.span, err.clone()))
+                        }
+                        GeneratorResumedAfterReturn |
+                        GeneratorResumedAfterPanic => unimplemented!(),
+                    };
+                }
+            }
+
+            Yield { .. } => unimplemented!("{:#?}", terminator.kind),
+            GeneratorDrop => unimplemented!(),
+            DropAndReplace { .. } => unimplemented!(),
+            Resume => unimplemented!(),
+            Unreachable => return err!(Unreachable),
+        }
+
+        Ok(())
+    }
+
+    /// Decides whether it is okay to call the method with signature `real_sig` using signature `sig`.
+    /// FIXME: This should take into account the platform-dependent ABI description.
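+    ///
+    /// For example, calling through a `fn(*const i32)` pointer that actually points to a
+    /// `fn(*mut i32)` is accepted (raw pointer mutability may differ), and so is calling a
+    /// non-capturing closure (ABI `RustCall`, taking a ZST closure object plus a tuple of
+    /// arguments) through a plain `fn` pointer with ABI `Rust`.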
+    fn check_sig_compat(
+        &mut self,
+        sig: ty::FnSig<'tcx>,
+        real_sig: ty::FnSig<'tcx>,
+    ) -> EvalResult<'tcx, bool> {
+        fn check_ty_compat<'tcx>(ty: ty::Ty<'tcx>, real_ty: ty::Ty<'tcx>) -> bool {
+            if ty == real_ty {
+                // types are interned, so `==` is a fast pointer comparison here
+                return true;
+            }
+            return match (&ty.sty, &real_ty.sty) {
+                // Permit changing the pointer type of raw pointers and references as well as
+                // mutability of raw pointers.
+                // TODO: Should not be allowed when fat pointers are involved.
+                (&TypeVariants::TyRawPtr(_), &TypeVariants::TyRawPtr(_)) => true,
+                (&TypeVariants::TyRef(_, _), &TypeVariants::TyRef(_, _)) => {
+                    ty.is_mutable_pointer() == real_ty.is_mutable_pointer()
+                }
+                // rule out everything else
+                _ => false,
+            };
+        }
+
+        if sig.abi == real_sig.abi && sig.variadic == real_sig.variadic &&
+            sig.inputs_and_output.len() == real_sig.inputs_and_output.len() &&
+            sig.inputs_and_output
+                .iter()
+                .zip(real_sig.inputs_and_output)
+                .all(|(ty, real_ty)| check_ty_compat(ty, real_ty))
+        {
+            // Definitely good.
+            return Ok(true);
+        }
+
+        if sig.variadic || real_sig.variadic {
+            // We're not touching this
+            return Ok(false);
+        }
+
+        // We need to allow what comes up when a non-capturing closure is cast to a fn().
+        match (sig.abi, real_sig.abi) {
+            (Abi::Rust, Abi::RustCall) // check the ABIs.  This makes the test here non-symmetric.
+                if check_ty_compat(sig.output(), real_sig.output()) && real_sig.inputs_and_output.len() == 3 => {
+                // First argument of real_sig must be a ZST
+                let fst_ty = real_sig.inputs_and_output[0];
+                let layout = self.type_layout(fst_ty)?;
+                let size = layout.size(&self.tcx.data_layout).bytes();
+                if size == 0 {
+                    // Second argument must be a tuple matching the argument list of sig
+                    let snd_ty = real_sig.inputs_and_output[1];
+                    match snd_ty.sty {
+                        TypeVariants::TyTuple(tys, _) if sig.inputs().len() == tys.len() =>
+                            if sig.inputs().iter().zip(tys).all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) {
+                                return Ok(true)
+                            },
+                        _ => {}
+                    }
+                }
+            }
+            _ => {}
+        };
+
+        // Nope, this doesn't work.
+        return Ok(false);
+    }
+
+    fn eval_fn_call(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        destination: Option<(Lvalue, mir::BasicBlock)>,
+        args: &[ValTy<'tcx>],
+        span: Span,
+        sig: ty::FnSig<'tcx>,
+    ) -> EvalResult<'tcx> {
+        trace!("eval_fn_call: {:#?}", instance);
+        match instance.def {
+            ty::InstanceDef::Intrinsic(..) => {
+                let (ret, target) = match destination {
+                    Some(dest) => dest,
+                    _ => return err!(Unreachable),
+                };
+                let ty = sig.output();
+                let layout = self.type_layout(ty)?;
+                M::call_intrinsic(self, instance, args, ret, ty, layout, target)?;
+                self.dump_local(ret);
+                Ok(())
+            }
+            // FIXME: figure out why we can't just go through the shim
+            ty::InstanceDef::ClosureOnceShim { .. } => {
+                if M::eval_fn_call(self, instance, destination, args, span, sig)? {
+                    return Ok(());
+                }
+                let mut arg_locals = self.frame().mir.args_iter();
+                match sig.abi {
+                    // a closure invoked as `FnOnce`: arguments already match the shim's locals one-to-one
+                    Abi::RustCall => {
+                        for (arg_local, &valty) in arg_locals.zip(args) {
+                            let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
+                            self.write_value(valty, dest)?;
+                        }
+                    }
+                    // a non-capturing closure called as a fn ptr:
+                    // inject a ZST for the closure object (i.e., do nothing)
+                    // and pack the arguments into the tuple local field by field
+                    Abi::Rust => {
+                        trace!(
+                            "arg_locals: {:?}",
+                            self.frame().mir.args_iter().collect::<Vec<_>>()
+                        );
+                        trace!("args: {:?}", args);
+                        let local = arg_locals.nth(1).unwrap();
+                        for (i, &valty) in args.into_iter().enumerate() {
+                            let dest = self.eval_lvalue(&mir::Lvalue::Local(local).field(
+                                mir::Field::new(i),
+                                valty.ty,
+                            ))?;
+                            self.write_value(valty, dest)?;
+                        }
+                    }
+                    _ => bug!("bad ABI for ClosureOnceShim: {:?}", sig.abi),
+                }
+                Ok(())
+            }
+            ty::InstanceDef::FnPtrShim(..) |
+            ty::InstanceDef::DropGlue(..) |
+            ty::InstanceDef::CloneShim(..) |
+            ty::InstanceDef::Item(_) => {
+                // Push the stack frame, and potentially be entirely done if the call got hooked
+                if M::eval_fn_call(self, instance, destination, args, span, sig)? {
+                    return Ok(());
+                }
+
+                // Pass the arguments
+                let mut arg_locals = self.frame().mir.args_iter();
+                trace!("ABI: {:?}", sig.abi);
+                trace!(
+                    "arg_locals: {:?}",
+                    self.frame().mir.args_iter().collect::<Vec<_>>()
+                );
+                trace!("args: {:?}", args);
+                match sig.abi {
+                    Abi::RustCall => {
+                        assert_eq!(args.len(), 2);
+
+                        {
+                            // write first argument
+                            let first_local = arg_locals.next().unwrap();
+                            let dest = self.eval_lvalue(&mir::Lvalue::Local(first_local))?;
+                            self.write_value(args[0], dest)?;
+                        }
+
+                        // unpack and write all other args
+                        let layout = self.type_layout(args[1].ty)?;
+                        if let (&ty::TyTuple(fields, _),
+                                &Layout::Univariant { ref variant, .. }) = (&args[1].ty.sty, layout)
+                        {
+                            trace!("fields: {:?}", fields);
+                            if self.frame().mir.args_iter().count() == fields.len() + 1 {
+                                let offsets = variant.offsets.iter().map(|s| s.bytes());
+                                match args[1].value {
+                                    Value::ByRef(PtrAndAlign { ptr, aligned }) => {
+                                        assert!(
+                                            aligned,
+                                            "Unaligned ByRef-values cannot occur as function arguments"
+                                        );
+                                        for ((offset, ty), arg_local) in
+                                            offsets.zip(fields).zip(arg_locals)
+                                        {
+                                            let arg = Value::by_ref(ptr.offset(offset, &self)?);
+                                            let dest =
+                                                self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
+                                            trace!(
+                                                "writing arg {:?} to {:?} (type: {})",
+                                                arg,
+                                                dest,
+                                                ty
+                                            );
+                                            let valty = ValTy {
+                                                value: arg,
+                                                ty,
+                                            };
+                                            self.write_value(valty, dest)?;
+                                        }
+                                    }
+                                    Value::ByVal(PrimVal::Undef) => {}
+                                    other => {
+                                        assert_eq!(fields.len(), 1);
+                                        let dest = self.eval_lvalue(&mir::Lvalue::Local(
+                                            arg_locals.next().unwrap(),
+                                        ))?;
+                                        let valty = ValTy {
+                                            value: other,
+                                            ty: fields[0],
+                                        };
+                                        self.write_value(valty, dest)?;
+                                    }
+                                }
+                            } else {
+                                trace!("manual impl of rust-call ABI");
+                                // called a manual impl of a rust-call function
+                                let dest = self.eval_lvalue(
+                                    &mir::Lvalue::Local(arg_locals.next().unwrap()),
+                                )?;
+                                self.write_value(args[1], dest)?;
+                            }
+                        } else {
+                            bug!(
+                                "rust-call ABI tuple argument was {:#?}, {:#?}",
+                                args[1].ty,
+                                layout
+                            );
+                        }
+                    }
+                    _ => {
+                        for (arg_local, &valty) in arg_locals.zip(args) {
+                            let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
+                            self.write_value(valty, dest)?;
+                        }
+                    }
+                }
+                Ok(())
+            }
+            // cannot use the shim here, because that will only result in infinite recursion
+            ty::InstanceDef::Virtual(_, idx) => {
+                let ptr_size = self.memory.pointer_size();
+                let (ptr, vtable) = args[0].into_ptr_vtable_pair(&self.memory)?;
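+                // the first three pointer-sized slots of a vtable hold the drop function,
+                // size, and align (see `get_vtable`), so method `idx` lives at slot `idx + 3`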
+                let fn_ptr = self.memory.read_ptr_sized_unsigned(
+                    vtable.offset(ptr_size * (idx as u64 + 3), &self)?
+                )?.to_ptr()?;
+                let instance = self.memory.get_fn(fn_ptr)?;
+                let mut args = args.to_vec();
+                let ty = self.get_field_ty(args[0].ty, 0)?.ty; // TODO: packed flag is ignored
+                args[0].ty = ty;
+                args[0].value = ptr.to_value();
+                // recurse with concrete function
+                self.eval_fn_call(instance, destination, &args, span, sig)
+            }
+        }
+    }
+}
diff --git a/src/librustc/mir/interpret/traits.rs b/src/librustc/mir/interpret/traits.rs
new file mode 100644 (file)
index 0000000..3f7e10a
--- /dev/null
@@ -0,0 +1,137 @@
+use rustc::traits::{self, Reveal};
+use rustc::hir::def_id::DefId;
+use rustc::ty::subst::Substs;
+use rustc::ty::{self, Ty};
+use syntax::codemap::DUMMY_SP;
+use syntax::ast::{self, Mutability};
+
+use super::{EvalResult, EvalContext, eval_context, MemoryPointer, MemoryKind, Value, PrimVal,
+            Machine};
+
+impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
+    pub(crate) fn fulfill_obligation(
+        &self,
+        trait_ref: ty::PolyTraitRef<'tcx>,
+    ) -> traits::Vtable<'tcx, ()> {
+        // Do the initial selection for the obligation. This yields the shallow result we are
+        // looking for -- that is, what specific impl.
+        self.tcx.infer_ctxt().enter(|infcx| {
+            let mut selcx = traits::SelectionContext::new(&infcx);
+
+            let obligation = traits::Obligation::new(
+                traits::ObligationCause::misc(DUMMY_SP, ast::DUMMY_NODE_ID),
+                ty::ParamEnv::empty(Reveal::All),
+                trait_ref.to_poly_trait_predicate(),
+            );
+            let selection = selcx.select(&obligation).unwrap().unwrap();
+
+            // Currently, we use a fulfillment context to completely resolve all nested obligations.
+            // This is because they can inform the inference of the impl's type parameters.
+            let mut fulfill_cx = traits::FulfillmentContext::new();
+            let vtable = selection.map(|predicate| {
+                fulfill_cx.register_predicate_obligation(&infcx, predicate);
+            });
+            infcx.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &vtable)
+        })
+    }
+
+    /// Creates a dynamic vtable for the given type and vtable origin. This is used only for
+    /// trait objects.
+    ///
+    /// The `trait_ref` encodes the erased self type. Hence if we are
+    /// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
+    /// `trait_ref` would map `T:Trait`.
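+    ///
+    /// The vtable layout written below is `[drop_in_place, size, align, method 0,
+    /// method 1, ...]`, one pointer-sized slot per entry.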
+    pub fn get_vtable(
+        &mut self,
+        ty: Ty<'tcx>,
+        trait_ref: ty::PolyTraitRef<'tcx>,
+    ) -> EvalResult<'tcx, MemoryPointer> {
+        debug!("get_vtable(trait_ref={:?})", trait_ref);
+
+        let size = self.type_size(trait_ref.self_ty())?.expect(
+            "can't create a vtable for an unsized type",
+        );
+        let align = self.type_align(trait_ref.self_ty())?;
+
+        let ptr_size = self.memory.pointer_size();
+        let methods = ::rustc::traits::get_vtable_methods(self.tcx, trait_ref);
+        let vtable = self.memory.allocate(
+            ptr_size * (3 + methods.count() as u64),
+            ptr_size,
+            MemoryKind::UninitializedStatic,
+        )?;
+
+        let drop = eval_context::resolve_drop_in_place(self.tcx, ty);
+        let drop = self.memory.create_fn_alloc(drop);
+        self.memory.write_ptr_sized_unsigned(vtable, PrimVal::Ptr(drop))?;
+
+        let size_ptr = vtable.offset(ptr_size, &self)?;
+        self.memory.write_ptr_sized_unsigned(size_ptr, PrimVal::Bytes(size as u128))?;
+        let align_ptr = vtable.offset(ptr_size * 2, &self)?;
+        self.memory.write_ptr_sized_unsigned(align_ptr, PrimVal::Bytes(align as u128))?;
+
+        for (i, method) in ::rustc::traits::get_vtable_methods(self.tcx, trait_ref).enumerate() {
+            if let Some((def_id, substs)) = method {
+                let instance = eval_context::resolve(self.tcx, def_id, substs);
+                let fn_ptr = self.memory.create_fn_alloc(instance);
+                let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?;
+                self.memory.write_ptr_sized_unsigned(method_ptr, PrimVal::Ptr(fn_ptr))?;
+            }
+        }
+
+        self.memory.mark_static_initalized(
+            vtable.alloc_id,
+            Mutability::Mutable,
+        )?;
+
+        Ok(vtable)
+    }
+
+    pub fn read_drop_type_from_vtable(
+        &self,
+        vtable: MemoryPointer,
+    ) -> EvalResult<'tcx, Option<ty::Instance<'tcx>>> {
+        // we don't care about the pointee type; we just want a pointer
+        match self.read_ptr(vtable, self.tcx.mk_nil_ptr())? {
+            // some values don't need to call a drop impl, so the value is null
+            Value::ByVal(PrimVal::Bytes(0)) => Ok(None),
+            Value::ByVal(PrimVal::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some),
+            _ => err!(ReadBytesAsPointer),
+        }
+    }
+
+    pub fn read_size_and_align_from_vtable(
+        &self,
+        vtable: MemoryPointer,
+    ) -> EvalResult<'tcx, (u64, u64)> {
+        let pointer_size = self.memory.pointer_size();
+        let size = self.memory.read_ptr_sized_unsigned(vtable.offset(pointer_size, self)?)?.to_bytes()? as u64;
+        let align = self.memory.read_ptr_sized_unsigned(
+            vtable.offset(pointer_size * 2, self)?
+        )?.to_bytes()? as u64;
+        Ok((size, align))
+    }
+
+    pub(crate) fn resolve_associated_const(
+        &self,
+        def_id: DefId,
+        substs: &'tcx Substs<'tcx>,
+    ) -> ty::Instance<'tcx> {
+        if let Some(trait_id) = self.tcx.trait_of_item(def_id) {
+            let trait_ref = ty::Binder(ty::TraitRef::new(trait_id, substs));
+            let vtable = self.fulfill_obligation(trait_ref);
+            if let traits::VtableImpl(vtable_impl) = vtable {
+                let name = self.tcx.item_name(def_id);
+                let assoc_const_opt = self.tcx.associated_items(vtable_impl.impl_def_id).find(
+                    |item| {
+                        item.kind == ty::AssociatedKind::Const && item.name == name
+                    },
+                );
+                if let Some(assoc_const) = assoc_const_opt {
+                    return ty::Instance::new(assoc_const.def_id, vtable_impl.substs);
+                }
+            }
+        }
+        ty::Instance::new(def_id, substs)
+    }
+}
diff --git a/src/librustc/mir/interpret/validation.rs b/src/librustc/mir/interpret/validation.rs
new file mode 100644 (file)
index 0000000..9be9341
--- /dev/null
@@ -0,0 +1,727 @@
+use rustc::hir::{self, Mutability};
+use rustc::hir::Mutability::*;
+use rustc::mir::{self, ValidationOp, ValidationOperand};
+use rustc::ty::{self, Ty, TypeFoldable, TyCtxt};
+use rustc::ty::subst::{Substs, Subst};
+use rustc::traits;
+use rustc::infer::InferCtxt;
+use rustc::traits::Reveal;
+use rustc::middle::region;
+use rustc_data_structures::indexed_vec::Idx;
+
+use super::{EvalError, EvalResult, EvalErrorKind, EvalContext, DynamicLifetime, AccessKind, Value,
+            Lvalue, LvalueExtra, Machine, ValTy};
+
+pub type ValidationQuery<'tcx> = ValidationOperand<'tcx, (AbsLvalue<'tcx>, Lvalue)>;
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+enum ValidationMode {
+    Acquire,
+    /// Recover because the given region ended
+    Recover(region::Scope),
+    ReleaseUntil(Option<region::Scope>),
+}
+
+impl ValidationMode {
+    fn acquiring(self) -> bool {
+        use self::ValidationMode::*;
+        match self {
+            Acquire | Recover(_) => true,
+            ReleaseUntil(_) => false,
+        }
+    }
+}
+
+// Abstract lvalues
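+// These mirror `mir::Lvalue`, but with dynamic indices already evaluated to concrete
+// values, so that suspended validation queries can be compared and hashed later,
+// independently of the machine state they were created in.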
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub enum AbsLvalue<'tcx> {
+    Local(mir::Local),
+    Static(hir::def_id::DefId),
+    Projection(Box<AbsLvalueProjection<'tcx>>),
+}
+
+type AbsLvalueProjection<'tcx> = mir::Projection<'tcx, AbsLvalue<'tcx>, u64, ()>;
+type AbsLvalueElem<'tcx> = mir::ProjectionElem<'tcx, u64, ()>;
+
+impl<'tcx> AbsLvalue<'tcx> {
+    pub fn field(self, f: mir::Field) -> AbsLvalue<'tcx> {
+        self.elem(mir::ProjectionElem::Field(f, ()))
+    }
+
+    pub fn deref(self) -> AbsLvalue<'tcx> {
+        self.elem(mir::ProjectionElem::Deref)
+    }
+
+    pub fn downcast(self, adt_def: &'tcx ty::AdtDef, variant_index: usize) -> AbsLvalue<'tcx> {
+        self.elem(mir::ProjectionElem::Downcast(adt_def, variant_index))
+    }
+
+    pub fn index(self, index: u64) -> AbsLvalue<'tcx> {
+        self.elem(mir::ProjectionElem::Index(index))
+    }
+
+    fn elem(self, elem: AbsLvalueElem<'tcx>) -> AbsLvalue<'tcx> {
+        AbsLvalue::Projection(Box::new(AbsLvalueProjection {
+            base: self,
+            elem,
+        }))
+    }
+}
+
+impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
+    fn abstract_lvalue_projection(&self, proj: &mir::LvalueProjection<'tcx>) -> EvalResult<'tcx, AbsLvalueProjection<'tcx>> {
+        use self::mir::ProjectionElem::*;
+
+        let elem = match proj.elem {
+            Deref => Deref,
+            Field(f, _) => Field(f, ()),
+            Index(v) => {
+                let value = self.frame().get_local(v)?;
+                let ty = self.tcx.types.usize;
+                let n = self.value_to_primval(ValTy { value, ty })?.to_u64()?;
+                Index(n)
+            },
+            ConstantIndex { offset, min_length, from_end } =>
+                ConstantIndex { offset, min_length, from_end },
+            Subslice { from, to } =>
+                Subslice { from, to },
+            Downcast(adt, sz) => Downcast(adt, sz),
+        };
+        Ok(AbsLvalueProjection {
+            base: self.abstract_lvalue(&proj.base)?,
+            elem
+        })
+    }
+
+    fn abstract_lvalue(&self, lval: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, AbsLvalue<'tcx>> {
+        Ok(match lval {
+            &mir::Lvalue::Local(l) => AbsLvalue::Local(l),
+            &mir::Lvalue::Static(ref s) => AbsLvalue::Static(s.def_id),
+            &mir::Lvalue::Projection(ref p) =>
+                AbsLvalue::Projection(Box::new(self.abstract_lvalue_projection(&*p)?)),
+        })
+    }
+
+    // Validity checks
+    pub(crate) fn validation_op(
+        &mut self,
+        op: ValidationOp,
+        operand: &ValidationOperand<'tcx, mir::Lvalue<'tcx>>,
+    ) -> EvalResult<'tcx> {
+        // If mir-emit-validate is set to 0 (i.e., disabled), we may still see validation commands
+        // because other crates may have been compiled with mir-emit-validate > 0.  Ignore those
+        // commands.  This makes mir-emit-validate also a flag to control whether miri will do
+        // validation or not.
+        if self.tcx.sess.opts.debugging_opts.mir_emit_validate == 0 {
+            return Ok(());
+        }
+        debug_assert!(self.memory.cur_frame == self.cur_frame());
+
+        // HACK: Determine if this method is whitelisted and hence we do not perform any validation.
+        // We currently insta-UB on anything passing around uninitialized memory, so we have to whitelist
+        // the places that are allowed to do that.
+        // The second group is stuff libstd does that is forbidden even under relaxed validation.
+        {
+            // The regexp we use for filtering
+            use regex::Regex;
+            lazy_static! {
+                static ref RE: Regex = Regex::new("^(\
+                    (std|alloc::heap::__core)::mem::(uninitialized|forget)::|\
+                    <(std|alloc)::heap::Heap as (std::heap|alloc::allocator)::Alloc>::|\
+                    <(std|alloc::heap::__core)::mem::ManuallyDrop<T>><.*>::new$|\
+                    <(std|alloc::heap::__core)::mem::ManuallyDrop<T> as std::ops::DerefMut><.*>::deref_mut$|\
+                    (std|alloc::heap::__core)::ptr::read::|\
+                    \
+                    <std::sync::Arc<T>><.*>::inner$|\
+                    <std::sync::Arc<T>><.*>::drop_slow$|\
+                    (std::heap|alloc::allocator)::Layout::for_value::|\
+                    (std|alloc::heap::__core)::mem::(size|align)_of_val::\
+                )").unwrap();
+            }
+            // Now test
+            let name = self.stack[self.cur_frame()].instance.to_string();
+            if RE.is_match(&name) {
+                return Ok(());
+            }
+        }
+
+        // We need to monomorphize ty *without* erasing lifetimes
+        let ty = operand.ty.subst(self.tcx, self.substs());
+        let lval = self.eval_lvalue(&operand.lval)?;
+        let abs_lval = self.abstract_lvalue(&operand.lval)?;
+        let query = ValidationQuery {
+            lval: (abs_lval, lval),
+            ty,
+            re: operand.re,
+            mutbl: operand.mutbl,
+        };
+
+        // Check the mode, and also perform mode-specific operations
+        let mode = match op {
+            ValidationOp::Acquire => ValidationMode::Acquire,
+            ValidationOp::Release => ValidationMode::ReleaseUntil(None),
+            ValidationOp::Suspend(scope) => {
+                if query.mutbl == MutMutable {
+                    let lft = DynamicLifetime {
+                        frame: self.cur_frame(),
+                        region: Some(scope), // Notably, we only ever suspend things for given regions.
+                        // Suspending for the entire function does not make any sense.
+                    };
+                    trace!("Suspending {:?} until {:?}", query, scope);
+                    self.suspended.entry(lft).or_insert_with(Vec::new).push(
+                        query.clone(),
+                    );
+                }
+                ValidationMode::ReleaseUntil(Some(scope))
+            }
+        };
+        self.validate(query, mode)
+    }
+
+    /// Releases locks and executes suspensions of the given region (or of the entire function, if `None`).
+    pub(crate) fn end_region(&mut self, scope: Option<region::Scope>) -> EvalResult<'tcx> {
+        debug_assert!(self.memory.cur_frame == self.cur_frame());
+        self.memory.locks_lifetime_ended(scope);
+        match scope {
+            Some(scope) => {
+                // Recover suspended lvals
+                let lft = DynamicLifetime {
+                    frame: self.cur_frame(),
+                    region: Some(scope),
+                };
+                if let Some(queries) = self.suspended.remove(&lft) {
+                    for query in queries {
+                        trace!("Recovering {:?} from suspension", query);
+                        self.validate(query, ValidationMode::Recover(scope))?;
+                    }
+                }
+            }
+            None => {
+                // Clean suspension table of current frame
+                let cur_frame = self.cur_frame();
+                self.suspended.retain(|lft, _| {
+                    lft.frame != cur_frame // keep only what is in the other (lower) frames
+                });
+            }
+        }
+        Ok(())
+    }
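
The suspension bookkeeping above can be pictured with plain std collections. A sketch with hypothetical stand-ins (`u32` for `region::Scope`, `&str` for the validation query):

    use std::collections::HashMap;

    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct DynamicLifetime { frame: usize, region: Option<u32> }

    fn main() {
        let mut suspended: HashMap<DynamicLifetime, Vec<&str>> = HashMap::new();
        // Suspend(scope): queue the query under (current frame, scope).
        suspended.entry(DynamicLifetime { frame: 3, region: Some(7) })
            .or_insert_with(Vec::new)
            .push("query");
        // end_region(Some(scope)): recover exactly what was suspended for that scope.
        assert_eq!(suspended.remove(&DynamicLifetime { frame: 3, region: Some(7) }),
                   Some(vec!["query"]));
        // end_region(None): drop everything belonging to the current frame.
        let cur_frame = 3;
        suspended.retain(|lft, _| lft.frame != cur_frame);
    }
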
+
+    fn normalize_type_unerased(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
+        return normalize_associated_type(self.tcx, &ty);
+
+        use syntax::codemap::{Span, DUMMY_SP};
+
+        // We copy a bunch of stuff from rustc/infer/mod.rs to be able to tweak its behavior
+        fn normalize_projections_in<'a, 'gcx, 'tcx, T>(
+            self_: &InferCtxt<'a, 'gcx, 'tcx>,
+            param_env: ty::ParamEnv<'tcx>,
+            value: &T,
+        ) -> T::Lifted
+        where
+            T: TypeFoldable<'tcx> + ty::Lift<'gcx>,
+        {
+            let mut selcx = traits::SelectionContext::new(self_);
+            let cause = traits::ObligationCause::dummy();
+            let traits::Normalized {
+                value: result,
+                obligations,
+            } = traits::normalize(&mut selcx, param_env, cause, value);
+
+            let mut fulfill_cx = traits::FulfillmentContext::new();
+
+            for obligation in obligations {
+                fulfill_cx.register_predicate_obligation(self_, obligation);
+            }
+
+            drain_fulfillment_cx_or_panic(self_, DUMMY_SP, &mut fulfill_cx, &result)
+        }
+
+        fn drain_fulfillment_cx_or_panic<'a, 'gcx, 'tcx, T>(
+            self_: &InferCtxt<'a, 'gcx, 'tcx>,
+            span: Span,
+            fulfill_cx: &mut traits::FulfillmentContext<'tcx>,
+            result: &T,
+        ) -> T::Lifted
+        where
+            T: TypeFoldable<'tcx> + ty::Lift<'gcx>,
+        {
+            // In principle, we only need to do this so long as `result`
+            // contains unbound type parameters. It could be a slight
+            // optimization to stop iterating early.
+            match fulfill_cx.select_all_or_error(self_) {
+                Ok(()) => { }
+                Err(errors) => {
+                    span_bug!(
+                        span,
+                        "Encountered errors `{:?}` resolving bounds after type-checking",
+                        errors
+                    );
+                }
+            }
+
+            let result = self_.resolve_type_vars_if_possible(result);
+            let result = self_.tcx.fold_regions(
+                &result,
+                &mut false,
+                |r, _| match *r {
+                    ty::ReVar(_) => self_.tcx.types.re_erased,
+                    _ => r,
+                },
+            );
+
+            match self_.tcx.lift_to_global(&result) {
+                Some(result) => result,
+                None => {
+                    span_bug!(span, "Uninferred types/regions in `{:?}`", result);
+                }
+            }
+        }
+
+        trait MyTransNormalize<'gcx>: TypeFoldable<'gcx> {
+            fn my_trans_normalize<'a, 'tcx>(
+                &self,
+                infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+                param_env: ty::ParamEnv<'tcx>,
+            ) -> Self;
+        }
+
+        macro_rules! items { ($($item:item)+) => ($($item)+) }
+        macro_rules! impl_trans_normalize {
+            ($lt_gcx:tt, $($ty:ty),+) => {
+                items!($(impl<$lt_gcx> MyTransNormalize<$lt_gcx> for $ty {
+                    fn my_trans_normalize<'a, 'tcx>(&self,
+                                                infcx: &InferCtxt<'a, $lt_gcx, 'tcx>,
+                                                param_env: ty::ParamEnv<'tcx>)
+                                                -> Self {
+                        normalize_projections_in(infcx, param_env, self)
+                    }
+                })+);
+            }
+        }
+
+        impl_trans_normalize!('gcx,
+            Ty<'gcx>,
+            &'gcx Substs<'gcx>,
+            ty::FnSig<'gcx>,
+            ty::PolyFnSig<'gcx>,
+            ty::ClosureSubsts<'gcx>,
+            ty::PolyTraitRef<'gcx>,
+            ty::ExistentialTraitRef<'gcx>
+        );
+
+        fn normalize_associated_type<'a, 'tcx, T>(self_: TyCtxt<'a, 'tcx, 'tcx>, value: &T) -> T
+        where
+            T: MyTransNormalize<'tcx>,
+        {
+            let param_env = ty::ParamEnv::empty(Reveal::All);
+
+            if !value.has_projections() {
+                return value.clone();
+            }
+
+            self_.infer_ctxt().enter(|infcx| {
+                value.my_trans_normalize(&infcx, param_env)
+            })
+        }
+    }
+
+    fn validate_variant(
+        &mut self,
+        query: ValidationQuery<'tcx>,
+        variant: &ty::VariantDef,
+        subst: &ty::subst::Substs<'tcx>,
+        mode: ValidationMode,
+    ) -> EvalResult<'tcx> {
+        // TODO: Maybe take visibility/privacy into account.
+        for (idx, field_def) in variant.fields.iter().enumerate() {
+            let field_ty = field_def.ty(self.tcx, subst);
+            let field = mir::Field::new(idx);
+            let field_lvalue = self.lvalue_field(query.lval.1, field, query.ty, field_ty)?;
+            self.validate(
+                ValidationQuery {
+                    lval: (query.lval.0.clone().field(field), field_lvalue),
+                    ty: field_ty,
+                    ..query
+                },
+                mode,
+            )?;
+        }
+        Ok(())
+    }
+
+    fn validate_ptr(
+        &mut self,
+        val: Value,
+        abs_lval: AbsLvalue<'tcx>,
+        pointee_ty: Ty<'tcx>,
+        re: Option<region::Scope>,
+        mutbl: Mutability,
+        mode: ValidationMode,
+    ) -> EvalResult<'tcx> {
+        // Check alignment and non-NULLness
+        let (_, align) = self.size_and_align_of_dst(pointee_ty, val)?;
+        let ptr = val.into_ptr(&self.memory)?;
+        self.memory.check_align(ptr, align, None)?;
+
+        // Recurse
+        let pointee_lvalue = self.val_to_lvalue(val, pointee_ty)?;
+        self.validate(
+            ValidationQuery {
+                lval: (abs_lval.deref(), pointee_lvalue),
+                ty: pointee_ty,
+                re,
+                mutbl,
+            },
+            mode,
+        )
+    }
+
+    /// Validate the lvalue at the given type. If `mode` is not acquiring, this just releases (or suspends) write locks.
+    fn validate(
+        &mut self,
+        mut query: ValidationQuery<'tcx>,
+        mode: ValidationMode,
+    ) -> EvalResult<'tcx> {
+        use rustc::ty::TypeVariants::*;
+        use rustc::ty::RegionKind::*;
+        use rustc::ty::AdtKind;
+
+        // No point releasing shared stuff.
+        if !mode.acquiring() && query.mutbl == MutImmutable {
+            return Ok(());
+        }
+        // When we recover, we may see data whose validity *just* ended.  Do not acquire it.
+        if let ValidationMode::Recover(ending_ce) = mode {
+            if query.re == Some(ending_ce) {
+                return Ok(());
+            }
+        }
+
+        query.ty = self.normalize_type_unerased(&query.ty);
+        trace!("{:?} on {:?}", mode, query);
+
+        // Decide whether this type *owns* the memory it covers (like integers do), or whether it
+        // just assembles pieces (that each own their memory) into a larger whole.
+        // TODO: Currently, we don't acquire locks for padding and discriminants. We should.
+        let is_owning = match query.ty.sty {
+            TyInt(_) | TyUint(_) | TyRawPtr(_) | TyBool | TyFloat(_) | TyChar | TyStr |
+            TyRef(..) | TyFnPtr(..) | TyFnDef(..) | TyNever => true,
+            TyAdt(adt, _) if adt.is_box() => true,
+            TySlice(_) | TyAdt(_, _) | TyTuple(..) | TyClosure(..) | TyArray(..) |
+            TyDynamic(..) | TyGenerator(..) => false,
+            TyParam(_) | TyInfer(_) | TyProjection(_) | TyAnon(..) | TyError => {
+                bug!("I got an incomplete/unnormalized type for validation")
+            }
+        };
+        if is_owning {
+            // We need to lock.  So we need memory.  So we have to call force_allocation.
+            // Tracking the same state for locals not backed by memory would just duplicate too
+            // much machinery.
+            // FIXME: We ignore alignment.
+            let (ptr, extra) = self.force_allocation(query.lval.1)?.to_ptr_extra_aligned();
+            // Determine the size
+            // FIXME: Can we reuse size_and_align_of_dst for Lvalues?
+            let len = match self.type_size(query.ty)? {
+                Some(size) => {
+                    assert_eq!(extra, LvalueExtra::None, "Got a fat ptr to a sized type");
+                    size
+                }
+                None => {
+                    // The only unsized type we consider "owning" is TyStr.
+                    assert_eq!(
+                        query.ty.sty,
+                        TyStr,
+                        "Found a surprising unsized owning type"
+                    );
+                    // The extra must be the length, in bytes.
+                    match extra {
+                        LvalueExtra::Length(len) => len,
+                        _ => bug!("TyStr must have a length as extra"),
+                    }
+                }
+            };
+            // Handle locking
+            if len > 0 {
+                let ptr = ptr.to_ptr()?;
+                match query.mutbl {
+                    MutImmutable => {
+                        if mode.acquiring() {
+                            self.memory.acquire_lock(
+                                ptr,
+                                len,
+                                query.re,
+                                AccessKind::Read,
+                            )?;
+                        }
+                    }
+                    // No releasing of read locks, ever.
+                    MutMutable => {
+                        match mode {
+                            ValidationMode::Acquire => {
+                                self.memory.acquire_lock(
+                                    ptr,
+                                    len,
+                                    query.re,
+                                    AccessKind::Write,
+                                )?
+                            }
+                            ValidationMode::Recover(ending_ce) => {
+                                self.memory.recover_write_lock(
+                                    ptr,
+                                    len,
+                                    &query.lval.0,
+                                    query.re,
+                                    ending_ce,
+                                )?
+                            }
+                            ValidationMode::ReleaseUntil(suspended_ce) => {
+                                self.memory.suspend_write_lock(
+                                    ptr,
+                                    len,
+                                    &query.lval.0,
+                                    suspended_ce,
+                                )?
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        let res = do catch {
+            match query.ty.sty {
+                TyInt(_) | TyUint(_) | TyRawPtr(_) => {
+                    if mode.acquiring() {
+                        // Make sure we can read this.
+                        let val = self.read_lvalue(query.lval.1)?;
+                        self.follow_by_ref_value(val, query.ty)?;
+                        // FIXME: It would be great to rule out Undef here, but that doesn't actually work.
+                        // Passing around undef data is a thing that e.g. Vec::extend_with does.
+                    }
+                    Ok(())
+                }
+                TyBool | TyFloat(_) | TyChar => {
+                    if mode.acquiring() {
+                        let val = self.read_lvalue(query.lval.1)?;
+                        let val = self.value_to_primval(ValTy { value: val, ty: query.ty })?;
+                        val.to_bytes()?;
+                        // TODO: Check if these are valid bool/float/codepoint/UTF-8
+                    }
+                    Ok(())
+                }
+                TyNever => err!(ValidationFailure(format!("The empty type is never valid."))),
+                TyRef(region,
+                    ty::TypeAndMut {
+                        ty: pointee_ty,
+                        mutbl,
+                    }) => {
+                    let val = self.read_lvalue(query.lval.1)?;
+                    // Sharing restricts our context
+                    if mutbl == MutImmutable {
+                        query.mutbl = MutImmutable;
+                    }
+                    // Inner lifetimes *outlive* outer ones, so we only record the region of this
+                    // borrow in the context if there is no lifetime restriction yet.
+                    if query.re == None {
+                        match *region {
+                            ReScope(scope) => query.re = Some(scope),
+                            // It is possible for us to encounter erased lifetimes here because the lifetimes in
+                            // this function's Substs will be erased.
+                            _ => {}
+                        }
+                    }
+                    self.validate_ptr(val, query.lval.0, pointee_ty, query.re, query.mutbl, mode)
+                }
+                TyAdt(adt, _) if adt.is_box() => {
+                    let val = self.read_lvalue(query.lval.1)?;
+                    self.validate_ptr(val, query.lval.0, query.ty.boxed_ty(), query.re, query.mutbl, mode)
+                }
+                TyFnPtr(_sig) => {
+                    let ptr = self.read_lvalue(query.lval.1)?
+                        .into_ptr(&self.memory)?
+                        .to_ptr()?;
+                    self.memory.get_fn(ptr)?;
+                    // TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?).
+                    Ok(())
+                }
+                TyFnDef(..) => {
+                    // This is a zero-sized type with all relevant data sitting in the type.
+                    // There is nothing to validate.
+                    Ok(())
+                }
+
+                // Compound types
+                TyStr => {
+                    // TODO: Validate strings
+                    Ok(())
+                }
+                TySlice(elem_ty) => {
+                    let len = match query.lval.1 {
+                        Lvalue::Ptr { extra: LvalueExtra::Length(len), .. } => len,
+                        _ => {
+                            bug!(
+                                "acquire_valid of a TySlice given non-slice lvalue: {:?}",
+                                query.lval
+                            )
+                        }
+                    };
+                    for i in 0..len {
+                        let inner_lvalue = self.lvalue_index(query.lval.1, query.ty, i)?;
+                        self.validate(
+                            ValidationQuery {
+                                lval: (query.lval.0.clone().index(i), inner_lvalue),
+                                ty: elem_ty,
+                                ..query
+                            },
+                            mode,
+                        )?;
+                    }
+                    Ok(())
+                }
+                TyArray(elem_ty, len) => {
+                    let len = len.val.to_const_int().unwrap().to_u64().unwrap();
+                    for i in 0..len {
+                        let inner_lvalue = self.lvalue_index(query.lval.1, query.ty, i as u64)?;
+                        self.validate(
+                            ValidationQuery {
+                                lval: (query.lval.0.clone().index(i as u64), inner_lvalue),
+                                ty: elem_ty,
+                                ..query
+                            },
+                            mode,
+                        )?;
+                    }
+                    Ok(())
+                }
+                TyDynamic(_data, _region) => {
+                    // Check that this is a valid vtable
+                    let vtable = match query.lval.1 {
+                        Lvalue::Ptr { extra: LvalueExtra::Vtable(vtable), .. } => vtable,
+                        _ => {
+                            bug!(
+                                "acquire_valid of a TyDynamic given non-trait-object lvalue: {:?}",
+                                query.lval
+                            )
+                        }
+                    };
+                    self.read_size_and_align_from_vtable(vtable)?;
+                    // TODO: Check that the vtable contains all the function pointers we expect it to have.
+                    // Trait objects cannot have any operations performed
+                    // on them directly.  We cannot, in general, even acquire any locks as the trait object *could*
+                    // contain an UnsafeCell.  If we call functions to get access to data, we will validate
+                    // their return values.  So, it doesn't seem like there's anything else to do.
+                    Ok(())
+                }
+                TyAdt(adt, subst) => {
+                    if Some(adt.did) == self.tcx.lang_items().unsafe_cell_type() &&
+                        query.mutbl == MutImmutable
+                    {
+                        // No locks for shared unsafe cells.  Also no other validation, the only field is private anyway.
+                        return Ok(());
+                    }
+
+                    match adt.adt_kind() {
+                        AdtKind::Enum => {
+                            // TODO: Can we get the discriminant without forcing an allocation?
+                            let ptr = self.force_allocation(query.lval.1)?.to_ptr()?;
+                            let discr = self.read_discriminant_value(ptr, query.ty)?;
+
+                            // Get variant index for discriminant
+                            let variant_idx = adt.discriminants(self.tcx).position(|variant_discr| {
+                                variant_discr.to_u128_unchecked() == discr
+                            });
+                            let variant_idx = match variant_idx {
+                                Some(val) => val,
+                                None => return err!(InvalidDiscriminant),
+                            };
+                            let variant = &adt.variants[variant_idx];
+
+                            if variant.fields.len() > 0 {
+                                // Downcast to this variant, if needed
+                                let lval = if adt.variants.len() > 1 {
+                                    (
+                                        query.lval.0.downcast(adt, variant_idx),
+                                        self.eval_lvalue_projection(
+                                            query.lval.1,
+                                            query.ty,
+                                            &mir::ProjectionElem::Downcast(adt, variant_idx),
+                                        )?,
+                                    )
+                                } else {
+                                    query.lval
+                                };
+
+                                // Recursively validate the fields
+                                self.validate_variant(
+                                    ValidationQuery { lval, ..query },
+                                    variant,
+                                    subst,
+                                    mode,
+                                )
+                            } else {
+                                // No fields, nothing left to check.  Downcasting may fail, e.g. in case of a CEnum.
+                                Ok(())
+                            }
+                        }
+                        AdtKind::Struct => {
+                            self.validate_variant(query, adt.struct_variant(), subst, mode)
+                        }
+                        AdtKind::Union => {
+                            // No guarantees are provided for union types.
+                            // TODO: Make sure that all access to union fields is unsafe; otherwise, we may have some checking to do (but what exactly?)
+                            Ok(())
+                        }
+                    }
+                }
+                TyTuple(ref types, _) => {
+                    for (idx, field_ty) in types.iter().enumerate() {
+                        let field = mir::Field::new(idx);
+                        let field_lvalue = self.lvalue_field(query.lval.1, field, query.ty, field_ty)?;
+                        self.validate(
+                            ValidationQuery {
+                                lval: (query.lval.0.clone().field(field), field_lvalue),
+                                ty: field_ty,
+                                ..query
+                            },
+                            mode,
+                        )?;
+                    }
+                    Ok(())
+                }
+                TyClosure(def_id, ref closure_substs) => {
+                    for (idx, field_ty) in closure_substs.upvar_tys(def_id, self.tcx).enumerate() {
+                        let field = mir::Field::new(idx);
+                        let field_lvalue = self.lvalue_field(query.lval.1, field, query.ty, field_ty)?;
+                        self.validate(
+                            ValidationQuery {
+                                lval: (query.lval.0.clone().field(field), field_lvalue),
+                                ty: field_ty,
+                                ..query
+                            },
+                            mode,
+                        )?;
+                    }
+                    // TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?).
+                    // Are there other things we can/should check?  Like vtable pointers?
+                    Ok(())
+                }
+                // FIXME: generators aren't validated right now
+                TyGenerator(..) => Ok(()),
+                _ => bug!("We already established that this is a type we support. ({})", query.ty),
+            }
+        };
+        match res {
+            // ReleaseUntil(None) of an uninitialized variable is a NOP.  This is needed because
+            // we have to release the return value of a function; due to destination-passing-style
+            // the callee may directly write there.
+            // TODO: Ideally we would know whether the destination is already initialized, and only
+            // release if it is.  But of course that can't even always be statically determined.
+            Err(EvalError { kind: EvalErrorKind::ReadUndefBytes, .. })
+                if mode == ValidationMode::ReleaseUntil(None) => {
+                return Ok(());
+            }
+            res => res,
+        }
+    }
+}
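
For orientation, the validation statements this code consumes come from ordinary borrows. As a sketch (the exact MIR statement shape is an assumption, not part of this patch): compiled with `-Z mir-emit-validate=1`, a function like the one below gets an `Acquire` validation of `*x` on entry and a matching `Release` on exit, which the interpreter above translates into a write lock:

    fn bump(x: &mut i32) {
        *x += 1; // exclusive access, covered by the acquired write lock under validation
    }

    fn main() {
        let mut v = 0;
        bump(&mut v);
        assert_eq!(v, 1);
    }
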
diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs
new file mode 100644 (file)
index 0000000..e052ec1
--- /dev/null
@@ -0,0 +1,405 @@
+#![allow(unknown_lints)]
+
+use rustc::ty::layout::HasDataLayout;
+
+use super::{EvalResult, Memory, MemoryPointer, HasMemory, PointerArithmetic, Machine, PtrAndAlign};
+
+pub(super) fn bytes_to_f32(bytes: u128) -> f32 {
+    f32::from_bits(bytes as u32)
+}
+
+pub(super) fn bytes_to_f64(bytes: u128) -> f64 {
+    f64::from_bits(bytes as u64)
+}
+
+pub(super) fn f32_to_bytes(f: f32) -> u128 {
+    f.to_bits() as u128
+}
+
+pub(super) fn f64_to_bytes(f: f64) -> u128 {
+    f.to_bits() as u128
+}
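
These four helpers are thin wrappers around the std bit conversions; a standalone round-trip check of the same idea:

    fn main() {
        let bytes = f64::to_bits(-0.0) as u128;   // as in f64_to_bytes
        let back = f64::from_bits(bytes as u64);  // as in bytes_to_f64
        assert_eq!(back, 0.0);                    // -0.0 compares equal to 0.0 ...
        assert!(back.is_sign_negative());         // ... but the sign bit survives the trip
    }
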
+
+/// A `Value` represents a single self-contained Rust value.
+///
+/// A `Value` can either refer to a block of memory inside an allocation (`ByRef`) or to a primitive
+/// value held directly, outside of any allocation (`ByVal`).  For `ByRef`-values, we remember
+/// whether the pointer is supposed to be aligned or not (also see Lvalue).
+///
+/// For optimization of a few very common cases, there is also a representation for a pair of
+/// primitive values (`ByValPair`). It allows Miri to avoid making allocations for checked binary
+/// operations and fat pointers. This idea was taken from rustc's trans.
+#[derive(Clone, Copy, Debug)]
+pub enum Value {
+    ByRef(PtrAndAlign),
+    ByVal(PrimVal),
+    ByValPair(PrimVal, PrimVal),
+}
+
+/// A wrapper type around `PrimVal` that cannot be turned back into a `PrimVal` accidentally.
+/// This type clears up a few APIs where having a `PrimVal` argument for something that is
+/// potentially an integer pointer or a pointer to an allocation was unclear.
+///
+/// I (@oli-obk) believe it is now harder to mix up generic primvals and primvals that are just
+/// the representation of pointers. Also all the sites that convert between primvals and pointers
+/// are explicit now (and rare!)
+#[derive(Clone, Copy, Debug)]
+pub struct Pointer {
+    primval: PrimVal,
+}
+
+impl<'tcx> Pointer {
+    pub fn null() -> Self {
+        PrimVal::Bytes(0).into()
+    }
+    pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
+        self.primval.to_ptr()
+    }
+    pub fn into_inner_primval(self) -> PrimVal {
+        self.primval
+    }
+
+    pub fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
+        let layout = cx.data_layout();
+        match self.primval {
+            PrimVal::Bytes(b) => {
+                assert_eq!(b as u64 as u128, b);
+                Ok(Pointer::from(
+                    PrimVal::Bytes(layout.signed_offset(b as u64, i)? as u128),
+                ))
+            }
+            PrimVal::Ptr(ptr) => ptr.signed_offset(i, layout).map(Pointer::from),
+            PrimVal::Undef => err!(ReadUndefBytes),
+        }
+    }
+
+    pub fn offset<C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
+        let layout = cx.data_layout();
+        match self.primval {
+            PrimVal::Bytes(b) => {
+                assert_eq!(b as u64 as u128, b);
+                Ok(Pointer::from(
+                    PrimVal::Bytes(layout.offset(b as u64, i)? as u128),
+                ))
+            }
+            PrimVal::Ptr(ptr) => ptr.offset(i, layout).map(Pointer::from),
+            PrimVal::Undef => err!(ReadUndefBytes),
+        }
+    }
+
+    pub fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
+        let layout = cx.data_layout();
+        match self.primval {
+            PrimVal::Bytes(b) => {
+                assert_eq!(b as u64 as u128, b);
+                Ok(Pointer::from(PrimVal::Bytes(
+                    layout.wrapping_signed_offset(b as u64, i) as u128,
+                )))
+            }
+            PrimVal::Ptr(ptr) => Ok(Pointer::from(ptr.wrapping_signed_offset(i, layout))),
+            PrimVal::Undef => err!(ReadUndefBytes),
+        }
+    }
+
+    pub fn is_null(self) -> EvalResult<'tcx, bool> {
+        match self.primval {
+            PrimVal::Bytes(b) => Ok(b == 0),
+            PrimVal::Ptr(_) => Ok(false),
+            PrimVal::Undef => err!(ReadUndefBytes),
+        }
+    }
+
+    pub fn to_value_with_len(self, len: u64) -> Value {
+        Value::ByValPair(self.primval, PrimVal::from_u128(len as u128))
+    }
+
+    pub fn to_value_with_vtable(self, vtable: MemoryPointer) -> Value {
+        Value::ByValPair(self.primval, PrimVal::Ptr(vtable))
+    }
+
+    pub fn to_value(self) -> Value {
+        Value::ByVal(self.primval)
+    }
+}
+
+impl ::std::convert::From<PrimVal> for Pointer {
+    fn from(primval: PrimVal) -> Self {
+        Pointer { primval }
+    }
+}
+
+impl ::std::convert::From<MemoryPointer> for Pointer {
+    fn from(ptr: MemoryPointer) -> Self {
+        PrimVal::Ptr(ptr).into()
+    }
+}
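
The `Bytes` arms above delegate overflow behavior to the target data layout. A minimal sketch of the wrapping variant, assuming a 64-bit pointer size (the names are illustrative, not from this patch):

    fn wrapping_signed_offset_64(b: u64, i: i64) -> u64 {
        // Two's complement: adding `i as u64` is the same as adding `i`, modulo 2^64.
        b.wrapping_add(i as u64)
    }

    fn main() {
        assert_eq!(wrapping_signed_offset_64(0, -1), u64::max_value());
        assert_eq!(wrapping_signed_offset_64(u64::max_value(), 1), 0);
    }
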
+
+/// A `PrimVal` represents an immediate, primitive value existing outside of a
+/// `memory::Allocation`. It is in many ways like a small chunk of an `Allocation`, up to 16 bytes in
+/// size. Like a range of bytes in an `Allocation`, a `PrimVal` can either represent the raw bytes
+/// of a simple value, a pointer into another `Allocation`, or be undefined.
+#[derive(Clone, Copy, Debug)]
+pub enum PrimVal {
+    /// The raw bytes of a simple value.
+    Bytes(u128),
+
+    /// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of
+    /// relocations, but a `PrimVal` is only large enough to contain one, so we just represent the
+    /// relocation and its associated offset together as a `MemoryPointer` here.
+    Ptr(MemoryPointer),
+
+    /// An undefined `PrimVal`, for representing values that aren't safe to examine, but are safe
+    /// to copy around, just like undefined bytes in an `Allocation`.
+    Undef,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub enum PrimValKind {
+    I8, I16, I32, I64, I128,
+    U8, U16, U32, U64, U128,
+    F32, F64,
+    Ptr, FnPtr,
+    Bool,
+    Char,
+}
+
+impl<'a, 'tcx: 'a> Value {
+    #[inline]
+    pub fn by_ref(ptr: Pointer) -> Self {
+        Value::ByRef(PtrAndAlign { ptr, aligned: true })
+    }
+
+    /// Convert the value into a pointer (or a pointer-sized integer).  If the value is a ByRef,
+    /// this may have to perform a load.
+    pub fn into_ptr<M: Machine<'tcx>>(
+        &self,
+        mem: &Memory<'a, 'tcx, M>,
+    ) -> EvalResult<'tcx, Pointer> {
+        use self::Value::*;
+        Ok(match *self {
+            ByRef(PtrAndAlign { ptr, aligned }) => {
+                mem.read_maybe_aligned(aligned, |mem| mem.read_ptr_sized_unsigned(ptr.to_ptr()?))?
+            }
+            ByVal(ptr) |
+            ByValPair(ptr, _) => ptr,
+        }.into())
+    }
+
+    pub(super) fn into_ptr_vtable_pair<M: Machine<'tcx>>(
+        &self,
+        mem: &Memory<'a, 'tcx, M>,
+    ) -> EvalResult<'tcx, (Pointer, MemoryPointer)> {
+        use self::Value::*;
+        match *self {
+            ByRef(PtrAndAlign {
+                      ptr: ref_ptr,
+                      aligned,
+                  }) => {
+                mem.read_maybe_aligned(aligned, |mem| {
+                    let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?)?.into();
+                    let vtable = mem.read_ptr_sized_unsigned(
+                        ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?,
+                    )?.to_ptr()?;
+                    Ok((ptr, vtable))
+                })
+            }
+
+            ByValPair(ptr, vtable) => Ok((ptr.into(), vtable.to_ptr()?)),
+
+            ByVal(PrimVal::Undef) => err!(ReadUndefBytes),
+            _ => bug!("expected ptr and vtable, got {:?}", self),
+        }
+    }
+
+    pub(super) fn into_slice<M: Machine<'tcx>>(
+        &self,
+        mem: &Memory<'a, 'tcx, M>,
+    ) -> EvalResult<'tcx, (Pointer, u64)> {
+        use self::Value::*;
+        match *self {
+            ByRef(PtrAndAlign {
+                      ptr: ref_ptr,
+                      aligned,
+                  }) => {
+                mem.read_maybe_aligned(aligned, |mem| {
+                    let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?)?.into();
+                    let len = mem.read_ptr_sized_unsigned(
+                        ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?,
+                    )?.to_bytes()? as u64;
+                    Ok((ptr, len))
+                })
+            }
+            ByValPair(ptr, val) => {
+                let len = val.to_u128()?;
+                assert_eq!(len as u64 as u128, len);
+                Ok((ptr.into(), len as u64))
+            }
+            ByVal(PrimVal::Undef) => err!(ReadUndefBytes),
+            ByVal(_) => bug!("expected ptr and length, got {:?}", self),
+        }
+    }
+}
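
`into_slice` and `into_ptr_vtable_pair` both decode the two-word fat-pointer layout that `ByValPair` models; the same layout is observable in plain Rust:

    fn main() {
        let xs: &[u8] = &[1, 2, 3];
        // A &[T] is (data pointer, length) -- the ByValPair(ptr, len) case above.
        let (ptr, len): (*const u8, usize) = (xs.as_ptr(), xs.len());
        assert_eq!(len, 3);
        assert_eq!(unsafe { *ptr }, 1);
    }
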
+
+impl<'tcx> PrimVal {
+    pub fn from_u128(n: u128) -> Self {
+        PrimVal::Bytes(n)
+    }
+
+    pub fn from_i128(n: i128) -> Self {
+        PrimVal::Bytes(n as u128)
+    }
+
+    pub fn from_f32(f: f32) -> Self {
+        PrimVal::Bytes(f32_to_bytes(f))
+    }
+
+    pub fn from_f64(f: f64) -> Self {
+        PrimVal::Bytes(f64_to_bytes(f))
+    }
+
+    pub fn from_bool(b: bool) -> Self {
+        PrimVal::Bytes(b as u128)
+    }
+
+    pub fn from_char(c: char) -> Self {
+        PrimVal::Bytes(c as u128)
+    }
+
+    pub fn to_bytes(self) -> EvalResult<'tcx, u128> {
+        match self {
+            PrimVal::Bytes(b) => Ok(b),
+            PrimVal::Ptr(_) => err!(ReadPointerAsBytes),
+            PrimVal::Undef => err!(ReadUndefBytes),
+        }
+    }
+
+    pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
+        match self {
+            PrimVal::Bytes(_) => err!(ReadBytesAsPointer),
+            PrimVal::Ptr(p) => Ok(p),
+            PrimVal::Undef => err!(ReadUndefBytes),
+        }
+    }
+
+    pub fn is_bytes(self) -> bool {
+        match self {
+            PrimVal::Bytes(_) => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_ptr(self) -> bool {
+        match self {
+            PrimVal::Ptr(_) => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_undef(self) -> bool {
+        match self {
+            PrimVal::Undef => true,
+            _ => false,
+        }
+    }
+
+    pub fn to_u128(self) -> EvalResult<'tcx, u128> {
+        self.to_bytes()
+    }
+
+    pub fn to_u64(self) -> EvalResult<'tcx, u64> {
+        self.to_bytes().map(|b| {
+            assert_eq!(b as u64 as u128, b);
+            b as u64
+        })
+    }
+
+    pub fn to_i32(self) -> EvalResult<'tcx, i32> {
+        self.to_bytes().map(|b| {
+            assert_eq!(b as i32 as u128, b);
+            b as i32
+        })
+    }
+
+    pub fn to_i128(self) -> EvalResult<'tcx, i128> {
+        self.to_bytes().map(|b| b as i128)
+    }
+
+    pub fn to_i64(self) -> EvalResult<'tcx, i64> {
+        self.to_bytes().map(|b| {
+            assert_eq!(b as i64 as u128, b);
+            b as i64
+        })
+    }
+
+    pub fn to_f32(self) -> EvalResult<'tcx, f32> {
+        self.to_bytes().map(bytes_to_f32)
+    }
+
+    pub fn to_f64(self) -> EvalResult<'tcx, f64> {
+        self.to_bytes().map(bytes_to_f64)
+    }
+
+    pub fn to_bool(self) -> EvalResult<'tcx, bool> {
+        match self.to_bytes()? {
+            0 => Ok(false),
+            1 => Ok(true),
+            _ => err!(InvalidBool),
+        }
+    }
+}
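
The narrowing accessors above (`to_u64`, `to_i32`, `to_i64`) all follow one pattern: cast down, cast back up, and assert that nothing was lost. The same check, standalone:

    fn to_u64(b: u128) -> u64 {
        assert_eq!(b as u64 as u128, b, "value does not fit in 64 bits");
        b as u64
    }

    fn main() {
        assert_eq!(to_u64(42), 42);
        // to_u64(1u128 << 64) would panic instead of silently truncating.
    }
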
+
+impl PrimValKind {
+    pub fn is_int(self) -> bool {
+        use self::PrimValKind::*;
+        match self {
+            I8 | I16 | I32 | I64 | I128 | U8 | U16 | U32 | U64 | U128 => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_signed_int(self) -> bool {
+        use self::PrimValKind::*;
+        match self {
+            I8 | I16 | I32 | I64 | I128 => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_float(self) -> bool {
+        use self::PrimValKind::*;
+        match self {
+            F32 | F64 => true,
+            _ => false,
+        }
+    }
+
+    pub fn from_uint_size(size: u64) -> Self {
+        match size {
+            1 => PrimValKind::U8,
+            2 => PrimValKind::U16,
+            4 => PrimValKind::U32,
+            8 => PrimValKind::U64,
+            16 => PrimValKind::U128,
+            _ => bug!("can't make uint with size {}", size),
+        }
+    }
+
+    pub fn from_int_size(size: u64) -> Self {
+        match size {
+            1 => PrimValKind::I8,
+            2 => PrimValKind::I16,
+            4 => PrimValKind::I32,
+            8 => PrimValKind::I64,
+            16 => PrimValKind::I128,
+            _ => bug!("can't make int with size {}", size),
+        }
+    }
+
+    pub fn is_ptr(self) -> bool {
+        use self::PrimValKind::*;
+        match self {
+            Ptr | FnPtr => true,
+            _ => false,
+        }
+    }
+}
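
A hypothetical caller-side sketch of the size-to-kind mapping, picking the kind for `usize` on the host:

    use std::mem::size_of;

    fn main() {
        let kind = match size_of::<usize>() as u64 {
            1 => "U8", 2 => "U16", 4 => "U32", 8 => "U64", 16 => "U128",
            _ => unreachable!(),
        };
        println!("usize maps to PrimValKind::{}", kind); // U64 on 64-bit hosts
    }
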
diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs
deleted file mode 100644 (file)
index 5ae7c9d..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-use rustc::ty::{self, Ty};
-use syntax::ast::{FloatTy, IntTy, UintTy};
-
-use super::{PrimVal, EvalContext, EvalResult, MemoryPointer, PointerArithmetic, Machine};
-
-impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
-    pub(super) fn cast_primval(
-        &self,
-        val: PrimVal,
-        src_ty: Ty<'tcx>,
-        dest_ty: Ty<'tcx>,
-    ) -> EvalResult<'tcx, PrimVal> {
-        trace!("Casting {:?}: {:?} to {:?}", val, src_ty, dest_ty);
-        let src_kind = self.ty_to_primval_kind(src_ty)?;
-
-        match val {
-            PrimVal::Undef => Ok(PrimVal::Undef),
-            PrimVal::Ptr(ptr) => self.cast_from_ptr(ptr, dest_ty),
-            val @ PrimVal::Bytes(_) => {
-                use super::PrimValKind::*;
-                match src_kind {
-                    F32 => self.cast_from_float(val.to_f32()? as f64, dest_ty),
-                    F64 => self.cast_from_float(val.to_f64()?, dest_ty),
-
-                    I8 | I16 | I32 | I64 | I128 => {
-                        self.cast_from_signed_int(val.to_i128()?, dest_ty)
-                    }
-
-                    Bool | Char | U8 | U16 | U32 | U64 | U128 | FnPtr | Ptr => {
-                        self.cast_from_int(val.to_u128()?, dest_ty, false)
-                    }
-                }
-            }
-        }
-    }
-
-    fn cast_from_signed_int(&self, val: i128, ty: ty::Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
-        self.cast_from_int(val as u128, ty, val < 0)
-    }
-
-    fn int_to_int(&self, v: i128, ty: IntTy) -> u128 {
-        match ty {
-            IntTy::I8 => v as i8 as u128,
-            IntTy::I16 => v as i16 as u128,
-            IntTy::I32 => v as i32 as u128,
-            IntTy::I64 => v as i64 as u128,
-            IntTy::I128 => v as u128,
-            IntTy::Is => {
-                let ty = self.tcx.sess.target.isize_ty;
-                self.int_to_int(v, ty)
-            }
-        }
-    }
-    fn int_to_uint(&self, v: u128, ty: UintTy) -> u128 {
-        match ty {
-            UintTy::U8 => v as u8 as u128,
-            UintTy::U16 => v as u16 as u128,
-            UintTy::U32 => v as u32 as u128,
-            UintTy::U64 => v as u64 as u128,
-            UintTy::U128 => v,
-            UintTy::Us => {
-                let ty = self.tcx.sess.target.usize_ty;
-                self.int_to_uint(v, ty)
-            }
-        }
-    }
-
-    fn cast_from_int(
-        &self,
-        v: u128,
-        ty: ty::Ty<'tcx>,
-        negative: bool,
-    ) -> EvalResult<'tcx, PrimVal> {
-        trace!("cast_from_int: {}, {}, {}", v, ty, negative);
-        use rustc::ty::TypeVariants::*;
-        match ty.sty {
-            // Casts to bool are not permitted by rustc, no need to handle them here.
-            TyInt(ty) => Ok(PrimVal::Bytes(self.int_to_int(v as i128, ty))),
-            TyUint(ty) => Ok(PrimVal::Bytes(self.int_to_uint(v, ty))),
-
-            TyFloat(FloatTy::F64) if negative => Ok(PrimVal::from_f64(v as i128 as f64)),
-            TyFloat(FloatTy::F64) => Ok(PrimVal::from_f64(v as f64)),
-            TyFloat(FloatTy::F32) if negative => Ok(PrimVal::from_f32(v as i128 as f32)),
-            TyFloat(FloatTy::F32) => Ok(PrimVal::from_f32(v as f32)),
-
-            TyChar if v as u8 as u128 == v => Ok(PrimVal::Bytes(v)),
-            TyChar => err!(InvalidChar(v)),
-
-            // No alignment check needed for raw pointers.  But we have to truncate to target ptr size.
-            TyRawPtr(_) => Ok(PrimVal::Bytes(self.memory.truncate_to_ptr(v).0 as u128)),
-
-            _ => err!(Unimplemented(format!("int to {:?} cast", ty))),
-        }
-    }
-
-    fn cast_from_float(&self, val: f64, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
-        use rustc::ty::TypeVariants::*;
-        match ty.sty {
-            // Casting negative floats to unsigned integers yields zero.
-            TyUint(_) if val < 0.0 => self.cast_from_int(0, ty, false),
-            TyInt(_) if val < 0.0 => self.cast_from_int(val as i128 as u128, ty, true),
-
-            TyInt(_) | ty::TyUint(_) => self.cast_from_int(val as u128, ty, false),
-
-            TyFloat(FloatTy::F64) => Ok(PrimVal::from_f64(val)),
-            TyFloat(FloatTy::F32) => Ok(PrimVal::from_f32(val as f32)),
-            _ => err!(Unimplemented(format!("float to {:?} cast", ty))),
-        }
-    }
-
-    fn cast_from_ptr(&self, ptr: MemoryPointer, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
-        use rustc::ty::TypeVariants::*;
-        match ty.sty {
-            // Casting to a reference or fn pointer is not permitted by rustc, no need to support it here.
-            TyRawPtr(_) |
-            TyInt(IntTy::Is) |
-            TyUint(UintTy::Us) => Ok(PrimVal::Ptr(ptr)),
-            TyInt(_) | TyUint(_) => err!(ReadPointerAsBytes),
-            _ => err!(Unimplemented(format!("ptr to {:?} cast", ty))),
-        }
-    }
-}
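
One of the less obvious rules in the file deleted above (and re-added under librustc): casting a negative float to an unsigned integer yields zero. Today's stable `as` casts agree with that choice:

    fn main() {
        // Mirrors cast_from_float's `TyUint(_) if val < 0.0` arm.
        assert_eq!(-1.5f64 as u32, 0);
    }
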
diff --git a/src/librustc_mir/interpret/const_eval.rs b/src/librustc_mir/interpret/const_eval.rs
deleted file mode 100644 (file)
index 075880f..0000000
+++ /dev/null
@@ -1,259 +0,0 @@
-use rustc::traits::Reveal;
-use rustc::ty::{self, TyCtxt, Ty, Instance, layout};
-use rustc::mir;
-
-use syntax::ast::Mutability;
-use syntax::codemap::Span;
-
-use super::{EvalResult, EvalError, EvalErrorKind, GlobalId, Lvalue, Value, PrimVal, EvalContext,
-            StackPopCleanup, PtrAndAlign, MemoryKind, ValTy};
-
-use rustc_const_math::ConstInt;
-
-use std::fmt;
-use std::error::Error;
-
-pub fn eval_body_as_primval<'a, 'tcx>(
-    tcx: TyCtxt<'a, 'tcx, 'tcx>,
-    instance: Instance<'tcx>,
-) -> EvalResult<'tcx, (PrimVal, Ty<'tcx>)> {
-    let limits = super::ResourceLimits::default();
-    let mut ecx = EvalContext::<CompileTimeFunctionEvaluator>::new(tcx, limits, (), ());
-    let cid = GlobalId {
-        instance,
-        promoted: None,
-    };
-    if ecx.tcx.has_attr(instance.def_id(), "linkage") {
-        return Err(ConstEvalError::NotConst("extern global".to_string()).into());
-    }
-
-    let mir = ecx.load_mir(instance.def)?;
-    if !ecx.globals.contains_key(&cid) {
-        let size = ecx.type_size_with_substs(mir.return_ty, instance.substs)?
-            .expect("unsized global");
-        let align = ecx.type_align_with_substs(mir.return_ty, instance.substs)?;
-        let ptr = ecx.memory.allocate(
-            size,
-            align,
-            MemoryKind::UninitializedStatic,
-        )?;
-        let aligned = !ecx.is_packed(mir.return_ty)?;
-        ecx.globals.insert(
-            cid,
-            PtrAndAlign {
-                ptr: ptr.into(),
-                aligned,
-            },
-        );
-        let mutable = !mir.return_ty.is_freeze(
-            ecx.tcx,
-            ty::ParamEnv::empty(Reveal::All),
-            mir.span,
-        );
-        let mutability = if mutable {
-            Mutability::Mutable
-        } else {
-            Mutability::Immutable
-        };
-        let cleanup = StackPopCleanup::MarkStatic(mutability);
-        let name = ty::tls::with(|tcx| tcx.item_path_str(instance.def_id()));
-        trace!("const_eval: pushing stack frame for global: {}", name);
-        ecx.push_stack_frame(
-            instance,
-            mir.span,
-            mir,
-            Lvalue::from_ptr(ptr),
-            cleanup,
-        )?;
-
-        while ecx.step()? {}
-    }
-    let value = Value::ByRef(*ecx.globals.get(&cid).expect("global not cached"));
-    let valty = ValTy {
-        value,
-        ty: mir.return_ty,
-    };
-    Ok((ecx.value_to_primval(valty)?, mir.return_ty))
-}
-
-pub fn eval_body_as_integer<'a, 'tcx>(
-    tcx: TyCtxt<'a, 'tcx, 'tcx>,
-    instance: Instance<'tcx>,
-) -> EvalResult<'tcx, ConstInt> {
-    let (prim, ty) = eval_body_as_primval(tcx, instance)?;
-    let prim = prim.to_bytes()?;
-    use syntax::ast::{IntTy, UintTy};
-    use rustc::ty::TypeVariants::*;
-    use rustc_const_math::{ConstIsize, ConstUsize};
-    Ok(match ty.sty {
-        TyInt(IntTy::I8) => ConstInt::I8(prim as i128 as i8),
-        TyInt(IntTy::I16) => ConstInt::I16(prim as i128 as i16),
-        TyInt(IntTy::I32) => ConstInt::I32(prim as i128 as i32),
-        TyInt(IntTy::I64) => ConstInt::I64(prim as i128 as i64),
-        TyInt(IntTy::I128) => ConstInt::I128(prim as i128),
-        TyInt(IntTy::Is) => ConstInt::Isize(
-            ConstIsize::new(prim as i128 as i64, tcx.sess.target.isize_ty)
-                .expect("miri should already have errored"),
-        ),
-        TyUint(UintTy::U8) => ConstInt::U8(prim as u8),
-        TyUint(UintTy::U16) => ConstInt::U16(prim as u16),
-        TyUint(UintTy::U32) => ConstInt::U32(prim as u32),
-        TyUint(UintTy::U64) => ConstInt::U64(prim as u64),
-        TyUint(UintTy::U128) => ConstInt::U128(prim),
-        TyUint(UintTy::Us) => ConstInt::Usize(
-            ConstUsize::new(prim as u64, tcx.sess.target.usize_ty)
-                .expect("miri should already have errored"),
-        ),
-        _ => {
-            return Err(
-                ConstEvalError::NeedsRfc(
-                    "evaluating anything other than isize/usize during typeck".to_string(),
-                ).into(),
-            )
-        }
-    })
-}
-
-struct CompileTimeFunctionEvaluator;
-
-impl<'tcx> Into<EvalError<'tcx>> for ConstEvalError {
-    fn into(self) -> EvalError<'tcx> {
-        EvalErrorKind::MachineError(Box::new(self)).into()
-    }
-}
-
-#[derive(Clone, Debug)]
-enum ConstEvalError {
-    NeedsRfc(String),
-    NotConst(String),
-}
-
-impl fmt::Display for ConstEvalError {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        use self::ConstEvalError::*;
-        match *self {
-            NeedsRfc(ref msg) => {
-                write!(
-                    f,
-                    "\"{}\" needs an rfc before being allowed inside constants",
-                    msg
-                )
-            }
-            NotConst(ref msg) => write!(f, "Cannot evaluate within constants: \"{}\"", msg),
-        }
-    }
-}
-
-impl Error for ConstEvalError {
-    fn description(&self) -> &str {
-        use self::ConstEvalError::*;
-        match *self {
-            NeedsRfc(_) => "this feature needs an rfc before being allowed inside constants",
-            NotConst(_) => "this feature is not compatible with constant evaluation",
-        }
-    }
-
-    fn cause(&self) -> Option<&Error> {
-        None
-    }
-}
-
-impl<'tcx> super::Machine<'tcx> for CompileTimeFunctionEvaluator {
-    type Data = ();
-    type MemoryData = ();
-    type MemoryKinds = !;
-    fn eval_fn_call<'a>(
-        ecx: &mut EvalContext<'a, 'tcx, Self>,
-        instance: ty::Instance<'tcx>,
-        destination: Option<(Lvalue, mir::BasicBlock)>,
-        _args: &[ValTy<'tcx>],
-        span: Span,
-        _sig: ty::FnSig<'tcx>,
-    ) -> EvalResult<'tcx, bool> {
-        if !ecx.tcx.is_const_fn(instance.def_id()) {
-            return Err(
-                ConstEvalError::NotConst(format!("calling non-const fn `{}`", instance)).into(),
-            );
-        }
-        let mir = match ecx.load_mir(instance.def) {
-            Ok(mir) => mir,
-            Err(EvalError { kind: EvalErrorKind::NoMirFor(path), .. }) => {
-                // some simple things like `malloc` might get accepted in the future
-                return Err(
-                    ConstEvalError::NeedsRfc(format!("calling extern function `{}`", path))
-                        .into(),
-                );
-            }
-            Err(other) => return Err(other),
-        };
-        let (return_lvalue, return_to_block) = match destination {
-            Some((lvalue, block)) => (lvalue, StackPopCleanup::Goto(block)),
-            None => (Lvalue::undef(), StackPopCleanup::None),
-        };
-
-        ecx.push_stack_frame(
-            instance,
-            span,
-            mir,
-            return_lvalue,
-            return_to_block,
-        )?;
-
-        Ok(false)
-    }
-
-    fn call_intrinsic<'a>(
-        _ecx: &mut EvalContext<'a, 'tcx, Self>,
-        _instance: ty::Instance<'tcx>,
-        _args: &[ValTy<'tcx>],
-        _dest: Lvalue,
-        _dest_ty: Ty<'tcx>,
-        _dest_layout: &'tcx layout::Layout,
-        _target: mir::BasicBlock,
-    ) -> EvalResult<'tcx> {
-        Err(
-            ConstEvalError::NeedsRfc("calling intrinsics".to_string()).into(),
-        )
-    }
-
-    fn try_ptr_op<'a>(
-        _ecx: &EvalContext<'a, 'tcx, Self>,
-        _bin_op: mir::BinOp,
-        left: PrimVal,
-        _left_ty: Ty<'tcx>,
-        right: PrimVal,
-        _right_ty: Ty<'tcx>,
-    ) -> EvalResult<'tcx, Option<(PrimVal, bool)>> {
-        if left.is_bytes() && right.is_bytes() {
-            Ok(None)
-        } else {
-            Err(
-                ConstEvalError::NeedsRfc("Pointer arithmetic or comparison".to_string()).into(),
-            )
-        }
-    }
-
-    fn mark_static_initialized(m: !) -> EvalResult<'tcx> {
-        m
-    }
-
-    fn box_alloc<'a>(
-        _ecx: &mut EvalContext<'a, 'tcx, Self>,
-        _ty: ty::Ty<'tcx>,
-        _dest: Lvalue,
-    ) -> EvalResult<'tcx> {
-        Err(
-            ConstEvalError::NeedsRfc("Heap allocations via `box` keyword".to_string()).into(),
-        )
-    }
-
-    fn global_item_with_linkage<'a>(
-        _ecx: &mut EvalContext<'a, 'tcx, Self>,
-        _instance: ty::Instance<'tcx>,
-        _mutability: Mutability,
-    ) -> EvalResult<'tcx> {
-        Err(
-            ConstEvalError::NotConst("statics with `linkage` attribute".to_string()).into(),
-        )
-    }
-}
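
The machine deleted above draws the const-evaluation boundary at `is_const_fn`. In source terms (using today's stable `const fn` syntax, which was still feature-gated when this commit landed):

    const fn double(x: u32) -> u32 { x * 2 }

    fn not_const() -> u32 { 2 }

    const A: u32 = double(21);      // accepted: calls a const fn
    // const B: u32 = not_const();  // rejected: "calling non-const fn `not_const`"

    fn main() { assert_eq!(A, 42); }
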
diff --git a/src/librustc_mir/interpret/error.rs b/src/librustc_mir/interpret/error.rs
deleted file mode 100644 (file)
index 96911c1..0000000
+++ /dev/null
@@ -1,313 +0,0 @@
-use std::error::Error;
-use std::{fmt, env};
-
-use rustc::mir;
-use rustc::ty::{FnSig, Ty, layout};
-
-use super::{
-    MemoryPointer, Lock, AccessKind
-};
-
-use rustc_const_math::ConstMathErr;
-use syntax::codemap::Span;
-use backtrace::Backtrace;
-
-#[derive(Debug)]
-pub struct EvalError<'tcx> {
-    pub kind: EvalErrorKind<'tcx>,
-    pub backtrace: Option<Backtrace>,
-}
-
-impl<'tcx> From<EvalErrorKind<'tcx>> for EvalError<'tcx> {
-    fn from(kind: EvalErrorKind<'tcx>) -> Self {
-        let backtrace = match env::var("RUST_BACKTRACE") {
-            Ok(ref val) if !val.is_empty() => Some(Backtrace::new_unresolved()),
-            _ => None
-        };
-        EvalError {
-            kind,
-            backtrace,
-        }
-    }
-}
-
-#[derive(Debug)]
-pub enum EvalErrorKind<'tcx> {
-    /// This variant is used by machines to signal their own errors that do not
-    /// match an existing variant
-    MachineError(Box<Error>),
-    FunctionPointerTyMismatch(FnSig<'tcx>, FnSig<'tcx>),
-    NoMirFor(String),
-    UnterminatedCString(MemoryPointer),
-    DanglingPointerDeref,
-    DoubleFree,
-    InvalidMemoryAccess,
-    InvalidFunctionPointer,
-    InvalidBool,
-    InvalidDiscriminant,
-    PointerOutOfBounds {
-        ptr: MemoryPointer,
-        access: bool,
-        allocation_size: u64,
-    },
-    InvalidNullPointerUsage,
-    ReadPointerAsBytes,
-    ReadBytesAsPointer,
-    InvalidPointerMath,
-    ReadUndefBytes,
-    DeadLocal,
-    InvalidBoolOp(mir::BinOp),
-    Unimplemented(String),
-    DerefFunctionPointer,
-    ExecuteMemory,
-    ArrayIndexOutOfBounds(Span, u64, u64),
-    Math(Span, ConstMathErr),
-    Intrinsic(String),
-    OverflowingMath,
-    InvalidChar(u128),
-    OutOfMemory {
-        allocation_size: u64,
-        memory_size: u64,
-        memory_usage: u64,
-    },
-    ExecutionTimeLimitReached,
-    StackFrameLimitReached,
-    OutOfTls,
-    TlsOutOfBounds,
-    AbiViolation(String),
-    AlignmentCheckFailed {
-        required: u64,
-        has: u64,
-    },
-    MemoryLockViolation {
-        ptr: MemoryPointer,
-        len: u64,
-        frame: usize,
-        access: AccessKind,
-        lock: Lock,
-    },
-    MemoryAcquireConflict {
-        ptr: MemoryPointer,
-        len: u64,
-        kind: AccessKind,
-        lock: Lock,
-    },
-    InvalidMemoryLockRelease {
-        ptr: MemoryPointer,
-        len: u64,
-        frame: usize,
-        lock: Lock,
-    },
-    DeallocatedLockedMemory {
-        ptr: MemoryPointer,
-        lock: Lock,
-    },
-    ValidationFailure(String),
-    CalledClosureAsFunction,
-    VtableForArgumentlessMethod,
-    ModifiedConstantMemory,
-    AssumptionNotHeld,
-    InlineAsm,
-    TypeNotPrimitive(Ty<'tcx>),
-    ReallocatedWrongMemoryKind(String, String),
-    DeallocatedWrongMemoryKind(String, String),
-    ReallocateNonBasePtr,
-    DeallocateNonBasePtr,
-    IncorrectAllocationInformation,
-    Layout(layout::LayoutError<'tcx>),
-    HeapAllocZeroBytes,
-    HeapAllocNonPowerOfTwoAlignment(u64),
-    Unreachable,
-    Panic,
-    ReadFromReturnPointer,
-    PathNotFound(Vec<String>),
-}
-
-pub type EvalResult<'tcx, T = ()> = Result<T, EvalError<'tcx>>;
-
-impl<'tcx> Error for EvalError<'tcx> {
-    fn description(&self) -> &str {
-        use self::EvalErrorKind::*;
-        match self.kind {
-            MachineError(ref inner) => inner.description(),
-            FunctionPointerTyMismatch(..) =>
-                "tried to call a function through a function pointer of a different type",
-            InvalidMemoryAccess =>
-                "tried to access memory through an invalid pointer",
-            DanglingPointerDeref =>
-                "dangling pointer was dereferenced",
-            DoubleFree =>
-                "tried to deallocate dangling pointer",
-            InvalidFunctionPointer =>
-                "tried to use a function pointer after offsetting it",
-            InvalidBool =>
-                "invalid boolean value read",
-            InvalidDiscriminant =>
-                "invalid enum discriminant value read",
-            PointerOutOfBounds { .. } =>
-                "pointer offset outside bounds of allocation",
-            InvalidNullPointerUsage =>
-                "invalid use of NULL pointer",
-            MemoryLockViolation { .. } =>
-                "memory access conflicts with lock",
-            MemoryAcquireConflict { .. } =>
-                "new memory lock conflicts with existing lock",
-            ValidationFailure(..) =>
-                "type validation failed",
-            InvalidMemoryLockRelease { .. } =>
-                "invalid attempt to release write lock",
-            DeallocatedLockedMemory { .. } =>
-                "tried to deallocate memory in conflict with a lock",
-            ReadPointerAsBytes =>
-                "a raw memory access tried to access part of a pointer value as raw bytes",
-            ReadBytesAsPointer =>
-                "a memory access tried to interpret some bytes as a pointer",
-            InvalidPointerMath =>
-                "attempted to do invalid arithmetic on pointers that would leak base addresses, e.g. comparing pointers into different allocations",
-            ReadUndefBytes =>
-                "attempted to read undefined bytes",
-            DeadLocal =>
-                "tried to access a dead local variable",
-            InvalidBoolOp(_) =>
-                "invalid boolean operation",
-            Unimplemented(ref msg) => msg,
-            DerefFunctionPointer =>
-                "tried to dereference a function pointer",
-            ExecuteMemory =>
-                "tried to treat a memory pointer as a function pointer",
-            ArrayIndexOutOfBounds(..) =>
-                "array index out of bounds",
-            Math(..) =>
-                "mathematical operation failed",
-            Intrinsic(..) =>
-                "intrinsic failed",
-            OverflowingMath =>
-                "attempted to do overflowing math",
-            NoMirFor(..) =>
-                "mir not found",
-            InvalidChar(..) =>
-                "tried to interpret an invalid 32-bit value as a char",
-            OutOfMemory{..} =>
-                "could not allocate more memory",
-            ExecutionTimeLimitReached =>
-                "reached the configured maximum execution time",
-            StackFrameLimitReached =>
-                "reached the configured maximum number of stack frames",
-            OutOfTls =>
-                "reached the maximum number of representable TLS keys",
-            TlsOutOfBounds =>
-                "accessed an invalid (unallocated) TLS key",
-            AbiViolation(ref msg) => msg,
-            AlignmentCheckFailed{..} =>
-                "tried to execute a misaligned read or write",
-            CalledClosureAsFunction =>
-                "tried to call a closure through a function pointer",
-            VtableForArgumentlessMethod =>
-                "tried to call a vtable function without arguments",
-            ModifiedConstantMemory =>
-                "tried to modify constant memory",
-            AssumptionNotHeld =>
-                "`assume` argument was false",
-            InlineAsm =>
-                "miri does not support inline assembly",
-            TypeNotPrimitive(_) =>
-                "expected primitive type, got nonprimitive",
-            ReallocatedWrongMemoryKind(_, _) =>
-                "tried to reallocate memory from one kind to another",
-            DeallocatedWrongMemoryKind(_, _) =>
-                "tried to deallocate memory of the wrong kind",
-            ReallocateNonBasePtr =>
-                "tried to reallocate with a pointer not to the beginning of an existing object",
-            DeallocateNonBasePtr =>
-                "tried to deallocate with a pointer not to the beginning of an existing object",
-            IncorrectAllocationInformation =>
-                "tried to deallocate or reallocate using incorrect alignment or size",
-            Layout(_) =>
-                "rustc layout computation failed",
-            UnterminatedCString(_) =>
-                "attempted to get length of a null terminated string, but no null found before end of allocation",
-            HeapAllocZeroBytes =>
-                "tried to re-, de- or allocate zero bytes on the heap",
-            HeapAllocNonPowerOfTwoAlignment(_) =>
-                "tried to re-, de-, or allocate heap memory with alignment that is not a power of two",
-            Unreachable =>
-                "entered unreachable code",
-            Panic =>
-                "the evaluated program panicked",
-            ReadFromReturnPointer =>
-                "tried to read from the return pointer",
-            PathNotFound(_) =>
-                "a path could not be resolved, maybe the crate is not loaded",
-        }
-    }
-
-    fn cause(&self) -> Option<&Error> {
-        use self::EvalErrorKind::*;
-        match self.kind {
-            MachineError(ref inner) => Some(&**inner),
-            _ => None,
-        }
-    }
-}
-
-impl<'tcx> fmt::Display for EvalError<'tcx> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        use self::EvalErrorKind::*;
-        match self.kind {
-            PointerOutOfBounds { ptr, access, allocation_size } => {
-                write!(f, "{} at offset {}, outside bounds of allocation {} which has size {}",
-                       if access { "memory access" } else { "pointer computed" },
-                       ptr.offset, ptr.alloc_id, allocation_size)
-            },
-            MemoryLockViolation { ptr, len, frame, access, ref lock } => {
-                write!(f, "{:?} access by frame {} at {:?}, size {}, is in conflict with lock {:?}",
-                       access, frame, ptr, len, lock)
-            }
-            MemoryAcquireConflict { ptr, len, kind, ref lock } => {
-                write!(f, "new {:?} lock at {:?}, size {}, is in conflict with lock {:?}",
-                       kind, ptr, len, lock)
-            }
-            InvalidMemoryLockRelease { ptr, len, frame, ref lock } => {
-                write!(f, "frame {} tried to release memory write lock at {:?}, size {}, but cannot release lock {:?}",
-                       frame, ptr, len, lock)
-            }
-            DeallocatedLockedMemory { ptr, ref lock } => {
-                write!(f, "tried to deallocate memory at {:?} in conflict with lock {:?}",
-                       ptr, lock)
-            }
-            ValidationFailure(ref err) => {
-                write!(f, "type validation failed: {}", err)
-            }
-            NoMirFor(ref func) => write!(f, "no mir for `{}`", func),
-            FunctionPointerTyMismatch(sig, got) =>
-                write!(f, "tried to call a function with sig {} through a function pointer of type {}", sig, got),
-            ArrayIndexOutOfBounds(span, len, index) =>
-                write!(f, "index out of bounds: the len is {} but the index is {} at {:?}", len, index, span),
-            ReallocatedWrongMemoryKind(ref old, ref new) =>
-                write!(f, "tried to reallocate memory from {} to {}", old, new),
-            DeallocatedWrongMemoryKind(ref old, ref new) =>
-                write!(f, "tried to deallocate {} memory but gave {} as the kind", old, new),
-            Math(span, ref err) =>
-                write!(f, "{:?} at {:?}", err, span),
-            Intrinsic(ref err) =>
-                write!(f, "{}", err),
-            InvalidChar(c) =>
-                write!(f, "tried to interpret an invalid 32-bit value as a char: {}", c),
-            OutOfMemory { allocation_size, memory_size, memory_usage } =>
-                write!(f, "tried to allocate {} more bytes, but only {} bytes are free of the {} byte memory",
-                       allocation_size, memory_size - memory_usage, memory_size),
-            AlignmentCheckFailed { required, has } =>
-               write!(f, "tried to access memory with alignment {}, but alignment {} is required",
-                      has, required),
-            TypeNotPrimitive(ty) =>
-                write!(f, "expected primitive type, got {}", ty),
-            Layout(ref err) =>
-                write!(f, "rustc layout computation failed: {:?}", err),
-            PathNotFound(ref path) =>
-                write!(f, "Cannot find path {:?}", path),
-            MachineError(ref inner) =>
-                write!(f, "machine error: {}", inner),
-            _ => write!(f, "{}", self.description()),
-        }
-    }
-}
diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs
deleted file mode 100644 (file)
index 3388031..0000000
+++ /dev/null
@@ -1,2534 +0,0 @@
-use std::collections::{HashMap, HashSet};
-use std::fmt::Write;
-
-use rustc::hir::def_id::DefId;
-use rustc::hir::map::definitions::DefPathData;
-use rustc::middle::const_val::ConstVal;
-use rustc::middle::region;
-use rustc::mir;
-use rustc::traits::Reveal;
-use rustc::ty::layout::{self, Layout, Size, Align, HasDataLayout};
-use rustc::ty::subst::{Subst, Substs, Kind};
-use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
-use rustc_data_structures::indexed_vec::Idx;
-use syntax::codemap::{self, DUMMY_SP};
-use syntax::ast::Mutability;
-use syntax::abi::Abi;
-
-use super::{EvalError, EvalResult, EvalErrorKind, GlobalId, Lvalue, LvalueExtra, Memory,
-            MemoryPointer, HasMemory, MemoryKind, operator, PrimVal, PrimValKind, Value, Pointer,
-            ValidationQuery, Machine};
-
-pub struct EvalContext<'a, 'tcx: 'a, M: Machine<'tcx>> {
-    /// Stores data required by the `Machine`
-    pub machine_data: M::Data,
-
-    /// The results of the type checker, from rustc.
-    pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
-
-    /// The virtual memory system.
-    pub memory: Memory<'a, 'tcx, M>,
-
-    /// Lvalues that were suspended by the validation subsystem, and will be recovered later
-    pub(crate) suspended: HashMap<DynamicLifetime, Vec<ValidationQuery<'tcx>>>,
-
-    /// Precomputed statics, constants and promoteds.
-    pub globals: HashMap<GlobalId<'tcx>, PtrAndAlign>,
-
-    /// The virtual call stack.
-    pub(crate) stack: Vec<Frame<'tcx>>,
-
-    /// The maximum number of stack frames allowed
-    pub(crate) stack_limit: usize,
-
-    /// The maximum number of operations that may be executed.
-    /// This prevents infinite loops and huge computations from freezing up const eval.
-    /// Remove once halting problem is solved.
-    pub(crate) steps_remaining: u64,
-}
-
-/// A stack frame.
-pub struct Frame<'tcx> {
-    ////////////////////////////////////////////////////////////////////////////////
-    // Function and callsite information
-    ////////////////////////////////////////////////////////////////////////////////
-    /// The MIR for the function called on this frame.
-    pub mir: &'tcx mir::Mir<'tcx>,
-
-    /// The def_id and substs of the current function
-    pub instance: ty::Instance<'tcx>,
-
-    /// The span of the call site.
-    pub span: codemap::Span,
-
-    ////////////////////////////////////////////////////////////////////////////////
-    // Return lvalue and locals
-    ////////////////////////////////////////////////////////////////////////////////
-    /// The block to return to when returning from the current stack frame
-    pub return_to_block: StackPopCleanup,
-
-    /// The location where the result of the current stack frame should be written to.
-    pub return_lvalue: Lvalue,
-
-    /// The list of locals for this stack frame, stored in order as
-    /// `[arguments..., variables..., temporaries...]`. The locals are stored as `Option<Value>`s.
-    /// `None` represents a local that is currently dead, while a live local
-    /// can either directly contain `PrimVal` or refer to some part of an `Allocation`.
-    ///
-    /// Before being initialized, arguments are `Value::ByVal(PrimVal::Undef)` and other locals are `None`.
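-    /// For example, a frame with one argument and one `StorageLive`-annotated temporary
-    /// starts out as `[Some(Value::ByVal(PrimVal::Undef)), None]`.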
-    pub locals: Vec<Option<Value>>,
-
-    ////////////////////////////////////////////////////////////////////////////////
-    // Current position within the function
-    ////////////////////////////////////////////////////////////////////////////////
-    /// The block that is currently being executed (or will be executed after the frames
-    /// above this one return).
-    pub block: mir::BasicBlock,
-
-    /// The index of the currently evaluated statement.
-    pub stmt: usize,
-}
-
-#[derive(Clone, Debug, Eq, PartialEq, Hash)]
-pub enum StackPopCleanup {
-    /// The stack frame existed to compute the initial value of a static/constant; make sure it
-    /// isn't modifiable afterwards, in the case of constants.
-    /// In the case of `static mut`, mark the memory to ensure it's never marked as immutable through
-    /// references or deallocated.
-    MarkStatic(Mutability),
-    /// A regular stack frame added due to a function call; when it is popped, execution
-    /// continues at the given block
-    Goto(mir::BasicBlock),
-    /// The main function and diverging functions have nowhere to return to
-    None,
-}
-
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
-pub struct DynamicLifetime {
-    pub frame: usize,
-    pub region: Option<region::Scope>, // "None" indicates "until the function ends"
-}
-
-#[derive(Copy, Clone, Debug)]
-pub struct ResourceLimits {
-    pub memory_size: u64,
-    pub step_limit: u64,
-    pub stack_limit: usize,
-}
-
-impl Default for ResourceLimits {
-    fn default() -> Self {
-        ResourceLimits {
-            memory_size: 100 * 1024 * 1024, // 100 MB
-            step_limit: 1_000_000,
-            stack_limit: 100,
-        }
-    }
-}
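-
-// A minimal usage sketch (hypothetical caller): override a single limit and keep the
-// defaults for the rest via struct update syntax:
-//
-//     let limits = ResourceLimits { step_limit: 10_000, ..ResourceLimits::default() };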
-
-#[derive(Copy, Clone, Debug)]
-pub struct TyAndPacked<'tcx> {
-    pub ty: Ty<'tcx>,
-    pub packed: bool,
-}
-
-#[derive(Copy, Clone, Debug)]
-pub struct ValTy<'tcx> {
-    pub value: Value,
-    pub ty: Ty<'tcx>,
-}
-
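-// Deref to the inner `Value` so that a `ValTy` can be used wherever a plain `Value`
-// is expected.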
-impl<'tcx> ::std::ops::Deref for ValTy<'tcx> {
-    type Target = Value;
-    fn deref(&self) -> &Value {
-        &self.value
-    }
-}
-
-#[derive(Copy, Clone, Debug)]
-pub struct PtrAndAlign {
-    pub ptr: Pointer,
-    /// Remember whether this lvalue is *supposed* to be aligned.
-    pub aligned: bool,
-}
-
-impl PtrAndAlign {
-    pub fn to_ptr<'tcx>(self) -> EvalResult<'tcx, MemoryPointer> {
-        self.ptr.to_ptr()
-    }
-    pub fn offset<'tcx, C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
-        Ok(PtrAndAlign {
-            ptr: self.ptr.offset(i, cx)?,
-            aligned: self.aligned,
-        })
-    }
-}
-
-impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
-    pub fn new(
-        tcx: TyCtxt<'a, 'tcx, 'tcx>,
-        limits: ResourceLimits,
-        machine_data: M::Data,
-        memory_data: M::MemoryData,
-    ) -> Self {
-        EvalContext {
-            machine_data,
-            tcx,
-            memory: Memory::new(&tcx.data_layout, limits.memory_size, memory_data),
-            suspended: HashMap::new(),
-            globals: HashMap::new(),
-            stack: Vec::new(),
-            stack_limit: limits.stack_limit,
-            steps_remaining: limits.step_limit,
-        }
-    }
-
-    pub fn alloc_ptr(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, MemoryPointer> {
-        let substs = self.substs();
-        self.alloc_ptr_with_substs(ty, substs)
-    }
-
-    pub fn alloc_ptr_with_substs(
-        &mut self,
-        ty: Ty<'tcx>,
-        substs: &'tcx Substs<'tcx>,
-    ) -> EvalResult<'tcx, MemoryPointer> {
-        let size = self.type_size_with_substs(ty, substs)?.expect(
-            "cannot alloc memory for unsized type",
-        );
-        let align = self.type_align_with_substs(ty, substs)?;
-        self.memory.allocate(size, align, MemoryKind::Stack)
-    }
-
-    pub fn memory(&self) -> &Memory<'a, 'tcx, M> {
-        &self.memory
-    }
-
-    pub fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M> {
-        &mut self.memory
-    }
-
-    pub fn stack(&self) -> &[Frame<'tcx>] {
-        &self.stack
-    }
-
-    #[inline]
-    pub fn cur_frame(&self) -> usize {
-        assert!(!self.stack.is_empty());
-        self.stack.len() - 1
-    }
-
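-    /// Turn a string constant into a fat-pointer value: a pointer to the (cached)
-    /// string bytes plus the length.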
-    pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> {
-        let ptr = self.memory.allocate_cached(s.as_bytes())?;
-        Ok(Value::ByValPair(
-            PrimVal::Ptr(ptr),
-            PrimVal::from_u128(s.len() as u128),
-        ))
-    }
-
-    pub(super) fn const_to_value(&mut self, const_val: &ConstVal<'tcx>) -> EvalResult<'tcx, Value> {
-        use rustc::middle::const_val::ConstVal::*;
-
-        let primval = match *const_val {
-            Integral(const_int) => PrimVal::Bytes(const_int.to_u128_unchecked()),
-
-            Float(val) => PrimVal::Bytes(val.bits),
-
-            Bool(b) => PrimVal::from_bool(b),
-            Char(c) => PrimVal::from_char(c),
-
-            Str(ref s) => return self.str_to_value(s),
-
-            ByteStr(ref bs) => {
-                let ptr = self.memory.allocate_cached(bs.data)?;
-                PrimVal::Ptr(ptr)
-            }
-
-            Unevaluated(def_id, substs) => {
-                let instance = self.resolve_associated_const(def_id, substs);
-                let cid = GlobalId {
-                    instance,
-                    promoted: None,
-                };
-                return Ok(Value::ByRef(*self.globals.get(&cid).expect("static/const not cached")));
-            }
-
-            Aggregate(..) |
-            Variant(_) => bug!("should not have aggregate or variant constants in MIR"),
-            // function items are zero sized and thus have no readable value
-            Function(..) => PrimVal::Undef,
-        };
-
-        Ok(Value::ByVal(primval))
-    }
-
-    pub(super) fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
-        // generics are weird, don't run this function on a generic
-        assert!(!ty.needs_subst());
-        ty.is_sized(self.tcx, ty::ParamEnv::empty(Reveal::All), DUMMY_SP)
-    }
-
-    pub fn load_mir(
-        &self,
-        instance: ty::InstanceDef<'tcx>,
-    ) -> EvalResult<'tcx, &'tcx mir::Mir<'tcx>> {
-        trace!("load mir {:?}", instance);
-        match instance {
-            ty::InstanceDef::Item(def_id) => {
-                self.tcx.maybe_optimized_mir(def_id).ok_or_else(|| {
-                    EvalErrorKind::NoMirFor(self.tcx.item_path_str(def_id)).into()
-                })
-            }
-            _ => Ok(self.tcx.instance_mir(instance)),
-        }
-    }
-
-    pub fn monomorphize(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
-        // miri doesn't care about lifetimes, and will choke on some crazy ones;
-        // let's simply get rid of them
-        let without_lifetimes = self.tcx.erase_regions(&ty);
-        let substituted = without_lifetimes.subst(self.tcx, substs);
-        self.tcx.normalize_associated_type(&substituted)
-    }
-
-    /// Return the size and alignment of the value at the given type.
-    /// Note that the value does not matter if the type is sized. For unsized types,
-    /// the value has to be a fat pointer, and we only care about the "extra" data in it.
-    pub fn size_and_align_of_dst(
-        &mut self,
-        ty: ty::Ty<'tcx>,
-        value: Value,
-    ) -> EvalResult<'tcx, (u64, u64)> {
-        if let Some(size) = self.type_size(ty)? {
-            Ok((size as u64, self.type_align(ty)? as u64))
-        } else {
-            match ty.sty {
-                ty::TyAdt(..) | ty::TyTuple(..) => {
-                    // First get the size of all statically known fields.
-                    // Don't use type_of::sizing_type_of because that expects `ty` to be sized,
-                    // and it also rounds up to alignment, which we want to avoid,
-                    // as the unsized field's alignment could be smaller.
-                    assert!(!ty.is_simd());
-                    let layout = self.type_layout(ty)?;
-                    debug!("DST {} layout: {:?}", ty, layout);
-
-                    let (sized_size, sized_align) = match *layout {
-                        ty::layout::Layout::Univariant { ref variant, .. } => {
-                            (
-                                variant.offsets.last().map_or(0, |o| o.bytes()),
-                                variant.align,
-                            )
-                        }
-                        _ => {
-                            bug!(
-                                "size_and_align_of_dst: expcted Univariant for `{}`, found {:#?}",
-                                ty,
-                                layout
-                            );
-                        }
-                    };
-                    debug!(
-                        "DST {} statically sized prefix size: {} align: {:?}",
-                        ty,
-                        sized_size,
-                        sized_align
-                    );
-
-                    // Recurse to get the size of the dynamically sized field (must be
-                    // the last field).
-                    let (unsized_size, unsized_align) = match ty.sty {
-                        ty::TyAdt(def, substs) => {
-                            let last_field = def.struct_variant().fields.last().unwrap();
-                            let field_ty = self.field_ty(substs, last_field);
-                            self.size_and_align_of_dst(field_ty, value)?
-                        }
-                        ty::TyTuple(ref types, _) => {
-                            let field_ty = types.last().unwrap();
-                            let field_ty = self.tcx.normalize_associated_type(field_ty);
-                            self.size_and_align_of_dst(field_ty, value)?
-                        }
-                        _ => bug!("We already checked that we know this type"),
-                    };
-
-                    // FIXME (#26403, #27023): We should be adding padding
-                    // to `sized_size` (to accommodate the `unsized_align`
-                    // required of the unsized field that follows) before
-                    // summing it with `sized_size`. (Note that since #26403
-                    // is unfixed, we do not yet add the necessary padding
-                    // here. But this is where the add would go.)
-
-                    // Return the sum of sizes and max of aligns.
-                    let size = sized_size + unsized_size;
-
-                    // Choose max of two known alignments (combined value must
-                    // be aligned according to more restrictive of the two).
-                    let align =
-                        sized_align.max(Align::from_bytes(unsized_align, unsized_align).unwrap());
-
-                    // Issue #27023: must add any necessary padding to `size`
-                    // (to make it a multiple of `align`) before returning it.
-                    //
-                    // Namely, the returned size should be, in C notation:
-                    //
-                    //   `size + ((size & (align-1)) ? align : 0)`
-                    //
-                    // emulated via the semi-standard fast bit trick:
-                    //
-                    //   `(size + (align-1)) & -align`
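-                    //
-                    // For example (illustrative numbers): size = 13, align = 8 gives
-                    //   `(13 + 7) & -8 = 20 & !7 = 16`.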
-
-                    let size = Size::from_bytes(size).abi_align(align).bytes();
-                    Ok((size, align.abi()))
-                }
-                ty::TyDynamic(..) => {
-                    let (_, vtable) = value.into_ptr_vtable_pair(&mut self.memory)?;
-                    // the second entry in the vtable is the dynamic size of the object.
-                    self.read_size_and_align_from_vtable(vtable)
-                }
-
-                ty::TySlice(_) | ty::TyStr => {
-                    let elem_ty = ty.sequence_element_type(self.tcx);
-                    let elem_size = self.type_size(elem_ty)?.expect(
-                        "slice element must be sized",
-                    ) as u64;
-                    let (_, len) = value.into_slice(&mut self.memory)?;
-                    let align = self.type_align(elem_ty)?;
-                    Ok((len * elem_size, align as u64))
-                }
-
-                _ => bug!("size_of_val::<{:?}>", ty),
-            }
-        }
-    }
-
-    /// Returns the normalized type of a struct field
-    fn field_ty(&self, param_substs: &Substs<'tcx>, f: &ty::FieldDef) -> ty::Ty<'tcx> {
-        self.tcx.normalize_associated_type(
-            &f.ty(self.tcx, param_substs),
-        )
-    }
-
-    pub fn type_size(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<u64>> {
-        self.type_size_with_substs(ty, self.substs())
-    }
-
-    pub fn type_align(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, u64> {
-        self.type_align_with_substs(ty, self.substs())
-    }
-
-    pub fn type_size_with_substs(
-        &self,
-        ty: Ty<'tcx>,
-        substs: &'tcx Substs<'tcx>,
-    ) -> EvalResult<'tcx, Option<u64>> {
-        let layout = self.type_layout_with_substs(ty, substs)?;
-        if layout.is_unsized() {
-            Ok(None)
-        } else {
-            Ok(Some(layout.size(&self.tcx.data_layout).bytes()))
-        }
-    }
-
-    pub fn type_align_with_substs(
-        &self,
-        ty: Ty<'tcx>,
-        substs: &'tcx Substs<'tcx>,
-    ) -> EvalResult<'tcx, u64> {
-        self.type_layout_with_substs(ty, substs).map(|layout| {
-            layout.align(&self.tcx.data_layout).abi()
-        })
-    }
-
-    pub fn type_layout(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, &'tcx Layout> {
-        self.type_layout_with_substs(ty, self.substs())
-    }
-
-    fn type_layout_with_substs(
-        &self,
-        ty: Ty<'tcx>,
-        substs: &'tcx Substs<'tcx>,
-    ) -> EvalResult<'tcx, &'tcx Layout> {
-        // TODO(solson): Is this inefficient? Needs investigation.
-        let ty = self.monomorphize(ty, substs);
-
-        ty.layout(self.tcx, ty::ParamEnv::empty(Reveal::All))
-            .map_err(|layout| EvalErrorKind::Layout(layout).into())
-    }
-
-    pub fn push_stack_frame(
-        &mut self,
-        instance: ty::Instance<'tcx>,
-        span: codemap::Span,
-        mir: &'tcx mir::Mir<'tcx>,
-        return_lvalue: Lvalue,
-        return_to_block: StackPopCleanup,
-    ) -> EvalResult<'tcx> {
-        ::log_settings::settings().indentation += 1;
-
-        /// Return the set of locals that have a storage annotation anywhere
-        fn collect_storage_annotations<'tcx>(mir: &'tcx mir::Mir<'tcx>) -> HashSet<mir::Local> {
-            use rustc::mir::StatementKind::*;
-
-            let mut set = HashSet::new();
-            for block in mir.basic_blocks() {
-                for stmt in block.statements.iter() {
-                    match stmt.kind {
-                        StorageLive(local) |
-                        StorageDead(local) => {
-                            set.insert(local);
-                        }
-                        _ => {}
-                    }
-                }
-            }
-            set
-        }
-
-        // Subtract 1 because `local_decls` includes the ReturnMemoryPointer, but we don't store a local
-        // `Value` for that.
-        let num_locals = mir.local_decls.len() - 1;
-
-        let locals = {
-            let annotated_locals = collect_storage_annotations(mir);
-            let mut locals = vec![None; num_locals];
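-            // Locals with a storage annotation start out dead (`None`) and only become
-            // live once `StorageLive` is executed; all others are live for the whole frame.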
-            for i in 0..num_locals {
-                let local = mir::Local::new(i + 1);
-                if !annotated_locals.contains(&local) {
-                    locals[i] = Some(Value::ByVal(PrimVal::Undef));
-                }
-            }
-            locals
-        };
-
-        self.stack.push(Frame {
-            mir,
-            block: mir::START_BLOCK,
-            return_to_block,
-            return_lvalue,
-            locals,
-            span,
-            instance,
-            stmt: 0,
-        });
-
-        self.memory.cur_frame = self.cur_frame();
-
-        if self.stack.len() > self.stack_limit {
-            err!(StackFrameLimitReached)
-        } else {
-            Ok(())
-        }
-    }
-
-    pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> {
-        ::log_settings::settings().indentation -= 1;
-        self.end_region(None)?;
-        let frame = self.stack.pop().expect(
-            "tried to pop a stack frame, but there were none",
-        );
-        if !self.stack.is_empty() {
-            // TODO: Is this the correct time to start considering these accesses as originating from the returned-to stack frame?
-            self.memory.cur_frame = self.cur_frame();
-        }
-        match frame.return_to_block {
-            StackPopCleanup::MarkStatic(mutable) => {
-                if let Lvalue::Ptr { ptr, .. } = frame.return_lvalue {
-                    // FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions
-                    self.memory.mark_static_initalized(
-                        ptr.to_ptr()?.alloc_id,
-                        mutable,
-                    )?
-                } else {
-                    bug!("StackPopCleanup::MarkStatic on: {:?}", frame.return_lvalue);
-                }
-            }
-            StackPopCleanup::Goto(target) => self.goto_block(target),
-            StackPopCleanup::None => {}
-        }
-        // deallocate all locals that are backed by an allocation
-        for local in frame.locals {
-            self.deallocate_local(local)?;
-        }
-
-        Ok(())
-    }
-
-    pub fn deallocate_local(&mut self, local: Option<Value>) -> EvalResult<'tcx> {
-        if let Some(Value::ByRef(ptr)) = local {
-            trace!("deallocating local");
-            let ptr = ptr.to_ptr()?;
-            self.memory.dump_alloc(ptr.alloc_id);
-            match self.memory.get(ptr.alloc_id)?.kind {
-                // for a constant like `const FOO: &i32 = &1;` the local containing
-                // the `1` is referred to by the global. We transitively marked everything
-                // the global refers to as static itself, so we don't free it here
-                MemoryKind::Static => {}
-                MemoryKind::Stack => self.memory.deallocate(ptr, None, MemoryKind::Stack)?,
-                other => bug!("local contained non-stack memory: {:?}", other),
-            }
-        };
-        Ok(())
-    }
-
-    pub fn assign_discr_and_fields(
-        &mut self,
-        dest: Lvalue,
-        dest_ty: Ty<'tcx>,
-        discr_offset: u64,
-        operands: &[mir::Operand<'tcx>],
-        discr_val: u128,
-        variant_idx: usize,
-        discr_size: u64,
-        discr_signed: bool,
-    ) -> EvalResult<'tcx> {
-        // FIXME(solson)
-        let dest_ptr = self.force_allocation(dest)?.to_ptr()?;
-
-        let discr_dest = dest_ptr.offset(discr_offset, &self)?;
-        self.memory.write_primval(discr_dest, PrimVal::Bytes(discr_val), discr_size, discr_signed)?;
-
-        let dest = Lvalue::Ptr {
-            ptr: PtrAndAlign {
-                ptr: dest_ptr.into(),
-                aligned: true,
-            },
-            extra: LvalueExtra::DowncastVariant(variant_idx),
-        };
-
-        self.assign_fields(dest, dest_ty, operands)
-    }
-
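-    /// Write the given operands into the fields of `dest`, in order. Assigning to a ZST
-    /// destination is a no-op, and a primitive destination takes exactly one operand.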
-    pub fn assign_fields(
-        &mut self,
-        dest: Lvalue,
-        dest_ty: Ty<'tcx>,
-        operands: &[mir::Operand<'tcx>],
-    ) -> EvalResult<'tcx> {
-        if self.type_size(dest_ty)? == Some(0) {
-            // assigning to a ZST is a no-op
-            return Ok(());
-        }
-        if self.ty_to_primval_kind(dest_ty).is_ok() {
-            assert_eq!(operands.len(), 1);
-            let value = self.eval_operand(&operands[0])?;
-            return self.write_value(value, dest);
-        }
-        for (field_index, operand) in operands.iter().enumerate() {
-            let value = self.eval_operand(operand)?;
-            let field_dest = self.lvalue_field(dest, mir::Field::new(field_index), dest_ty, value.ty)?;
-            self.write_value(value, field_dest)?;
-        }
-        Ok(())
-    }
-
-    /// Evaluate an assignment statement.
-    ///
-    /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
-    /// type writes its results directly into the memory specified by the lvalue.
-    pub(super) fn eval_rvalue_into_lvalue(
-        &mut self,
-        rvalue: &mir::Rvalue<'tcx>,
-        lvalue: &mir::Lvalue<'tcx>,
-    ) -> EvalResult<'tcx> {
-        let dest = self.eval_lvalue(lvalue)?;
-        let dest_ty = self.lvalue_ty(lvalue);
-        let dest_layout = self.type_layout(dest_ty)?;
-
-        use rustc::mir::Rvalue::*;
-        match *rvalue {
-            Use(ref operand) => {
-                let value = self.eval_operand(operand)?.value;
-                let valty = ValTy {
-                    value,
-                    ty: dest_ty,
-                };
-                self.write_value(valty, dest)?;
-            }
-
-            BinaryOp(bin_op, ref left, ref right) => {
-                let left = self.eval_operand(left)?;
-                let right = self.eval_operand(right)?;
-                if self.intrinsic_overflowing(
-                    bin_op,
-                    left,
-                    right,
-                    dest,
-                    dest_ty,
-                )?
-                {
-                    // There was an overflow in an unchecked binop.  Right now, we consider this an error and bail out.
-                    // The rationale is that the reason rustc emits unchecked binops in release mode (vs. the checked binops
-                    // it emits in debug mode) is performance, but it doesn't cost us any performance in miri.
-                    // If, however, the compiler ever starts transforming unchecked intrinsics into unchecked binops,
-                    // we have to go back to just ignoring the overflow here.
-                    return err!(OverflowingMath);
-                }
-            }
-
-            CheckedBinaryOp(bin_op, ref left, ref right) => {
-                let left = self.eval_operand(left)?;
-                let right = self.eval_operand(right)?;
-                self.intrinsic_with_overflow(
-                    bin_op,
-                    left,
-                    right,
-                    dest,
-                    dest_ty,
-                )?;
-            }
-
-            UnaryOp(un_op, ref operand) => {
-                let val = self.eval_operand_to_primval(operand)?;
-                let kind = self.ty_to_primval_kind(dest_ty)?;
-                self.write_primval(
-                    dest,
-                    operator::unary_op(un_op, val, kind)?,
-                    dest_ty,
-                )?;
-            }
-
-            // Skip everything for zsts
-            Aggregate(..) if self.type_size(dest_ty)? == Some(0) => {}
-
-            Aggregate(ref kind, ref operands) => {
-                self.inc_step_counter_and_check_limit(operands.len() as u64)?;
-                use rustc::ty::layout::Layout::*;
-                match *dest_layout {
-                    Univariant { ref variant, .. } => {
-                        self.write_maybe_aligned_mut(!variant.packed, |ecx| {
-                            ecx.assign_fields(dest, dest_ty, operands)
-                        })?;
-                    }
-
-                    Array { .. } => {
-                        self.assign_fields(dest, dest_ty, operands)?;
-                    }
-
-                    General {
-                        discr,
-                        ref variants,
-                        ..
-                    } => {
-                        if let mir::AggregateKind::Adt(adt_def, variant, _, _) = **kind {
-                            let discr_val = adt_def
-                                .discriminants(self.tcx)
-                                .nth(variant)
-                                .expect("broken mir: Adt variant id invalid")
-                                .to_u128_unchecked();
-                            let discr_size = discr.size().bytes();
-
-                            self.assign_discr_and_fields(
-                                dest,
-                                dest_ty,
-                                variants[variant].offsets[0].bytes(),
-                                operands,
-                                discr_val,
-                                variant,
-                                discr_size,
-                                false,
-                            )?;
-                        } else {
-                            bug!("tried to assign {:?} to Layout::General", kind);
-                        }
-                    }
-
-                    RawNullablePointer { nndiscr, .. } => {
-                        if let mir::AggregateKind::Adt(_, variant, _, _) = **kind {
-                            if nndiscr == variant as u64 {
-                                assert_eq!(operands.len(), 1);
-                                let operand = &operands[0];
-                                let value = self.eval_operand(operand)?;
-                                self.write_value(value, dest)?;
-                            } else {
-                                if let Some(operand) = operands.get(0) {
-                                    assert_eq!(operands.len(), 1);
-                                    let operand_ty = self.operand_ty(operand);
-                                    assert_eq!(self.type_size(operand_ty)?, Some(0));
-                                }
-                                self.write_null(dest, dest_ty)?;
-                            }
-                        } else {
-                            bug!("tried to assign {:?} to Layout::RawNullablePointer", kind);
-                        }
-                    }
-
-                    StructWrappedNullablePointer {
-                        nndiscr,
-                        ref discrfield_source,
-                        ref nonnull,
-                        ..
-                    } => {
-                        if let mir::AggregateKind::Adt(_, variant, _, _) = **kind {
-                            if nndiscr == variant as u64 {
-                                self.write_maybe_aligned_mut(!nonnull.packed, |ecx| {
-                                    ecx.assign_fields(dest, dest_ty, operands)
-                                })?;
-                            } else {
-                                for operand in operands {
-                                    let operand_ty = self.operand_ty(operand);
-                                    assert_eq!(self.type_size(operand_ty)?, Some(0));
-                                }
-                                self.write_struct_wrapped_null_pointer(
-                                    dest_ty,
-                                    nndiscr,
-                                    discrfield_source,
-                                    dest,
-                                )?;
-                            }
-                        } else {
-                            bug!("tried to assign {:?} to Layout::RawNullablePointer", kind);
-                        }
-                    }
-
-                    CEnum { .. } => {
-                        assert_eq!(operands.len(), 0);
-                        if let mir::AggregateKind::Adt(adt_def, variant, _, _) = **kind {
-                            let n = adt_def
-                                .discriminants(self.tcx)
-                                .nth(variant)
-                                .expect("broken mir: Adt variant index invalid")
-                                .to_u128_unchecked();
-                            self.write_primval(dest, PrimVal::Bytes(n), dest_ty)?;
-                        } else {
-                            bug!("tried to assign {:?} to Layout::CEnum", kind);
-                        }
-                    }
-
-                    Vector { count, .. } => {
-                        debug_assert_eq!(count, operands.len() as u64);
-                        self.assign_fields(dest, dest_ty, operands)?;
-                    }
-
-                    UntaggedUnion { ref variants } => {
-                        assert_eq!(operands.len(), 1);
-                        let operand = &operands[0];
-                        let value = self.eval_operand(operand)?;
-                        self.write_maybe_aligned_mut(!variants.packed, |ecx| {
-                            ecx.write_value(value, dest)
-                        })?;
-                    }
-
-                    _ => {
-                        return err!(Unimplemented(format!(
-                            "can't handle destination layout {:?} when assigning {:?}",
-                            dest_layout,
-                            kind
-                        )));
-                    }
-                }
-            }
-
-            Repeat(ref operand, _) => {
-                let (elem_ty, length) = match dest_ty.sty {
-                    ty::TyArray(elem_ty, n) => (elem_ty, n.val.to_const_int().unwrap().to_u64().unwrap()),
-                    _ => {
-                        bug!(
-                            "tried to assign array-repeat to non-array type {:?}",
-                            dest_ty
-                        )
-                    }
-                };
-                self.inc_step_counter_and_check_limit(length)?;
-                let elem_size = self.type_size(elem_ty)?.expect(
-                    "repeat element type must be sized",
-                );
-                let value = self.eval_operand(operand)?.value;
-
-                // FIXME(solson)
-                let dest = Pointer::from(self.force_allocation(dest)?.to_ptr()?);
-
-                for i in 0..length {
-                    let elem_dest = dest.offset(i * elem_size, &self)?;
-                    self.write_value_to_ptr(value, elem_dest, elem_ty)?;
-                }
-            }
-
-            Len(ref lvalue) => {
-                // FIXME(CTFE): don't allow computing the length of arrays in const eval
-                let src = self.eval_lvalue(lvalue)?;
-                let ty = self.lvalue_ty(lvalue);
-                let (_, len) = src.elem_ty_and_len(ty);
-                self.write_primval(
-                    dest,
-                    PrimVal::from_u128(len as u128),
-                    dest_ty,
-                )?;
-            }
-
-            Ref(_, _, ref lvalue) => {
-                let src = self.eval_lvalue(lvalue)?;
-                // We ignore the alignment of the lvalue here -- special handling for packed structs ends
-                // at the `&` operator.
-                let (ptr, extra) = self.force_allocation(src)?.to_ptr_extra_aligned();
-
-                let val = match extra {
-                    LvalueExtra::None => ptr.ptr.to_value(),
-                    LvalueExtra::Length(len) => ptr.ptr.to_value_with_len(len),
-                    LvalueExtra::Vtable(vtable) => ptr.ptr.to_value_with_vtable(vtable),
-                    LvalueExtra::DowncastVariant(..) => {
-                        bug!("attempted to take a reference to an enum downcast lvalue")
-                    }
-                };
-                let valty = ValTy {
-                    value: val,
-                    ty: dest_ty,
-                };
-                self.write_value(valty, dest)?;
-            }
-
-            NullaryOp(mir::NullOp::Box, ty) => {
-                M::box_alloc(self, ty, dest)?;
-            }
-
-            NullaryOp(mir::NullOp::SizeOf, ty) => {
-                let size = self.type_size(ty)?.expect(
-                    "SizeOf nullary MIR operator called for unsized type",
-                );
-                self.write_primval(
-                    dest,
-                    PrimVal::from_u128(size as u128),
-                    dest_ty,
-                )?;
-            }
-
-            Cast(kind, ref operand, cast_ty) => {
-                debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest_ty);
-                use rustc::mir::CastKind::*;
-                match kind {
-                    Unsize => {
-                        let src = self.eval_operand(operand)?;
-                        self.unsize_into(src.value, src.ty, dest, dest_ty)?;
-                    }
-
-                    Misc => {
-                        let src = self.eval_operand(operand)?;
-                        if self.type_is_fat_ptr(src.ty) {
-                            match (src.value, self.type_is_fat_ptr(dest_ty)) {
-                                (Value::ByRef { .. }, _) |
-                                (Value::ByValPair(..), true) => {
-                                    let valty = ValTy {
-                                        value: src.value,
-                                        ty: dest_ty,
-                                    };
-                                    self.write_value(valty, dest)?;
-                                }
-                                (Value::ByValPair(data, _), false) => {
-                                    let valty = ValTy {
-                                        value: Value::ByVal(data),
-                                        ty: dest_ty,
-                                    };
-                                    self.write_value(valty, dest)?;
-                                }
-                                (Value::ByVal(_), _) => bug!("expected fat ptr"),
-                            }
-                        } else {
-                            let src_val = self.value_to_primval(src)?;
-                            let dest_val = self.cast_primval(src_val, src.ty, dest_ty)?;
-                            let valty = ValTy {
-                                value: Value::ByVal(dest_val),
-                                ty: dest_ty,
-                            };
-                            self.write_value(valty, dest)?;
-                        }
-                    }
-
-                    ReifyFnPointer => {
-                        match self.operand_ty(operand).sty {
-                            ty::TyFnDef(def_id, substs) => {
-                                let instance = resolve(self.tcx, def_id, substs);
-                                let fn_ptr = self.memory.create_fn_alloc(instance);
-                                let valty = ValTy {
-                                    value: Value::ByVal(PrimVal::Ptr(fn_ptr)),
-                                    ty: dest_ty,
-                                };
-                                self.write_value(valty, dest)?;
-                            }
-                            ref other => bug!("reify fn pointer on {:?}", other),
-                        }
-                    }
-
-                    UnsafeFnPointer => {
-                        match dest_ty.sty {
-                            ty::TyFnPtr(_) => {
-                                let mut src = self.eval_operand(operand)?;
-                                src.ty = dest_ty;
-                                self.write_value(src, dest)?;
-                            }
-                            ref other => bug!("fn to unsafe fn cast on {:?}", other),
-                        }
-                    }
-
-                    ClosureFnPointer => {
-                        match self.operand_ty(operand).sty {
-                            ty::TyClosure(def_id, substs) => {
-                                let instance = resolve_closure(
-                                    self.tcx,
-                                    def_id,
-                                    substs,
-                                    ty::ClosureKind::FnOnce,
-                                );
-                                let fn_ptr = self.memory.create_fn_alloc(instance);
-                                let valty = ValTy {
-                                    value: Value::ByVal(PrimVal::Ptr(fn_ptr)),
-                                    ty: dest_ty,
-                                };
-                                self.write_value(valty, dest)?;
-                            }
-                            ref other => bug!("closure fn pointer on {:?}", other),
-                        }
-                    }
-                }
-            }
-
-            Discriminant(ref lvalue) => {
-                let lval = self.eval_lvalue(lvalue)?;
-                let ty = self.lvalue_ty(lvalue);
-                let ptr = self.force_allocation(lval)?.to_ptr()?;
-                let discr_val = self.read_discriminant_value(ptr, ty)?;
-                if let ty::TyAdt(adt_def, _) = ty.sty {
-                    trace!("Read discriminant {}, valid discriminants {:?}", discr_val, adt_def.discriminants(self.tcx).collect::<Vec<_>>());
-                    if adt_def.discriminants(self.tcx).all(|v| {
-                        discr_val != v.to_u128_unchecked()
-                    })
-                    {
-                        return err!(InvalidDiscriminant);
-                    }
-                    self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?;
-                } else {
-                    bug!("rustc only generates Rvalue::Discriminant for enums");
-                }
-            }
-        }
-
-        if log_enabled!(::log::LogLevel::Trace) {
-            self.dump_local(dest);
-        }
-
-        Ok(())
-    }
-
-    pub(crate) fn write_struct_wrapped_null_pointer(
-        &mut self,
-        dest_ty: ty::Ty<'tcx>,
-        nndiscr: u64,
-        discrfield_source: &layout::FieldPath,
-        dest: Lvalue,
-    ) -> EvalResult<'tcx> {
-        let (offset, TyAndPacked { ty, packed }) = self.nonnull_offset_and_ty(
-            dest_ty,
-            nndiscr,
-            discrfield_source,
-        )?;
-        let nonnull = self.force_allocation(dest)?.to_ptr()?.offset(
-            offset.bytes(),
-            &self,
-        )?;
-        trace!("struct wrapped nullable pointer type: {}", ty);
-        // only the pointer part of a fat pointer is used for this space optimization
-        let discr_size = self.type_size(ty)?.expect(
-            "bad StructWrappedNullablePointer discrfield",
-        );
-        self.memory.write_maybe_aligned_mut(!packed, |mem| {
-            // We're writing 0, signedness does not matter
-            mem.write_primval(nonnull, PrimVal::Bytes(0), discr_size, false)
-        })
-    }
-
-    pub(super) fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool {
-        match ty.sty {
-            ty::TyRawPtr(ref tam) |
-            ty::TyRef(_, ref tam) => !self.type_is_sized(tam.ty),
-            ty::TyAdt(def, _) if def.is_box() => !self.type_is_sized(ty.boxed_ty()),
-            _ => false,
-        }
-    }
-
-    pub(super) fn nonnull_offset_and_ty(
-        &self,
-        ty: Ty<'tcx>,
-        nndiscr: u64,
-        discrfield: &[u32],
-    ) -> EvalResult<'tcx, (Size, TyAndPacked<'tcx>)> {
-        // Skip the constant 0 at the start meant for LLVM GEP and the outer non-null variant
-        let path = discrfield.iter().skip(2).map(|&i| i as usize);
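-        // E.g. discrfield = [0, 1, 0]: skip the GEP constant, take field 1 of the
-        // non-null variant (below), then descend into that field's field 0.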
-
-        // Handle the field index for the outer non-null variant.
-        let (inner_offset, inner_ty) = match ty.sty {
-            ty::TyAdt(adt_def, substs) => {
-                let variant = &adt_def.variants[nndiscr as usize];
-                let index = discrfield[1];
-                let field = &variant.fields[index as usize];
-                (
-                    self.get_field_offset(ty, index as usize)?,
-                    field.ty(self.tcx, substs),
-                )
-            }
-            _ => bug!("non-enum for StructWrappedNullablePointer: {}", ty),
-        };
-
-        self.field_path_offset_and_ty(inner_offset, inner_ty, path)
-    }
-
-    fn field_path_offset_and_ty<I: Iterator<Item = usize>>(
-        &self,
-        mut offset: Size,
-        mut ty: Ty<'tcx>,
-        path: I,
-    ) -> EvalResult<'tcx, (Size, TyAndPacked<'tcx>)> {
-        // The initial 0 intended for LLVM GEP has already been skipped by the caller.
-        let mut packed = false;
-        for field_index in path {
-            let field_offset = self.get_field_offset(ty, field_index)?;
-            trace!(
-                "field_path_offset_and_ty: {}, {}, {:?}, {:?}",
-                field_index,
-                ty,
-                field_offset,
-                offset
-            );
-            let field_ty = self.get_field_ty(ty, field_index)?;
-            ty = field_ty.ty;
-            packed = packed || field_ty.packed;
-            offset = offset
-                .checked_add(field_offset, &self.tcx.data_layout)
-                .unwrap();
-        }
-
-        Ok((offset, TyAndPacked { ty, packed }))
-    }
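-
-    /// Return the type of one component of a fat pointer to `pointee_ty`: field 0 is
-    /// the data pointer (modelled here as `*const u8`), field 1 is the length for
-    /// slices/`str` or the vtable pointer for trait objects.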
-    fn get_fat_field(
-        &self,
-        pointee_ty: Ty<'tcx>,
-        field_index: usize,
-    ) -> EvalResult<'tcx, Ty<'tcx>> {
-        match (field_index, &self.tcx.struct_tail(pointee_ty).sty) {
-            (1, &ty::TyStr) |
-            (1, &ty::TySlice(_)) => Ok(self.tcx.types.usize),
-            (1, &ty::TyDynamic(..)) |
-            (0, _) => Ok(self.tcx.mk_imm_ptr(self.tcx.types.u8)),
-            _ => bug!("invalid fat pointee type: {}", pointee_ty),
-        }
-    }
-
-    /// Returns the field type and whether the field is packed
-    pub fn get_field_ty(
-        &self,
-        ty: Ty<'tcx>,
-        field_index: usize,
-    ) -> EvalResult<'tcx, TyAndPacked<'tcx>> {
-        match ty.sty {
-            ty::TyAdt(adt_def, _) if adt_def.is_box() => Ok(TyAndPacked {
-                ty: self.get_fat_field(ty.boxed_ty(), field_index)?,
-                packed: false,
-            }),
-            ty::TyAdt(adt_def, substs) if adt_def.is_enum() => {
-                use rustc::ty::layout::Layout::*;
-                match *self.type_layout(ty)? {
-                    RawNullablePointer { nndiscr, .. } => Ok(TyAndPacked {
-                        ty: adt_def.variants[nndiscr as usize].fields[field_index].ty(
-                            self.tcx,
-                            substs,
-                        ),
-                        packed: false,
-                    }),
-                    StructWrappedNullablePointer {
-                        nndiscr,
-                        ref nonnull,
-                        ..
-                    } => {
-                        let ty = adt_def.variants[nndiscr as usize].fields[field_index].ty(
-                            self.tcx,
-                            substs,
-                        );
-                        Ok(TyAndPacked {
-                            ty,
-                            packed: nonnull.packed,
-                        })
-                    }
-                    // mir optimizations treat single variant enums as structs
-                    General { .. } if adt_def.variants.len() == 1 => Ok(TyAndPacked {
-                        ty: adt_def.variants[0].fields[field_index].ty(self.tcx, substs),
-                        packed: false,
-                    }),
-                    _ => {
-                        err!(Unimplemented(format!(
-                            "get_field_ty can't handle enum type: {:?}, {:?}",
-                            ty,
-                            ty.sty
-                        )))
-                    }
-                }
-            }
-            ty::TyAdt(adt_def, substs) => {
-                let variant_def = adt_def.struct_variant();
-                use rustc::ty::layout::Layout::*;
-                match *self.type_layout(ty)? {
-                    UntaggedUnion { ref variants } => Ok(TyAndPacked {
-                        ty: variant_def.fields[field_index].ty(self.tcx, substs),
-                        packed: variants.packed,
-                    }),
-                    Univariant { ref variant, .. } => Ok(TyAndPacked {
-                        ty: variant_def.fields[field_index].ty(self.tcx, substs),
-                        packed: variant.packed,
-                    }),
-                    _ => {
-                        err!(Unimplemented(format!(
-                            "get_field_ty can't handle struct type: {:?}, {:?}",
-                            ty,
-                            ty.sty
-                        )))
-                    }
-                }
-            }
-
-            ty::TyTuple(fields, _) => Ok(TyAndPacked {
-                ty: fields[field_index],
-                packed: false,
-            }),
-
-            ty::TyRef(_, ref tam) |
-            ty::TyRawPtr(ref tam) => Ok(TyAndPacked {
-                ty: self.get_fat_field(tam.ty, field_index)?,
-                packed: false,
-            }),
-
-            ty::TyArray(ref inner, _) => Ok(TyAndPacked {
-                ty: inner,
-                packed: false,
-            }),
-
-            ty::TyClosure(def_id, ref closure_substs) => Ok(TyAndPacked {
-                ty: closure_substs.upvar_tys(def_id, self.tcx).nth(field_index).unwrap(),
-                packed: false,
-            }),
-
-            _ => {
-                err!(Unimplemented(
-                    format!("can't handle type: {:?}, {:?}", ty, ty.sty),
-                ))
-            }
-        }
-    }
-
-    fn get_field_offset(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, Size> {
-        // Also see lvalue_field in lvalue.rs, which handles more cases but needs an actual value at the given type
-        let layout = self.type_layout(ty)?;
-
-        use rustc::ty::layout::Layout::*;
-        match *layout {
-            Univariant { ref variant, .. } => Ok(variant.offsets[field_index]),
-            FatPointer { .. } => {
-                let bytes = field_index as u64 * self.memory.pointer_size();
-                Ok(Size::from_bytes(bytes))
-            }
-            StructWrappedNullablePointer { ref nonnull, .. } => Ok(nonnull.offsets[field_index]),
-            UntaggedUnion { .. } => Ok(Size::from_bytes(0)),
-            // mir optimizations treat single variant enums as structs
-            General { ref variants, .. } if variants.len() == 1 => Ok(variants[0].offsets[field_index]),
-            _ => {
-                let msg = format!(
-                    "get_field_offset: can't handle type: {:?}, with layout: {:?}",
-                    ty,
-                    layout
-                );
-                err!(Unimplemented(msg))
-            }
-        }
-    }
-
-    pub fn get_field_count(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, u64> {
-        let layout = self.type_layout(ty)?;
-
-        use rustc::ty::layout::Layout::*;
-        match *layout {
-            Univariant { ref variant, .. } => Ok(variant.offsets.len() as u64),
-            FatPointer { .. } => Ok(2),
-            StructWrappedNullablePointer { ref nonnull, .. } => Ok(nonnull.offsets.len() as u64),
-            Vector { count, .. } |
-            Array { count, .. } => Ok(count),
-            Scalar { .. } => Ok(0),
-            UntaggedUnion { .. } => Ok(1),
-            _ => {
-                let msg = format!(
-                    "get_field_count: can't handle type: {:?}, with layout: {:?}",
-                    ty,
-                    layout
-                );
-                err!(Unimplemented(msg))
-            }
-        }
-    }
-
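-    /// Convenience wrapper: evaluates an operand and immediately reduces the
-    /// result to a `PrimVal`.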
-    pub(super) fn eval_operand_to_primval(
-        &mut self,
-        op: &mir::Operand<'tcx>,
-    ) -> EvalResult<'tcx, PrimVal> {
-        let valty = self.eval_operand(op)?;
-        self.value_to_primval(valty)
-    }
-
-    pub(crate) fn operands_to_args(
-        &mut self,
-        ops: &[mir::Operand<'tcx>],
-    ) -> EvalResult<'tcx, Vec<ValTy<'tcx>>> {
-        ops.into_iter()
-            .map(|op| self.eval_operand(op))
-            .collect()
-    }
-
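-    /// Evaluates a MIR operand to a `Value`: either reads the lvalue being
-    /// consumed, or materializes a constant (looking up promoteds in the
-    /// global cache).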
-    pub fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> {
-        use rustc::mir::Operand::*;
-        match *op {
-            Consume(ref lvalue) => {
-                Ok(ValTy {
-                    value: self.eval_and_read_lvalue(lvalue)?,
-                    ty: self.operand_ty(op),
-                })
-            },
-
-            Constant(ref constant) => {
-                use rustc::mir::Literal;
-                let mir::Constant { ref literal, .. } = **constant;
-                let value = match *literal {
-                    Literal::Value { ref value } => self.const_to_value(&value.val)?,
-
-                    Literal::Promoted { index } => {
-                        let cid = GlobalId {
-                            instance: self.frame().instance,
-                            promoted: Some(index),
-                        };
-                        Value::ByRef(*self.globals.get(&cid).expect("promoted not cached"))
-                    }
-                };
-
-                Ok(ValTy {
-                    value,
-                    ty: self.operand_ty(op),
-                })
-            }
-        }
-    }
-
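-    /// Reads the discriminant of the enum value stored at `adt_ptr`. The
-    /// encoding depends on the layout: a plain tag for `General` and `CEnum`,
-    /// and a (possibly nested) non-null pointer field for the two
-    /// nullable-pointer optimizations.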
-    pub fn read_discriminant_value(
-        &self,
-        adt_ptr: MemoryPointer,
-        adt_ty: Ty<'tcx>,
-    ) -> EvalResult<'tcx, u128> {
-        use rustc::ty::layout::Layout::*;
-        let adt_layout = self.type_layout(adt_ty)?;
-        //trace!("read_discriminant_value {:#?}", adt_layout);
-
-        let discr_val = match *adt_layout {
-            General { discr, .. } => {
-                let discr_size = discr.size().bytes();
-                self.memory.read_primval(adt_ptr, discr_size, false)?.to_bytes()?
-            }
-
-            CEnum {
-                discr,
-                signed,
-                ..
-            } => {
-                let discr_size = discr.size().bytes();
-                self.memory.read_primval(adt_ptr, discr_size, signed)?.to_bytes()?
-            }
-
-            RawNullablePointer { nndiscr, value } => {
-                let discr_size = value.size(&self.tcx.data_layout).bytes();
-                trace!("rawnullablepointer with size {}", discr_size);
-                self.read_nonnull_discriminant_value(
-                    adt_ptr,
-                    nndiscr as u128,
-                    discr_size,
-                )?
-            }
-
-            StructWrappedNullablePointer {
-                nndiscr,
-                ref discrfield_source,
-                ..
-            } => {
-                let (offset, TyAndPacked { ty, packed }) = self.nonnull_offset_and_ty(
-                    adt_ty,
-                    nndiscr,
-                    discrfield_source,
-                )?;
-                let nonnull = adt_ptr.offset(offset.bytes(), &*self)?;
-                trace!("struct wrapped nullable pointer type: {}", ty);
-                // only the pointer part of a fat pointer is used for this space optimization
-                let discr_size = self.type_size(ty)?.expect(
-                    "bad StructWrappedNullablePointer discrfield",
-                );
-                self.read_maybe_aligned(!packed, |ectx| {
-                    ectx.read_nonnull_discriminant_value(nonnull, nndiscr as u128, discr_size)
-                })?
-            }
-
-            // The discriminant_value intrinsic returns 0 for non-sum types.
-            Array { .. } |
-            FatPointer { .. } |
-            Scalar { .. } |
-            Univariant { .. } |
-            Vector { .. } |
-            UntaggedUnion { .. } => 0,
-        };
-
-        Ok(discr_val)
-    }
-
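-    /// Decodes the discriminant for nullable-pointer layouts: a null field
-    /// selects the "other" variant, a non-null field selects variant `nndiscr`.
-    /// E.g. for `Option<&T>`, where the non-null variant `Some` has
-    /// `nndiscr == 1`, a null pointer decodes to discriminant 0 (`None`).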
-    fn read_nonnull_discriminant_value(
-        &self,
-        ptr: MemoryPointer,
-        nndiscr: u128,
-        discr_size: u64,
-    ) -> EvalResult<'tcx, u128> {
-        trace!(
-            "read_nonnull_discriminant_value: {:?}, {}, {}",
-            ptr,
-            nndiscr,
-            discr_size
-        );
-        // We are only interested in 0 vs. non-0; the sign does not matter here
-        let null = match self.memory.read_primval(ptr, discr_size, false)? {
-            PrimVal::Bytes(0) => true,
-            PrimVal::Bytes(_) |
-            PrimVal::Ptr(..) => false,
-            PrimVal::Undef => return err!(ReadUndefBytes),
-        };
-        assert!(nndiscr == 0 || nndiscr == 1);
-        Ok(if !null { nndiscr } else { 1 - nndiscr })
-    }
-
-    pub fn read_global_as_value(&self, gid: GlobalId) -> Value {
-        Value::ByRef(*self.globals.get(&gid).expect("global not cached"))
-    }
-
-    pub fn operand_ty(&self, operand: &mir::Operand<'tcx>) -> Ty<'tcx> {
-        self.monomorphize(operand.ty(self.mir(), self.tcx), self.substs())
-    }
-
-    fn copy(&mut self, src: Pointer, dest: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx> {
-        let size = self.type_size(ty)?.expect(
-            "cannot copy from an unsized type",
-        );
-        let align = self.type_align(ty)?;
-        self.memory.copy(src, dest, size, align, false)?;
-        Ok(())
-    }
-
-    pub fn is_packed(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, bool> {
-        let layout = self.type_layout(ty)?;
-        use rustc::ty::layout::Layout::*;
-        Ok(match *layout {
-            Univariant { ref variant, .. } => variant.packed,
-
-            StructWrappedNullablePointer { ref nonnull, .. } => nonnull.packed,
-
-            UntaggedUnion { ref variants } => variants.packed,
-
-            // can only apply #[repr(packed)] to struct and union
-            _ => false,
-        })
-    }
-
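-    /// Ensures the lvalue is backed by memory: a local that is currently
-    /// stored by value is moved into a fresh allocation so that a pointer to
-    /// it can be handed out.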
-    pub fn force_allocation(&mut self, lvalue: Lvalue) -> EvalResult<'tcx, Lvalue> {
-        let new_lvalue = match lvalue {
-            Lvalue::Local { frame, local } => {
-                // -1 since we don't store the return value
-                match self.stack[frame].locals[local.index() - 1] {
-                    None => return err!(DeadLocal),
-                    Some(Value::ByRef(ptr)) => {
-                        Lvalue::Ptr {
-                            ptr,
-                            extra: LvalueExtra::None,
-                        }
-                    }
-                    Some(val) => {
-                        let ty = self.stack[frame].mir.local_decls[local].ty;
-                        let ty = self.monomorphize(ty, self.stack[frame].instance.substs);
-                        let substs = self.stack[frame].instance.substs;
-                        let ptr = self.alloc_ptr_with_substs(ty, substs)?;
-                        self.stack[frame].locals[local.index() - 1] =
-                            Some(Value::by_ref(ptr.into())); // it stays live
-                        self.write_value_to_ptr(val, ptr.into(), ty)?;
-                        Lvalue::from_ptr(ptr)
-                    }
-                }
-            }
-            Lvalue::Ptr { .. } => lvalue,
-        };
-        Ok(new_lvalue)
-    }
-
-    /// Ensures this Value is not a ByRef
-    pub(super) fn follow_by_ref_value(
-        &self,
-        value: Value,
-        ty: Ty<'tcx>,
-    ) -> EvalResult<'tcx, Value> {
-        match value {
-            Value::ByRef(PtrAndAlign { ptr, aligned }) => {
-                self.read_maybe_aligned(aligned, |ectx| ectx.read_value(ptr, ty))
-            }
-            other => Ok(other),
-        }
-    }
-
-    pub fn value_to_primval(
-        &self,
-        ValTy { value, ty } : ValTy<'tcx>,
-    ) -> EvalResult<'tcx, PrimVal> {
-        match self.follow_by_ref_value(value, ty)? {
-            Value::ByRef { .. } => bug!("follow_by_ref_value can't result in `ByRef`"),
-
-            Value::ByVal(primval) => {
-                // TODO: Do we really want insta-UB here?
-                self.ensure_valid_value(primval, ty)?;
-                Ok(primval)
-            }
-
-            Value::ByValPair(..) => bug!("value_to_primval can't work with fat pointers"),
-        }
-    }
-
-    pub fn write_null(&mut self, dest: Lvalue, dest_ty: Ty<'tcx>) -> EvalResult<'tcx> {
-        self.write_primval(dest, PrimVal::Bytes(0), dest_ty)
-    }
-
-    pub fn write_ptr(&mut self, dest: Lvalue, val: Pointer, dest_ty: Ty<'tcx>) -> EvalResult<'tcx> {
-        let valty = ValTy {
-            value: val.to_value(),
-            ty: dest_ty,
-        };
-        self.write_value(valty, dest)
-    }
-
-    pub fn write_primval(
-        &mut self,
-        dest: Lvalue,
-        val: PrimVal,
-        dest_ty: Ty<'tcx>,
-    ) -> EvalResult<'tcx> {
-        let valty = ValTy {
-            value: Value::ByVal(val),
-            ty: dest_ty,
-        };
-        self.write_value(valty, dest)
-    }
-
-    pub fn write_value(
-        &mut self,
-        ValTy { value: src_val, ty: dest_ty } : ValTy<'tcx>,
-        dest: Lvalue,
-    ) -> EvalResult<'tcx> {
-        //trace!("Writing {:?} to {:?} at type {:?}", src_val, dest, dest_ty);
-        // Note that it is really important that the type here is the right one, and matches the
-        // type at which this data will later be read. In case `src_val` is a `ByValPair`, we don't
-        // do any magic here to handle padding properly, which is only correct if we never look at
-        // this data with the wrong type.
-
-        match dest {
-            Lvalue::Ptr {
-                ptr: PtrAndAlign { ptr, aligned },
-                extra,
-            } => {
-                assert_eq!(extra, LvalueExtra::None);
-                self.write_maybe_aligned_mut(
-                    aligned,
-                    |ectx| ectx.write_value_to_ptr(src_val, ptr, dest_ty),
-                )
-            }
-
-            Lvalue::Local { frame, local } => {
-                let dest = self.stack[frame].get_local(local)?;
-                self.write_value_possibly_by_val(
-                    src_val,
-                    |this, val| this.stack[frame].set_local(local, val),
-                    dest,
-                    dest_ty,
-                )
-            }
-        }
-    }
-
-    // The cases here can be a bit subtle. Read carefully!
-    fn write_value_possibly_by_val<F: FnOnce(&mut Self, Value) -> EvalResult<'tcx>>(
-        &mut self,
-        src_val: Value,
-        write_dest: F,
-        old_dest_val: Value,
-        dest_ty: Ty<'tcx>,
-    ) -> EvalResult<'tcx> {
-        if let Value::ByRef(PtrAndAlign {
-                                ptr: dest_ptr,
-                                aligned,
-                            }) = old_dest_val
-        {
-            // If the value is already `ByRef` (that is, backed by an `Allocation`),
-            // then we must write the new value into this allocation, because there may be
-            // other pointers into the allocation. These other pointers are logically
-            // pointers into the local variable, and must be able to observe the change.
-            //
-            // Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we
-            // knew for certain that there were no outstanding pointers to this allocation.
-            self.write_maybe_aligned_mut(aligned, |ectx| {
-                ectx.write_value_to_ptr(src_val, dest_ptr, dest_ty)
-            })?;
-
-        } else if let Value::ByRef(PtrAndAlign {
-                                       ptr: src_ptr,
-                                       aligned,
-                                   }) = src_val
-        {
-            // If the destination value is not `ByRef`, then we know there are no
-            // pointers to it and we can simply overwrite the `Value` in the locals
-            // array directly.
-            //
-            // In this specific case, where the source value is `ByRef`, we must duplicate
-            // the allocation, because this is a by-value operation. It would be incorrect
-            // if they referred to the same allocation, since then a change to one would
-            // implicitly change the other.
-            //
-            // It is a valid optimization to attempt reading a primitive value out of the
-            // source and write that into the destination without making an allocation, so
-            // we do so here.
-            self.read_maybe_aligned_mut(aligned, |ectx| {
-                if let Ok(Some(src_val)) = ectx.try_read_value(src_ptr, dest_ty) {
-                    write_dest(ectx, src_val)?;
-                } else {
-                    let dest_ptr = ectx.alloc_ptr(dest_ty)?.into();
-                    ectx.copy(src_ptr, dest_ptr, dest_ty)?;
-                    write_dest(ectx, Value::by_ref(dest_ptr))?;
-                }
-                Ok(())
-            })?;
-
-        } else {
-            // Finally, we have the simple case where neither source nor destination is
-            // `ByRef`. We may simply copy the source value over to the destination.
-            write_dest(self, src_val)?;
-        }
-        Ok(())
-    }
-
-    pub fn write_value_to_ptr(
-        &mut self,
-        value: Value,
-        dest: Pointer,
-        dest_ty: Ty<'tcx>,
-    ) -> EvalResult<'tcx> {
-        match value {
-            Value::ByRef(PtrAndAlign { ptr, aligned }) => {
-                self.read_maybe_aligned_mut(aligned, |ectx| ectx.copy(ptr, dest, dest_ty))
-            }
-            Value::ByVal(primval) => {
-                let size = self.type_size(dest_ty)?.expect("dest type must be sized");
-                if size == 0 {
-                    assert!(primval.is_undef());
-                    Ok(())
-                } else {
-                    // TODO: Do we need signedness?
-                    self.memory.write_primval(dest.to_ptr()?, primval, size, false)
-                }
-            }
-            Value::ByValPair(a, b) => self.write_pair_to_ptr(a, b, dest.to_ptr()?, dest_ty),
-        }
-    }
-
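-    /// Writes the two halves of a pair value (e.g. the data pointer and the
-    /// length or vtable of a fat pointer) into the two fields of `ty`. Newtype
-    /// wrappers around the pair are peeled off first by descending through
-    /// types with a single field.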
-    pub fn write_pair_to_ptr(
-        &mut self,
-        a: PrimVal,
-        b: PrimVal,
-        ptr: MemoryPointer,
-        mut ty: Ty<'tcx>,
-    ) -> EvalResult<'tcx> {
-        let mut packed = false;
-        while self.get_field_count(ty)? == 1 {
-            let field = self.get_field_ty(ty, 0)?;
-            ty = field.ty;
-            packed = packed || field.packed;
-        }
-        assert_eq!(self.get_field_count(ty)?, 2);
-        let field_0 = self.get_field_offset(ty, 0)?;
-        let field_1 = self.get_field_offset(ty, 1)?;
-        let field_0_ty = self.get_field_ty(ty, 0)?;
-        let field_1_ty = self.get_field_ty(ty, 1)?;
-        assert_eq!(
-            field_0_ty.packed,
-            field_1_ty.packed,
-            "the two fields must agree on being packed"
-        );
-        packed = packed || field_0_ty.packed;
-        let field_0_size = self.type_size(field_0_ty.ty)?.expect(
-            "pair element type must be sized",
-        );
-        let field_1_size = self.type_size(field_1_ty.ty)?.expect(
-            "pair element type must be sized",
-        );
-        let field_0_ptr = ptr.offset(field_0.bytes(), &self)?.into();
-        let field_1_ptr = ptr.offset(field_1.bytes(), &self)?.into();
-        // TODO: What about signedness?
-        self.write_maybe_aligned_mut(!packed, |ectx| {
-            ectx.memory.write_primval(field_0_ptr, a, field_0_size, false)
-        })?;
-        self.write_maybe_aligned_mut(!packed, |ectx| {
-            ectx.memory.write_primval(field_1_ptr, b, field_1_size, false)
-        })?;
-        Ok(())
-    }
-
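-    /// Maps a Rust type to the `PrimValKind` used to represent it in the
-    /// interpreter, or errors with `TypeNotPrimitive` if the type has no
-    /// primitive representation.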
-    pub fn ty_to_primval_kind(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimValKind> {
-        use syntax::ast::FloatTy;
-
-        let kind = match ty.sty {
-            ty::TyBool => PrimValKind::Bool,
-            ty::TyChar => PrimValKind::Char,
-
-            ty::TyInt(int_ty) => {
-                use syntax::ast::IntTy::*;
-                let size = match int_ty {
-                    I8 => 1,
-                    I16 => 2,
-                    I32 => 4,
-                    I64 => 8,
-                    I128 => 16,
-                    Is => self.memory.pointer_size(),
-                };
-                PrimValKind::from_int_size(size)
-            }
-
-            ty::TyUint(uint_ty) => {
-                use syntax::ast::UintTy::*;
-                let size = match uint_ty {
-                    U8 => 1,
-                    U16 => 2,
-                    U32 => 4,
-                    U64 => 8,
-                    U128 => 16,
-                    Us => self.memory.pointer_size(),
-                };
-                PrimValKind::from_uint_size(size)
-            }
-
-            ty::TyFloat(FloatTy::F32) => PrimValKind::F32,
-            ty::TyFloat(FloatTy::F64) => PrimValKind::F64,
-
-            ty::TyFnPtr(_) => PrimValKind::FnPtr,
-
-            ty::TyRef(_, ref tam) |
-            ty::TyRawPtr(ref tam) if self.type_is_sized(tam.ty) => PrimValKind::Ptr,
-
-            ty::TyAdt(def, _) if def.is_box() => PrimValKind::Ptr,
-
-            ty::TyAdt(def, substs) => {
-                use rustc::ty::layout::Layout::*;
-                match *self.type_layout(ty)? {
-                    CEnum { discr, signed, .. } => {
-                        let size = discr.size().bytes();
-                        if signed {
-                            PrimValKind::from_int_size(size)
-                        } else {
-                            PrimValKind::from_uint_size(size)
-                        }
-                    }
-
-                    RawNullablePointer { value, .. } => {
-                        use rustc::ty::layout::Primitive::*;
-                        match value {
-                            // TODO(solson): Does signedness matter here? What should the sign be?
-                            Int(int) => PrimValKind::from_uint_size(int.size().bytes()),
-                            F32 => PrimValKind::F32,
-                            F64 => PrimValKind::F64,
-                            Pointer => PrimValKind::Ptr,
-                        }
-                    }
-
-                    // represent single-field structs as their single field
-                    Univariant { .. } => {
-                        // enums with just one variant are no different, but `.struct_variant()` doesn't work for enums
-                        let variant = &def.variants[0];
-                        // FIXME: also allow structs with only a single non-ZST field
-                        if variant.fields.len() == 1 {
-                            return self.ty_to_primval_kind(variant.fields[0].ty(self.tcx, substs));
-                        } else {
-                            return err!(TypeNotPrimitive(ty));
-                        }
-                    }
-
-                    _ => return err!(TypeNotPrimitive(ty)),
-                }
-            }
-
-            _ => return err!(TypeNotPrimitive(ty)),
-        };
-
-        Ok(kind)
-    }
-
-    fn ensure_valid_value(&self, val: PrimVal, ty: Ty<'tcx>) -> EvalResult<'tcx> {
-        match ty.sty {
-            ty::TyBool if val.to_bytes()? > 1 => err!(InvalidBool),
-
-            ty::TyChar if ::std::char::from_u32(val.to_bytes()? as u32).is_none() => {
-                err!(InvalidChar(val.to_bytes()? as u32 as u128))
-            }
-
-            _ => Ok(()),
-        }
-    }
-
-    pub fn read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
-        if let Some(val) = self.try_read_value(ptr, ty)? {
-            Ok(val)
-        } else {
-            bug!("primitive read failed for type: {:?}", ty);
-        }
-    }
-
-    pub(crate) fn read_ptr(
-        &self,
-        ptr: MemoryPointer,
-        pointee_ty: Ty<'tcx>,
-    ) -> EvalResult<'tcx, Value> {
-        let ptr_size = self.memory.pointer_size();
-        let p : Pointer = self.memory.read_ptr_sized_unsigned(ptr)?.into();
-        if self.type_is_sized(pointee_ty) {
-            Ok(p.to_value())
-        } else {
-            trace!("reading fat pointer extra of type {}", pointee_ty);
-            let extra = ptr.offset(ptr_size, self)?;
-            match self.tcx.struct_tail(pointee_ty).sty {
-                ty::TyDynamic(..) => Ok(p.to_value_with_vtable(
-                    self.memory.read_ptr_sized_unsigned(extra)?.to_ptr()?,
-                )),
-                ty::TySlice(..) | ty::TyStr => Ok(
-                    p.to_value_with_len(self.memory.read_ptr_sized_unsigned(extra)?.to_bytes()? as u64),
-                ),
-                _ => bug!("unsized primval ptr read from {:?}", pointee_ty),
-            }
-        }
-    }
-
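-    /// Attempts to read a primitive value of type `ty` from `ptr`. Returns
-    /// `Ok(None)` for types without a primitive representation, so callers can
-    /// fall back to a `ByRef` value.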
-    fn try_read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<Value>> {
-        use syntax::ast::FloatTy;
-
-        let ptr = ptr.to_ptr()?;
-        let val = match ty.sty {
-            ty::TyBool => {
-                let val = self.memory.read_primval(ptr, 1, false)?;
-                let val = match val {
-                    PrimVal::Bytes(0) => false,
-                    PrimVal::Bytes(1) => true,
-                    // TODO: This seems a little overeager; should reading at bool type already be insta-UB?
-                    _ => return err!(InvalidBool),
-                };
-                PrimVal::from_bool(val)
-            }
-            ty::TyChar => {
-                let c = self.memory.read_primval(ptr, 4, false)?.to_bytes()? as u32;
-                match ::std::char::from_u32(c) {
-                    Some(ch) => PrimVal::from_char(ch),
-                    None => return err!(InvalidChar(c as u128)),
-                }
-            }
-
-            ty::TyInt(int_ty) => {
-                use syntax::ast::IntTy::*;
-                let size = match int_ty {
-                    I8 => 1,
-                    I16 => 2,
-                    I32 => 4,
-                    I64 => 8,
-                    I128 => 16,
-                    Is => self.memory.pointer_size(),
-                };
-                self.memory.read_primval(ptr, size, true)?
-            }
-
-            ty::TyUint(uint_ty) => {
-                use syntax::ast::UintTy::*;
-                let size = match uint_ty {
-                    U8 => 1,
-                    U16 => 2,
-                    U32 => 4,
-                    U64 => 8,
-                    U128 => 16,
-                    Us => self.memory.pointer_size(),
-                };
-                self.memory.read_primval(ptr, size, false)?
-            }
-
-            ty::TyFloat(FloatTy::F32) => PrimVal::Bytes(self.memory.read_primval(ptr, 4, false)?.to_bytes()?),
-            ty::TyFloat(FloatTy::F64) => PrimVal::Bytes(self.memory.read_primval(ptr, 8, false)?.to_bytes()?),
-
-            ty::TyFnPtr(_) => self.memory.read_ptr_sized_unsigned(ptr)?,
-            ty::TyRef(_, ref tam) |
-            ty::TyRawPtr(ref tam) => return self.read_ptr(ptr, tam.ty).map(Some),
-
-            ty::TyAdt(def, _) => {
-                if def.is_box() {
-                    return self.read_ptr(ptr, ty.boxed_ty()).map(Some);
-                }
-                use rustc::ty::layout::Layout::*;
-                if let CEnum { discr, signed, .. } = *self.type_layout(ty)? {
-                    let size = discr.size().bytes();
-                    self.memory.read_primval(ptr, size, signed)?
-                } else {
-                    return Ok(None);
-                }
-            }
-
-            _ => return Ok(None),
-        };
-
-        Ok(Some(Value::ByVal(val)))
-    }
-
-    pub fn frame(&self) -> &Frame<'tcx> {
-        self.stack.last().expect("no call frames exist")
-    }
-
-    pub(super) fn frame_mut(&mut self) -> &mut Frame<'tcx> {
-        self.stack.last_mut().expect("no call frames exist")
-    }
-
-    pub(super) fn mir(&self) -> &'tcx mir::Mir<'tcx> {
-        self.frame().mir
-    }
-
-    pub(super) fn substs(&self) -> &'tcx Substs<'tcx> {
-        self.frame().instance.substs
-    }
-
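-    /// Performs the pointer half of an unsizing coercion, e.g. `&[T; N]` to
-    /// `&[T]` (attaching the length) or `&Struct` to `&Trait` (attaching the
-    /// vtable).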
-    fn unsize_into_ptr(
-        &mut self,
-        src: Value,
-        src_ty: Ty<'tcx>,
-        dest: Lvalue,
-        dest_ty: Ty<'tcx>,
-        sty: Ty<'tcx>,
-        dty: Ty<'tcx>,
-    ) -> EvalResult<'tcx> {
-        // A<Struct> -> A<Trait> conversion
-        let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty);
-
-        match (&src_pointee_ty.sty, &dest_pointee_ty.sty) {
-            (&ty::TyArray(_, length), &ty::TySlice(_)) => {
-                let ptr = src.into_ptr(&self.memory)?;
-                // u64 cast is from usize to u64, which is always good
-                let valty = ValTy {
-                    value: ptr.to_value_with_len(length.val.to_const_int().unwrap().to_u64().unwrap() ),
-                    ty: dest_ty,
-                };
-                self.write_value(valty, dest)
-            }
-            (&ty::TyDynamic(..), &ty::TyDynamic(..)) => {
-                // For now, upcasts are limited to changes in marker
-                // traits, and hence never actually require an actual
-                // change to the vtable.
-                let valty = ValTy {
-                    value: src,
-                    ty: dest_ty,
-                };
-                self.write_value(valty, dest)
-            }
-            (_, &ty::TyDynamic(ref data, _)) => {
-                let trait_ref = data.principal().unwrap().with_self_ty(
-                    self.tcx,
-                    src_pointee_ty,
-                );
-                let trait_ref = self.tcx.erase_regions(&trait_ref);
-                let vtable = self.get_vtable(src_pointee_ty, trait_ref)?;
-                let ptr = src.into_ptr(&self.memory)?;
-                let valty = ValTy {
-                    value: ptr.to_value_with_vtable(vtable),
-                    ty: dest_ty,
-                };
-                self.write_value(valty, dest)
-            }
-
-            _ => bug!("invalid unsizing {:?} -> {:?}", src_ty, dest_ty),
-        }
-    }
-
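-    /// Performs an unsizing coercion of `src` into `dest`. Pointers defer to
-    /// `unsize_into_ptr`; for ADTs such as `Arc<T> -> Arc<Trait>` the struct is
-    /// copied field by field, unsizing the one pointer field whose type
-    /// changes.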
-    fn unsize_into(
-        &mut self,
-        src: Value,
-        src_ty: Ty<'tcx>,
-        dest: Lvalue,
-        dest_ty: Ty<'tcx>,
-    ) -> EvalResult<'tcx> {
-        match (&src_ty.sty, &dest_ty.sty) {
-            (&ty::TyRef(_, ref s), &ty::TyRef(_, ref d)) |
-            (&ty::TyRef(_, ref s), &ty::TyRawPtr(ref d)) |
-            (&ty::TyRawPtr(ref s), &ty::TyRawPtr(ref d)) => {
-                self.unsize_into_ptr(src, src_ty, dest, dest_ty, s.ty, d.ty)
-            }
-            (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) => {
-                if def_a.is_box() || def_b.is_box() {
-                    if !def_a.is_box() || !def_b.is_box() {
-                        panic!("invalid unsizing between {:?} -> {:?}", src_ty, dest_ty);
-                    }
-                    return self.unsize_into_ptr(
-                        src,
-                        src_ty,
-                        dest,
-                        dest_ty,
-                        src_ty.boxed_ty(),
-                        dest_ty.boxed_ty(),
-                    );
-                }
-                if self.ty_to_primval_kind(src_ty).is_ok() {
-                    // TODO: We ignore the packed flag here
-                    let sty = self.get_field_ty(src_ty, 0)?.ty;
-                    let dty = self.get_field_ty(dest_ty, 0)?.ty;
-                    return self.unsize_into(src, sty, dest, dty);
-                }
-                // unsizing of generic struct with pointer fields
-                // Example: `Arc<T>` -> `Arc<Trait>`
-                // here we need to increase the size of every &T thin ptr field to a fat ptr
-
-                assert_eq!(def_a, def_b);
-
-                let src_fields = def_a.variants[0].fields.iter();
-                let dst_fields = def_b.variants[0].fields.iter();
-
-                //let src = adt::MaybeSizedValue::sized(src);
-                //let dst = adt::MaybeSizedValue::sized(dst);
-                let src_ptr = match src {
-                    Value::ByRef(PtrAndAlign { ptr, aligned: true }) => ptr,
-                    // TODO: Is it possible for unaligned pointers to occur here?
-                    _ => bug!("expected aligned pointer, got {:?}", src),
-                };
-
-                // FIXME(solson)
-                let dest = self.force_allocation(dest)?.to_ptr()?;
-                let iter = src_fields.zip(dst_fields).enumerate();
-                for (i, (src_f, dst_f)) in iter {
-                    let src_fty = self.field_ty(substs_a, src_f);
-                    let dst_fty = self.field_ty(substs_b, dst_f);
-                    if self.type_size(dst_fty)? == Some(0) {
-                        continue;
-                    }
-                    let src_field_offset = self.get_field_offset(src_ty, i)?.bytes();
-                    let dst_field_offset = self.get_field_offset(dest_ty, i)?.bytes();
-                    let src_f_ptr = src_ptr.offset(src_field_offset, &self)?;
-                    let dst_f_ptr = dest.offset(dst_field_offset, &self)?;
-                    if src_fty == dst_fty {
-                        self.copy(src_f_ptr, dst_f_ptr.into(), src_fty)?;
-                    } else {
-                        self.unsize_into(
-                            Value::by_ref(src_f_ptr),
-                            src_fty,
-                            Lvalue::from_ptr(dst_f_ptr),
-                            dst_fty,
-                        )?;
-                    }
-                }
-                Ok(())
-            }
-            _ => {
-                bug!(
-                    "unsize_into: invalid conversion: {:?} -> {:?}",
-                    src_ty,
-                    dest_ty
-                )
-            }
-        }
-    }
-
-    pub fn dump_local(&self, lvalue: Lvalue) {
-        // Debug output
-        match lvalue {
-            Lvalue::Local { frame, local } => {
-                let mut allocs = Vec::new();
-                let mut msg = format!("{:?}", local);
-                if frame != self.cur_frame() {
-                    write!(msg, " ({} frames up)", self.cur_frame() - frame).unwrap();
-                }
-                write!(msg, ":").unwrap();
-
-                match self.stack[frame].get_local(local) {
-                    Err(EvalError { kind: EvalErrorKind::DeadLocal, .. }) => {
-                        write!(msg, " is dead").unwrap();
-                    }
-                    Err(err) => {
-                        panic!("Failed to access local: {:?}", err);
-                    }
-                    Ok(Value::ByRef(PtrAndAlign { ptr, aligned })) => {
-                        match ptr.into_inner_primval() {
-                            PrimVal::Ptr(ptr) => {
-                                write!(msg, " by {}ref:", if aligned { "" } else { "unaligned " })
-                                    .unwrap();
-                                allocs.push(ptr.alloc_id);
-                            }
-                            ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(),
-                        }
-                    }
-                    Ok(Value::ByVal(val)) => {
-                        write!(msg, " {:?}", val).unwrap();
-                        if let PrimVal::Ptr(ptr) = val {
-                            allocs.push(ptr.alloc_id);
-                        }
-                    }
-                    Ok(Value::ByValPair(val1, val2)) => {
-                        write!(msg, " ({:?}, {:?})", val1, val2).unwrap();
-                        if let PrimVal::Ptr(ptr) = val1 {
-                            allocs.push(ptr.alloc_id);
-                        }
-                        if let PrimVal::Ptr(ptr) = val2 {
-                            allocs.push(ptr.alloc_id);
-                        }
-                    }
-                }
-
-                trace!("{}", msg);
-                self.memory.dump_allocs(allocs);
-            }
-            Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned }, .. } => {
-                match ptr.into_inner_primval() {
-                    PrimVal::Ptr(ptr) => {
-                        trace!("by {}ref:", if aligned { "" } else { "unaligned " });
-                        self.memory.dump_alloc(ptr.alloc_id);
-                    }
-                    ptr => trace!(" integral by ref: {:?}", ptr),
-                }
-            }
-        }
-    }
-
-    /// Convenience function to ensure correct usage of locals
-    pub fn modify_local<F>(&mut self, frame: usize, local: mir::Local, f: F) -> EvalResult<'tcx>
-    where
-        F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>,
-    {
-        let val = self.stack[frame].get_local(local)?;
-        let new_val = f(self, val)?;
-        self.stack[frame].set_local(local, new_val)?;
-        // FIXME(solson): Run this when setting to Undef? (See previous version of this code.)
-        // if let Value::ByRef(ptr) = self.stack[frame].get_local(local) {
-        //     self.memory.deallocate(ptr)?;
-        // }
-        Ok(())
-    }
-
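-    /// Reports an evaluation error: logs the miri-internal backtrace (if one
-    /// was captured) and emits a rustc diagnostic that walks the interpreter's
-    /// call stack as span notes.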
-    pub fn report(&self, e: &mut EvalError) {
-        if let Some(ref mut backtrace) = e.backtrace {
-            let mut trace_text = "\n\nAn error occurred in miri:\n".to_string();
-            let mut skip_init = true;
-            backtrace.resolve();
-            'frames: for (i, frame) in backtrace.frames().iter().enumerate() {
-                for symbol in frame.symbols() {
-                    if let Some(name) = symbol.name() {
-                        // demangle the symbol via `to_string`
-                        let name = name.to_string();
-                        if name.starts_with("miri::after_analysis") {
-                            // don't report initialization gibberish
-                            break 'frames;
-                        } else if name.starts_with("backtrace::capture::Backtrace::new")
-                            // debug mode produces funky symbol names
-                            || name.starts_with("backtrace::capture::{{impl}}::new")
-                        {
-                            // don't report backtrace internals
-                            skip_init = false;
-                            continue 'frames;
-                        }
-                    }
-                }
-                if skip_init {
-                    continue;
-                }
-                for symbol in frame.symbols() {
-                    write!(trace_text, "{}: ", i).unwrap();
-                    if let Some(name) = symbol.name() {
-                        write!(trace_text, "{}\n", name).unwrap();
-                    } else {
-                        write!(trace_text, "<unknown>\n").unwrap();
-                    }
-                    write!(trace_text, "\tat ").unwrap();
-                    if let Some(file_path) = symbol.filename() {
-                        write!(trace_text, "{}", file_path.display()).unwrap();
-                    } else {
-                        write!(trace_text, "<unknown_file>").unwrap();
-                    }
-                    if let Some(line) = symbol.lineno() {
-                        write!(trace_text, ":{}\n", line).unwrap();
-                    } else {
-                        write!(trace_text, "\n").unwrap();
-                    }
-                }
-            }
-            error!("{}", trace_text);
-        }
-        if let Some(frame) = self.stack().last() {
-            let block = &frame.mir.basic_blocks()[frame.block];
-            let span = if frame.stmt < block.statements.len() {
-                block.statements[frame.stmt].source_info.span
-            } else {
-                block.terminator().source_info.span
-            };
-            let mut err = self.tcx.sess.struct_span_err(span, &e.to_string());
-            for &Frame { instance, span, .. } in self.stack().iter().rev() {
-                if self.tcx.def_key(instance.def_id()).disambiguated_data.data ==
-                    DefPathData::ClosureExpr
-                {
-                    err.span_note(span, "inside call to closure");
-                    continue;
-                }
-                err.span_note(span, &format!("inside call to {}", instance));
-            }
-            err.emit();
-        } else {
-            self.tcx.sess.err(&e.to_string());
-        }
-    }
-}
-
-impl<'tcx> Frame<'tcx> {
-    pub fn get_local(&self, local: mir::Local) -> EvalResult<'tcx, Value> {
-        // Subtract 1 because we don't store a value for the ReturnPointer, the local with index 0.
-        self.locals[local.index() - 1].ok_or(EvalErrorKind::DeadLocal.into())
-    }
-
-    fn set_local(&mut self, local: mir::Local, value: Value) -> EvalResult<'tcx> {
-        // Subtract 1 because we don't store a value for the ReturnPointer, the local with index 0.
-        match self.locals[local.index() - 1] {
-            None => err!(DeadLocal),
-            Some(ref mut local) => {
-                *local = value;
-                Ok(())
-            }
-        }
-    }
-
-    pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, Option<Value>> {
-        trace!("{:?} is now live", local);
-
-        let old = self.locals[local.index() - 1];
-        self.locals[local.index() - 1] = Some(Value::ByVal(PrimVal::Undef)); // StorageLive *always* kills the value that's currently stored
-        return Ok(old);
-    }
-
-    /// Returns the old value of the local
-    pub fn storage_dead(&mut self, local: mir::Local) -> EvalResult<'tcx, Option<Value>> {
-        trace!("{:?} is now dead", local);
-
-        let old = self.locals[local.index() - 1];
-        self.locals[local.index() - 1] = None;
-        return Ok(old);
-    }
-}
-
-// TODO(solson): Upstream these methods into rustc::ty::layout.
-
-pub(super) trait IntegerExt {
-    fn size(self) -> Size;
-}
-
-impl IntegerExt for layout::Integer {
-    fn size(self) -> Size {
-        use rustc::ty::layout::Integer::*;
-        match self {
-            I1 | I8 => Size::from_bits(8),
-            I16 => Size::from_bits(16),
-            I32 => Size::from_bits(32),
-            I64 => Size::from_bits(64),
-            I128 => Size::from_bits(128),
-        }
-    }
-}
-
-/// FIXME: expose trans::monomorphize::resolve_closure
-pub fn resolve_closure<'a, 'tcx>(
-    tcx: TyCtxt<'a, 'tcx, 'tcx>,
-    def_id: DefId,
-    substs: ty::ClosureSubsts<'tcx>,
-    requested_kind: ty::ClosureKind,
-) -> ty::Instance<'tcx> {
-    let actual_kind = tcx.closure_kind(def_id);
-    match needs_fn_once_adapter_shim(actual_kind, requested_kind) {
-        Ok(true) => fn_once_adapter_instance(tcx, def_id, substs),
-        _ => ty::Instance::new(def_id, substs.substs),
-    }
-}
-
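-/// Builds the instance of the compiler-generated shim that lets an Fn/FnMut
-/// closure be called through the FnOnce interface (a `call_once` that forwards
-/// to `call_mut`).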
-fn fn_once_adapter_instance<'a, 'tcx>(
-    tcx: TyCtxt<'a, 'tcx, 'tcx>,
-    closure_did: DefId,
-    substs: ty::ClosureSubsts<'tcx>,
-) -> ty::Instance<'tcx> {
-    debug!("fn_once_adapter_shim({:?}, {:?})", closure_did, substs);
-    let fn_once = tcx.lang_items().fn_once_trait().unwrap();
-    let call_once = tcx.associated_items(fn_once)
-        .find(|it| it.kind == ty::AssociatedKind::Method)
-        .unwrap()
-        .def_id;
-    let def = ty::InstanceDef::ClosureOnceShim { call_once };
-
-    let self_ty = tcx.mk_closure_from_closure_substs(closure_did, substs);
-
-    let sig = tcx.fn_sig(closure_did).subst(tcx, substs.substs);
-    let sig = tcx.erase_late_bound_regions_and_normalize(&sig);
-    assert_eq!(sig.inputs().len(), 1);
-    let substs = tcx.mk_substs(
-        [Kind::from(self_ty), Kind::from(sig.inputs()[0])]
-            .iter()
-            .cloned(),
-    );
-
-    debug!("fn_once_adapter_shim: self_ty={:?} sig={:?}", self_ty, sig);
-    ty::Instance { def, substs }
-}
-
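-/// Decides whether calling a closure of kind `actual_closure_kind` through the
-/// `trait_closure_kind` interface needs an adapter shim, e.g. when an `Fn`
-/// closure is passed where `F: FnOnce()` is expected. `Err(())` signals an
-/// invalid combination, such as requesting `Fn` from an `FnMut` closure.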
-fn needs_fn_once_adapter_shim(
-    actual_closure_kind: ty::ClosureKind,
-    trait_closure_kind: ty::ClosureKind,
-) -> Result<bool, ()> {
-    match (actual_closure_kind, trait_closure_kind) {
-        (ty::ClosureKind::Fn, ty::ClosureKind::Fn) |
-        (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) |
-        (ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce) => {
-            // No adapter needed.
-            Ok(false)
-        }
-        (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => {
-            // The closure fn `llfn` is a `fn(&self, ...)`.  We want a
-            // `fn(&mut self, ...)`. In fact, at trans time, these are
-            // basically the same thing, so we can just return llfn.
-            Ok(false)
-        }
-        (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) |
-        (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => {
-            // The closure fn `llfn` is a `fn(&self, ...)` or `fn(&mut
-            // self, ...)`.  We want a `fn(self, ...)`. We can produce
-            // this by doing something like:
-            //
-            //     fn call_once(self, ...) { call_mut(&self, ...) }
-            //     fn call_once(mut self, ...) { call_mut(&mut self, ...) }
-            //
-            // These are both the same at trans time.
-            Ok(true)
-        }
-        _ => Err(()),
-    }
-}
-
-/// The point where linking happens. Resolve a (def_id, substs)
-/// pair to an instance.
-pub fn resolve<'a, 'tcx>(
-    tcx: TyCtxt<'a, 'tcx, 'tcx>,
-    def_id: DefId,
-    substs: &'tcx Substs<'tcx>,
-) -> ty::Instance<'tcx> {
-    debug!("resolve(def_id={:?}, substs={:?})", def_id, substs);
-    let result = if let Some(trait_def_id) = tcx.trait_of_item(def_id) {
-        debug!(" => associated item, attempting to find impl");
-        let item = tcx.associated_item(def_id);
-        resolve_associated_item(tcx, &item, trait_def_id, substs)
-    } else {
-        let item_type = def_ty(tcx, def_id, substs);
-        let def = match item_type.sty {
-            ty::TyFnDef(..)
-                if {
-                       let f = item_type.fn_sig(tcx);
-                       f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic
-                   } => {
-                debug!(" => intrinsic");
-                ty::InstanceDef::Intrinsic(def_id)
-            }
-            _ => {
-                if Some(def_id) == tcx.lang_items().drop_in_place_fn() {
-                    let ty = substs.type_at(0);
-                    if needs_drop_glue(tcx, ty) {
-                        debug!(" => nontrivial drop glue");
-                        ty::InstanceDef::DropGlue(def_id, Some(ty))
-                    } else {
-                        debug!(" => trivial drop glue");
-                        ty::InstanceDef::DropGlue(def_id, None)
-                    }
-                } else {
-                    debug!(" => free item");
-                    ty::InstanceDef::Item(def_id)
-                }
-            }
-        };
-        ty::Instance { def, substs }
-    };
-    debug!(
-        "resolve(def_id={:?}, substs={:?}) = {}",
-        def_id,
-        substs,
-        result
-    );
-    result
-}
-
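-/// Returns whether dropping a value of type `t` requires running any code.
-/// `Box<T>` is special-cased: it needs drop glue to free its allocation even
-/// if `T` itself does not, unless the box is known not to allocate (ZST).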
-pub fn needs_drop_glue<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, t: Ty<'tcx>) -> bool {
-    assert!(t.is_normalized_for_trans());
-
-    let t = tcx.erase_regions(&t);
-
-    // FIXME (#22815): note that type_needs_drop conservatively
-    // approximates in some cases and may say a type expression
-    // requires drop glue when it actually does not.
-    //
-    // (In this case it is not clear whether any harm is done, i.e.
-    // erroneously returning `true` in some cases where we could have
-    // returned `false` does not appear unsound. The impact on
-    // code quality is unknown at this time.)
-
-    let env = ty::ParamEnv::empty(Reveal::All);
-    if !t.needs_drop(tcx, env) {
-        return false;
-    }
-    match t.sty {
-        ty::TyAdt(def, _) if def.is_box() => {
-            let typ = t.boxed_ty();
-            if !typ.needs_drop(tcx, env) && type_is_sized(tcx, typ) {
-                let layout = t.layout(tcx, ty::ParamEnv::empty(Reveal::All)).unwrap();
-                // `Box<ZeroSizeType>` does not allocate.
-                layout.size(&tcx.data_layout).bytes() != 0
-            } else {
-                true
-            }
-        }
-        _ => true,
-    }
-}
-
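-/// Resolves a trait item (e.g. `<T as Clone>::clone`) to the concrete instance
-/// selected by trait resolution: an impl method, a generator, a closure, a
-/// fn-pointer shim, a virtual call, or a clone shim.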
-fn resolve_associated_item<'a, 'tcx>(
-    tcx: TyCtxt<'a, 'tcx, 'tcx>,
-    trait_item: &ty::AssociatedItem,
-    trait_id: DefId,
-    rcvr_substs: &'tcx Substs<'tcx>,
-) -> ty::Instance<'tcx> {
-    let def_id = trait_item.def_id;
-    debug!(
-        "resolve_associated_item(trait_item={:?}, \
-                                    trait_id={:?}, \
-                                    rcvr_substs={:?})",
-        def_id,
-        trait_id,
-        rcvr_substs
-    );
-
-    let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs);
-    let vtbl = tcx.trans_fulfill_obligation(DUMMY_SP, ty::Binder(trait_ref));
-
-    // Now that we know which impl is being used, we can dispatch to
-    // the actual function:
-    match vtbl {
-        ::rustc::traits::VtableImpl(impl_data) => {
-            let (def_id, substs) =
-                ::rustc::traits::find_associated_item(tcx, trait_item, rcvr_substs, &impl_data);
-            let substs = tcx.erase_regions(&substs);
-            ty::Instance::new(def_id, substs)
-        }
-        ::rustc::traits::VtableGenerator(closure_data) => {
-            ty::Instance {
-                def: ty::InstanceDef::Item(closure_data.closure_def_id),
-                substs: closure_data.substs.substs
-            }
-        }
-        ::rustc::traits::VtableClosure(closure_data) => {
-            let trait_closure_kind = tcx.lang_items().fn_trait_kind(trait_id).unwrap();
-            resolve_closure(
-                tcx,
-                closure_data.closure_def_id,
-                closure_data.substs,
-                trait_closure_kind,
-            )
-        }
-        ::rustc::traits::VtableFnPointer(ref data) => {
-            ty::Instance {
-                def: ty::InstanceDef::FnPtrShim(trait_item.def_id, data.fn_ty),
-                substs: rcvr_substs,
-            }
-        }
-        ::rustc::traits::VtableObject(ref data) => {
-            let index = tcx.get_vtable_index_of_object_method(data, def_id);
-            ty::Instance {
-                def: ty::InstanceDef::Virtual(def_id, index),
-                substs: rcvr_substs,
-            }
-        }
-        ::rustc::traits::VtableBuiltin(..) if Some(trait_id) == tcx.lang_items().clone_trait() => {
-            ty::Instance {
-                def: ty::InstanceDef::CloneShim(def_id, trait_ref.self_ty()),
-                substs: rcvr_substs
-            }
-        }
-        _ => bug!("static call to invalid vtable: {:?}", vtbl),
-    }
-}
-
-pub fn def_ty<'a, 'tcx>(
-    tcx: TyCtxt<'a, 'tcx, 'tcx>,
-    def_id: DefId,
-    substs: &'tcx Substs<'tcx>,
-) -> Ty<'tcx> {
-    let ty = tcx.type_of(def_id);
-    apply_param_substs(tcx, substs, &ty)
-}
-
-/// Monomorphizes a type from the AST by first applying the in-scope
-/// substitutions and then normalizing any associated types.
-pub fn apply_param_substs<'a, 'tcx, T>(
-    tcx: TyCtxt<'a, 'tcx, 'tcx>,
-    param_substs: &Substs<'tcx>,
-    value: &T,
-) -> T
-where
-    T: ::rustc::infer::TransNormalize<'tcx>,
-{
-    debug!(
-        "apply_param_substs(param_substs={:?}, value={:?})",
-        param_substs,
-        value
-    );
-    let substituted = value.subst(tcx, param_substs);
-    let substituted = tcx.erase_regions(&substituted);
-    AssociatedTypeNormalizer { tcx }.fold(&substituted)
-}
-
-
-struct AssociatedTypeNormalizer<'a, 'tcx: 'a> {
-    tcx: TyCtxt<'a, 'tcx, 'tcx>,
-}
-
-impl<'a, 'tcx> AssociatedTypeNormalizer<'a, 'tcx> {
-    fn fold<T: TypeFoldable<'tcx>>(&mut self, value: &T) -> T {
-        if !value.has_projections() {
-            value.clone()
-        } else {
-            value.fold_with(self)
-        }
-    }
-}
-
-impl<'a, 'tcx> ::rustc::ty::fold::TypeFolder<'tcx, 'tcx> for AssociatedTypeNormalizer<'a, 'tcx> {
-    fn tcx<'c>(&'c self) -> TyCtxt<'c, 'tcx, 'tcx> {
-        self.tcx
-    }
-
-    fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
-        if !ty.has_projections() {
-            ty
-        } else {
-            self.tcx.normalize_associated_type(&ty)
-        }
-    }
-}
-
-fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
-    // Generics are weird; don't run this function on a generic type.
-    assert!(!ty.needs_subst());
-    ty.is_sized(tcx, ty::ParamEnv::empty(Reveal::All), DUMMY_SP)
-}
-
-pub fn resolve_drop_in_place<'a, 'tcx>(
-    tcx: TyCtxt<'a, 'tcx, 'tcx>,
-    ty: Ty<'tcx>,
-) -> ty::Instance<'tcx> {
-    let def_id = tcx.require_lang_item(::rustc::middle::lang_items::DropInPlaceFnLangItem);
-    let substs = tcx.intern_substs(&[Kind::from(ty)]);
-    resolve(tcx, def_id, substs)
-}
diff --git a/src/librustc_mir/interpret/lvalue.rs b/src/librustc_mir/interpret/lvalue.rs
deleted file mode 100644 (file)
index 36b396a..0000000
+++ /dev/null
@@ -1,506 +0,0 @@
-use rustc::mir;
-use rustc::ty::layout::{Size, Align};
-use rustc::ty::{self, Ty};
-use rustc_data_structures::indexed_vec::Idx;
-
-use super::{EvalResult, EvalContext, MemoryPointer, PrimVal, Value, Pointer, Machine, PtrAndAlign, ValTy};
-
-#[derive(Copy, Clone, Debug)]
-pub enum Lvalue {
-    /// An lvalue referring to a value allocated in the `Memory` system.
-    Ptr {
-        /// An lvalue may have an invalid (integral or undef) pointer,
-        /// since it might be turned back into a reference
-        /// before ever being dereferenced.
-        ptr: PtrAndAlign,
-        extra: LvalueExtra,
-    },
-
-    /// An lvalue referring to a value on the stack. Represented by a stack frame index paired with
-    /// a Mir local index.
-    Local { frame: usize, local: mir::Local },
-}
-
-#[derive(Copy, Clone, Debug, Eq, PartialEq)]
-pub enum LvalueExtra {
-    None,
-    Length(u64),
-    Vtable(MemoryPointer),
-    DowncastVariant(usize),
-}
-
-/// Uniquely identifies a specific constant or static.
-#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
-pub struct GlobalId<'tcx> {
-    /// For a constant or static, the `Instance` of the item itself.
-    /// For a promoted global, the `Instance` of the function they belong to.
-    pub instance: ty::Instance<'tcx>,
-
-    /// The index for promoted globals within their function's `Mir`.
-    pub promoted: Option<mir::Promoted>,
-}
-
-impl<'tcx> Lvalue {
-    /// Produces an Lvalue that will error when it is read from
-    pub fn undef() -> Self {
-        Self::from_primval_ptr(PrimVal::Undef.into())
-    }
-
-    pub fn from_primval_ptr(ptr: Pointer) -> Self {
-        Lvalue::Ptr {
-            ptr: PtrAndAlign { ptr, aligned: true },
-            extra: LvalueExtra::None,
-        }
-    }
-
-    pub fn from_ptr(ptr: MemoryPointer) -> Self {
-        Self::from_primval_ptr(ptr.into())
-    }
-
-    pub(super) fn to_ptr_extra_aligned(self) -> (PtrAndAlign, LvalueExtra) {
-        match self {
-            Lvalue::Ptr { ptr, extra } => (ptr, extra),
-            _ => bug!("to_ptr_and_extra: expected Lvalue::Ptr, got {:?}", self),
-
-        }
-    }
-
-    pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
-        let (ptr, extra) = self.to_ptr_extra_aligned();
-        // At this point, we forget about the alignment information -- the lvalue has been turned into a reference,
-        // and no matter where it came from, it now must be aligned.
-        assert_eq!(extra, LvalueExtra::None);
-        ptr.to_ptr()
-    }
-
-    pub(super) fn elem_ty_and_len(self, ty: Ty<'tcx>) -> (Ty<'tcx>, u64) {
-        match ty.sty {
-            ty::TyArray(elem, n) => (elem, n.val.to_const_int().unwrap().to_u64().unwrap() as u64),
-
-            ty::TySlice(elem) => {
-                match self {
-                    Lvalue::Ptr { extra: LvalueExtra::Length(len), .. } => (elem, len),
-                    _ => {
-                        bug!(
-                            "elem_ty_and_len of a TySlice given non-slice lvalue: {:?}",
-                            self
-                        )
-                    }
-                }
-            }
-
-            _ => bug!("elem_ty_and_len expected array or slice, got {:?}", ty),
-        }
-    }
-}
-
-impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
-    /// Reads a value from the lvalue without going through the intermediate step of obtaining
-    /// a `miri::Lvalue`
-    pub fn try_read_lvalue(
-        &mut self,
-        lvalue: &mir::Lvalue<'tcx>,
-    ) -> EvalResult<'tcx, Option<Value>> {
-        use rustc::mir::Lvalue::*;
-        match *lvalue {
-            // We might allow this in the future; right now there is no way to do this from Rust code anyway
-            Local(mir::RETURN_POINTER) => err!(ReadFromReturnPointer),
-            // Directly reading a local will always succeed
-            Local(local) => self.frame().get_local(local).map(Some),
-            // Directly reading a static will always succeed
-            Static(ref static_) => {
-                let instance = ty::Instance::mono(self.tcx, static_.def_id);
-                let cid = GlobalId {
-                    instance,
-                    promoted: None,
-                };
-                Ok(Some(Value::ByRef(
-                    *self.globals.get(&cid).expect("global not cached"),
-                )))
-            }
-            Projection(ref proj) => self.try_read_lvalue_projection(proj),
-        }
-    }
-
-    fn try_read_lvalue_projection(
-        &mut self,
-        proj: &mir::LvalueProjection<'tcx>,
-    ) -> EvalResult<'tcx, Option<Value>> {
-        use rustc::mir::ProjectionElem::*;
-        let base = match self.try_read_lvalue(&proj.base)? {
-            Some(base) => base,
-            None => return Ok(None),
-        };
-        let base_ty = self.lvalue_ty(&proj.base);
-        match proj.elem {
-            Field(field, _) => match (field.index(), base) {
-                // the only field of a struct
-                (0, Value::ByVal(val)) => Ok(Some(Value::ByVal(val))),
-                // split fat pointers, 2-element tuples, ...
-                (0...1, Value::ByValPair(a, b)) if self.get_field_count(base_ty)? == 2 => {
-                    let val = [a, b][field.index()];
-                    Ok(Some(Value::ByVal(val)))
-                },
-                // the only field of a struct is a fat pointer
-                (0, Value::ByValPair(..)) => Ok(Some(base)),
-                _ => Ok(None),
-            },
-            // The NullablePointer cases should work fine; we need to take care with normal enums
-            Downcast(..) |
-            Subslice { .. } |
-            // reading index 0 or index 1 from a ByVal or ByValPair could be optimized
-            ConstantIndex { .. } | Index(_) |
-            // No way to optimize this projection any better than the normal lvalue path
-            Deref => Ok(None),
-        }
-    }
-
-    /// Returns a value and (in the case of a ByRef) whether we are supposed to use aligned accesses.
-    pub(super) fn eval_and_read_lvalue(
-        &mut self,
-        lvalue: &mir::Lvalue<'tcx>,
-    ) -> EvalResult<'tcx, Value> {
-        // Shortcut for things like accessing a fat pointer's field,
-        // which would otherwise (in the `eval_lvalue` path) require moving a `ByValPair` to memory
-        // and returning an `Lvalue::Ptr` to it
-        if let Some(val) = self.try_read_lvalue(lvalue)? {
-            return Ok(val);
-        }
-        let lvalue = self.eval_lvalue(lvalue)?;
-        self.read_lvalue(lvalue)
-    }
-
-    pub fn read_lvalue(&self, lvalue: Lvalue) -> EvalResult<'tcx, Value> {
-        match lvalue {
-            Lvalue::Ptr { ptr, extra } => {
-                assert_eq!(extra, LvalueExtra::None);
-                Ok(Value::ByRef(ptr))
-            }
-            Lvalue::Local { frame, local } => self.stack[frame].get_local(local),
-        }
-    }
-
-    pub fn eval_lvalue(&mut self, mir_lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Lvalue> {
-        use rustc::mir::Lvalue::*;
-        let lvalue = match *mir_lvalue {
-            Local(mir::RETURN_POINTER) => self.frame().return_lvalue,
-            Local(local) => Lvalue::Local {
-                frame: self.cur_frame(),
-                local,
-            },
-
-            Static(ref static_) => {
-                let instance = ty::Instance::mono(self.tcx, static_.def_id);
-                let gid = GlobalId {
-                    instance,
-                    promoted: None,
-                };
-                Lvalue::Ptr {
-                    ptr: *self.globals.get(&gid).expect("uncached global"),
-                    extra: LvalueExtra::None,
-                }
-            }
-
-            Projection(ref proj) => {
-                let ty = self.lvalue_ty(&proj.base);
-                let lvalue = self.eval_lvalue(&proj.base)?;
-                return self.eval_lvalue_projection(lvalue, ty, &proj.elem);
-            }
-        };
-
-        if log_enabled!(::log::LogLevel::Trace) {
-            self.dump_local(lvalue);
-        }
-
-        Ok(lvalue)
-    }
-
-    pub fn lvalue_field(
-        &mut self,
-        base: Lvalue,
-        field: mir::Field,
-        base_ty: Ty<'tcx>,
-        field_ty: Ty<'tcx>,
-    ) -> EvalResult<'tcx, Lvalue> {
-        use rustc::ty::layout::Layout::*;
-
-        let base_layout = self.type_layout(base_ty)?;
-        let field_index = field.index();
-        let (offset, packed) = match *base_layout {
-            Univariant { ref variant, .. } => (variant.offsets[field_index], variant.packed),
-
-            // MIR optimizations treat single-variant enums as structs
-            General { ref variants, .. } if variants.len() == 1 => {
-                (variants[0].offsets[field_index], variants[0].packed)
-            }
-
-            General { ref variants, .. } => {
-                let (_, base_extra) = base.to_ptr_extra_aligned();
-                if let LvalueExtra::DowncastVariant(variant_idx) = base_extra {
-                    // +1 for the discriminant, which is field 0
-                    assert!(!variants[variant_idx].packed);
-                    (variants[variant_idx].offsets[field_index + 1], false)
-                } else {
-                    bug!("field access on enum had no variant index");
-                }
-            }
-
-            RawNullablePointer { .. } => {
-                assert_eq!(field_index, 0);
-                return Ok(base);
-            }
-
-            StructWrappedNullablePointer { ref nonnull, .. } => {
-                (nonnull.offsets[field_index], nonnull.packed)
-            }
-
-            UntaggedUnion { .. } => return Ok(base),
-
-            Vector { element, count } => {
-                let field = field_index as u64;
-                assert!(field < count);
-                let elem_size = element.size(&self.tcx.data_layout).bytes();
-                (Size::from_bytes(field * elem_size), false)
-            }
-
-            // We treat arrays + fixed-size indexing like field accesses
-            Array { .. } => {
-                let field = field_index as u64;
-                let elem_size = match base_ty.sty {
-                    ty::TyArray(elem_ty, n) => {
-                        assert!(field < n.val.to_const_int().unwrap().to_u64().unwrap() as u64);
-                        self.type_size(elem_ty)?.expect("array elements are sized") as u64
-                    }
-                    _ => {
-                        bug!(
-                            "lvalue_field: got Array layout but non-array type {:?}",
-                            base_ty
-                        )
-                    }
-                };
-                (Size::from_bytes(field * elem_size), false)
-            }
-
-            FatPointer { .. } => {
-                let bytes = field_index as u64 * self.memory.pointer_size();
-                let offset = Size::from_bytes(bytes);
-                (offset, false)
-            }
-
-            _ => bug!("field access on non-product type: {:?}", base_layout),
-        };
-
-        // Do not allocate in trivial cases
-        let (base_ptr, base_extra) = match base {
-            Lvalue::Ptr { ptr, extra } => (ptr, extra),
-            Lvalue::Local { frame, local } => {
-                match self.stack[frame].get_local(local)? {
-                    // in case the type has a single field, just return the value
-                    Value::ByVal(_)
-                        if self.get_field_count(base_ty).map(|c| c == 1).unwrap_or(
-                            false,
-                        ) => {
-                        assert_eq!(
-                            offset.bytes(),
-                            0,
-                            "ByVal can only have 1 non zst field with offset 0"
-                        );
-                        return Ok(base);
-                    }
-                    Value::ByRef { .. } |
-                    Value::ByValPair(..) |
-                    Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(),
-                }
-            }
-        };
-
-        let offset = match base_extra {
-            LvalueExtra::Vtable(tab) => {
-                let (_, align) = self.size_and_align_of_dst(
-                    base_ty,
-                    base_ptr.ptr.to_value_with_vtable(tab),
-                )?;
-                offset
-                    .abi_align(Align::from_bytes(align, align).unwrap())
-                    .bytes()
-            }
-            _ => offset.bytes(),
-        };
-
-        let mut ptr = base_ptr.offset(offset, &self)?;
-        // if we were unaligned, stay unaligned
-        // no matter what we were, if we are packed, we must not be aligned anymore
-        ptr.aligned &= !packed;
-
-        let field_ty = self.monomorphize(field_ty, self.substs());
-
-        let extra = if self.type_is_sized(field_ty) {
-            LvalueExtra::None
-        } else {
-            match base_extra {
-                LvalueExtra::None => bug!("expected fat pointer"),
-                LvalueExtra::DowncastVariant(..) => {
-                    bug!("Rust doesn't support unsized fields in enum variants")
-                }
-                LvalueExtra::Vtable(_) |
-                LvalueExtra::Length(_) => {}
-            }
-            base_extra
-        };
-
-        Ok(Lvalue::Ptr { ptr, extra })
-    }
-
-    pub(super) fn val_to_lvalue(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Lvalue> {
-        Ok(match self.tcx.struct_tail(ty).sty {
-            ty::TyDynamic(..) => {
-                let (ptr, vtable) = val.into_ptr_vtable_pair(&self.memory)?;
-                Lvalue::Ptr {
-                    ptr: PtrAndAlign { ptr, aligned: true },
-                    extra: LvalueExtra::Vtable(vtable),
-                }
-            }
-            ty::TyStr | ty::TySlice(_) => {
-                let (ptr, len) = val.into_slice(&self.memory)?;
-                Lvalue::Ptr {
-                    ptr: PtrAndAlign { ptr, aligned: true },
-                    extra: LvalueExtra::Length(len),
-                }
-            }
-            _ => Lvalue::from_primval_ptr(val.into_ptr(&self.memory)?),
-        })
-    }
-
-    pub(super) fn lvalue_index(
-        &mut self,
-        base: Lvalue,
-        outer_ty: Ty<'tcx>,
-        n: u64,
-    ) -> EvalResult<'tcx, Lvalue> {
-        // Taking the outer type here may seem odd; it's needed because for array types, the outer type gives away the length.
-        let base = self.force_allocation(base)?;
-        let (base_ptr, _) = base.to_ptr_extra_aligned();
-
-        let (elem_ty, len) = base.elem_ty_and_len(outer_ty);
-        let elem_size = self.type_size(elem_ty)?.expect(
-            "slice element must be sized",
-        );
-        assert!(
-            n < len,
-            "Tried to access element {} of array/slice with length {}",
-            n,
-            len
-        );
-        let ptr = base_ptr.offset(n * elem_size, self.memory.layout)?;
-        Ok(Lvalue::Ptr {
-            ptr,
-            extra: LvalueExtra::None,
-        })
-    }
-
-    pub(super) fn eval_lvalue_projection(
-        &mut self,
-        base: Lvalue,
-        base_ty: Ty<'tcx>,
-        proj_elem: &mir::ProjectionElem<'tcx, mir::Local, Ty<'tcx>>,
-    ) -> EvalResult<'tcx, Lvalue> {
-        use rustc::mir::ProjectionElem::*;
-        let (ptr, extra) = match *proj_elem {
-            Field(field, field_ty) => {
-                return self.lvalue_field(base, field, base_ty, field_ty);
-            }
-
-            Downcast(_, variant) => {
-                let base_layout = self.type_layout(base_ty)?;
-                // FIXME(solson)
-                let base = self.force_allocation(base)?;
-                let (base_ptr, base_extra) = base.to_ptr_extra_aligned();
-
-                use rustc::ty::layout::Layout::*;
-                let extra = match *base_layout {
-                    General { .. } => LvalueExtra::DowncastVariant(variant),
-                    RawNullablePointer { .. } |
-                    StructWrappedNullablePointer { .. } => base_extra,
-                    _ => bug!("variant downcast on non-aggregate: {:?}", base_layout),
-                };
-                (base_ptr, extra)
-            }
-
-            Deref => {
-                let val = self.read_lvalue(base)?;
-
-                let pointee_type = match base_ty.sty {
-                    ty::TyRawPtr(ref tam) |
-                    ty::TyRef(_, ref tam) => tam.ty,
-                    ty::TyAdt(def, _) if def.is_box() => base_ty.boxed_ty(),
-                    _ => bug!("can only deref pointer types"),
-                };
-
-                trace!("deref to {} on {:?}", pointee_type, val);
-
-                return self.val_to_lvalue(val, pointee_type);
-            }
-
-            Index(local) => {
-                let value = self.frame().get_local(local)?;
-                let ty = self.tcx.types.usize;
-                let n = self.value_to_primval(ValTy { value, ty })?.to_u64()?;
-                return self.lvalue_index(base, base_ty, n);
-            }
-
-            ConstantIndex {
-                offset,
-                min_length,
-                from_end,
-            } => {
-                // FIXME(solson)
-                let base = self.force_allocation(base)?;
-                let (base_ptr, _) = base.to_ptr_extra_aligned();
-
-                let (elem_ty, n) = base.elem_ty_and_len(base_ty);
-                let elem_size = self.type_size(elem_ty)?.expect(
-                    "sequence element must be sized",
-                );
-                assert!(n >= min_length as u64);
-
-                let index = if from_end {
-                    n - u64::from(offset)
-                } else {
-                    u64::from(offset)
-                };
-
-                let ptr = base_ptr.offset(index * elem_size, &self)?;
-                (ptr, LvalueExtra::None)
-            }
-
-            Subslice { from, to } => {
-                // FIXME(solson)
-                let base = self.force_allocation(base)?;
-                let (base_ptr, _) = base.to_ptr_extra_aligned();
-
-                let (elem_ty, n) = base.elem_ty_and_len(base_ty);
-                let elem_size = self.type_size(elem_ty)?.expect(
-                    "slice element must be sized",
-                );
-                assert!(u64::from(from) <= n - u64::from(to));
-                let ptr = base_ptr.offset(u64::from(from) * elem_size, &self)?;
-                // subslicing arrays produces arrays
-                let extra = if self.type_is_sized(base_ty) {
-                    LvalueExtra::None
-                } else {
-                    LvalueExtra::Length(n - u64::from(to) - u64::from(from))
-                };
-                (ptr, extra)
-            }
-        };
-
-        Ok(Lvalue::Ptr { ptr, extra })
-    }
-
-    pub fn lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> {
-        self.monomorphize(
-            lvalue.ty(self.mir(), self.tcx).to_ty(self.tcx),
-            self.substs(),
-        )
-    }
-}
diff --git a/src/librustc_mir/interpret/machine.rs b/src/librustc_mir/interpret/machine.rs
deleted file mode 100644 (file)
index 3df5d1b..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-//! This module contains everything needed to instantiate an interpreter.
-//! This separation exists to ensure that no fancy miri features like
-//! interpreting common C functions leak into CTFE.
-
-use super::{EvalResult, EvalContext, Lvalue, PrimVal, ValTy};
-
-use rustc::{mir, ty};
-use syntax::codemap::Span;
-use syntax::ast::Mutability;
-
-/// Methods of this trait signify a point where CTFE evaluation would fail,
-/// and where some use-case-dependent behaviour can be applied instead
-pub trait Machine<'tcx>: Sized {
-    /// Additional data that can be accessed via the EvalContext
-    type Data;
-
-    /// Additional data that can be accessed via the Memory
-    type MemoryData;
-
-    /// Additional memory kinds a machine wishes to distinguish from the builtin ones
-    type MemoryKinds: ::std::fmt::Debug + PartialEq + Copy + Clone;
-
-    /// Entry point to all function calls.
-    ///
-    /// Returns Ok(true) when the function call was handled completely by the machine,
-    /// e.g. due to missing MIR
-    ///
-    /// Returns Ok(false) if a new stack frame was pushed
-    fn eval_fn_call<'a>(
-        ecx: &mut EvalContext<'a, 'tcx, Self>,
-        instance: ty::Instance<'tcx>,
-        destination: Option<(Lvalue, mir::BasicBlock)>,
-        args: &[ValTy<'tcx>],
-        span: Span,
-        sig: ty::FnSig<'tcx>,
-    ) -> EvalResult<'tcx, bool>;
-
-    /// Directly process an intrinsic without pushing a stack frame.
-    fn call_intrinsic<'a>(
-        ecx: &mut EvalContext<'a, 'tcx, Self>,
-        instance: ty::Instance<'tcx>,
-        args: &[ValTy<'tcx>],
-        dest: Lvalue,
-        dest_ty: ty::Ty<'tcx>,
-        dest_layout: &'tcx ty::layout::Layout,
-        target: mir::BasicBlock,
-    ) -> EvalResult<'tcx>;
-
-    /// Called for all binary operations except on float types.
-    ///
-    /// Returns `None` if the operation should be handled by the integer
-    /// op code path, in order to share more code between machines
-    ///
-    /// Returns a (value, overflowed) pair if the operation succeeded
-    fn try_ptr_op<'a>(
-        ecx: &EvalContext<'a, 'tcx, Self>,
-        bin_op: mir::BinOp,
-        left: PrimVal,
-        left_ty: ty::Ty<'tcx>,
-        right: PrimVal,
-        right_ty: ty::Ty<'tcx>,
-    ) -> EvalResult<'tcx, Option<(PrimVal, bool)>>;
-
-    /// Called when trying to mark machine-defined `MemoryKinds` as static
-    fn mark_static_initialized(m: Self::MemoryKinds) -> EvalResult<'tcx>;
-
-    /// Heap allocations via the `box` keyword
-    ///
-    /// Returns a pointer to the allocated memory
-    fn box_alloc<'a>(
-        ecx: &mut EvalContext<'a, 'tcx, Self>,
-        ty: ty::Ty<'tcx>,
-        dest: Lvalue,
-    ) -> EvalResult<'tcx>;
-
-    /// Called when trying to access a global declared with a `linkage` attribute
-    fn global_item_with_linkage<'a>(
-        ecx: &mut EvalContext<'a, 'tcx, Self>,
-        instance: ty::Instance<'tcx>,
-        mutability: Mutability,
-    ) -> EvalResult<'tcx>;
-}
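
// A hedged sketch of what instantiating `Machine` could look like, assuming
// only the trait as declared above. `DummyMachine` and `NoKinds` are
// hypothetical names, not part of the source; this elides most methods and
// will not compile outside the interpreter crate.
#[derive(Debug, PartialEq, Copy, Clone)]
enum NoKinds {} // no machine-specific memory kinds

struct DummyMachine;

impl<'tcx> Machine<'tcx> for DummyMachine {
    type Data = ();        // nothing extra on the EvalContext
    type MemoryData = ();  // nothing extra on the Memory
    type MemoryKinds = NoKinds;

    fn eval_fn_call<'a>(
        _ecx: &mut EvalContext<'a, 'tcx, Self>,
        _instance: ty::Instance<'tcx>,
        _destination: Option<(Lvalue, mir::BasicBlock)>,
        _args: &[ValTy<'tcx>],
        _span: Span,
        _sig: ty::FnSig<'tcx>,
    ) -> EvalResult<'tcx, bool> {
        // A real machine either emulates the call fully (Ok(true)) or pushes
        // a stack frame for the callee's MIR and returns Ok(false).
        unimplemented!()
    }

    // `call_intrinsic`, `try_ptr_op`, `mark_static_initialized`, `box_alloc`
    // and `global_item_with_linkage` would be filled in the same way.
}
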
diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs
deleted file mode 100644 (file)
index bde7929..0000000
+++ /dev/null
@@ -1,1700 +0,0 @@
-use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian, BigEndian};
-use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque};
-use std::{fmt, iter, ptr, mem, io};
-use std::cell::Cell;
-
-use rustc::ty::Instance;
-use rustc::ty::layout::{self, TargetDataLayout, HasDataLayout};
-use syntax::ast::Mutability;
-use rustc::middle::region;
-
-use super::{EvalResult, EvalErrorKind, PrimVal, Pointer, EvalContext, DynamicLifetime, Machine,
-            RangeMap, AbsLvalue};
-
-////////////////////////////////////////////////////////////////////////////////
-// Locks
-////////////////////////////////////////////////////////////////////////////////
-
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum AccessKind {
-    Read,
-    Write,
-}
-
-/// Information about a lock that is currently held.
-#[derive(Clone, Debug)]
-struct LockInfo<'tcx> {
-    /// Stores for which lifetimes (of the original write lock) we got
-    /// which suspensions.
-    suspended: HashMap<WriteLockId<'tcx>, Vec<region::Scope>>,
-    /// The current state of the lock that's actually effective.
-    active: Lock,
-}
-
-/// Write locks are identified by a stack frame and an "abstract" (untyped) lvalue.
-/// It may be tempting to use the lifetime as identifier, but that does not work
-/// for two reasons:
-/// * First of all, due to subtyping, the same lock may be referred to with different
-///   lifetimes.
-/// * Secondly, different write locks may actually have the same lifetime.  See `test2`
-///   in `run-pass/many_shr_bor.rs`.
-/// The Id is "captured" when the lock is first suspended; at that point, the borrow checker
-/// considers the path frozen and hence the Id remains stable.
-#[derive(Clone, Debug, PartialEq, Eq, Hash)]
-struct WriteLockId<'tcx> {
-    frame: usize,
-    path: AbsLvalue<'tcx>,
-}
-
-#[derive(Clone, Debug, PartialEq)]
-pub enum Lock {
-    NoLock,
-    WriteLock(DynamicLifetime),
-    ReadLock(Vec<DynamicLifetime>), // This should never be empty -- that would be a read lock held and nobody there to release it...
-}
-use self::Lock::*;
-
-impl<'tcx> Default for LockInfo<'tcx> {
-    fn default() -> Self {
-        LockInfo::new(NoLock)
-    }
-}
-
-impl<'tcx> LockInfo<'tcx> {
-    fn new(lock: Lock) -> LockInfo<'tcx> {
-        LockInfo {
-            suspended: HashMap::new(),
-            active: lock,
-        }
-    }
-
-    fn access_permitted(&self, frame: Option<usize>, access: AccessKind) -> bool {
-        use self::AccessKind::*;
-        match (&self.active, access) {
-            (&NoLock, _) => true,
-            (&ReadLock(ref lfts), Read) => {
-                assert!(!lfts.is_empty(), "Someone left an empty read lock behind.");
-                // Read access to read-locked region is okay, no matter who's holding the read lock.
-                true
-            }
-            (&WriteLock(ref lft), _) => {
-                // All access is okay if we are the ones holding it
-                Some(lft.frame) == frame
-            }
-            _ => false, // Nothing else is okay.
-        }
-    }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Allocations and pointers
-////////////////////////////////////////////////////////////////////////////////
-
-#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)]
-pub struct AllocId(u64);
-
-#[derive(Debug)]
-pub enum AllocIdKind {
-    /// We can't ever have more than `usize::max_value` functions at the same time
-    /// since we never "deallocate" functions
-    Function(usize),
-    /// Locals and heap allocations (also statics for now, but those will get their
-    /// own variant soonish).
-    Runtime(u64),
-}
-
-impl AllocIdKind {
-    pub fn into_alloc_id(self) -> AllocId {
-        match self {
-            AllocIdKind::Function(n) => AllocId(n as u64),
-            AllocIdKind::Runtime(n) => AllocId((1 << 63) | n),
-        }
-    }
-}
-
-impl AllocId {
-    /// Currently uses the top bit to discriminate between the `AllocIdKind`s
-    fn discriminant(self) -> u64 {
-        self.0 >> 63
-    }
-    /// Yields everything but the discriminant bits
-    pub fn index(self) -> u64 {
-        self.0 & ((1 << 63) - 1)
-    }
-    pub fn into_alloc_id_kind(self) -> AllocIdKind {
-        match self.discriminant() {
-            0 => AllocIdKind::Function(self.index() as usize),
-            1 => AllocIdKind::Runtime(self.index()),
-            n => bug!("got discriminant {} for AllocId", n),
-        }
-    }
-}
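
// Illustrative sketch: the AllocId encoding above, replayed with plain u64
// arithmetic. The top bit is the discriminant (0 = Function, 1 = Runtime),
// the remaining 63 bits are the index. `runtime`/`function` are ad-hoc
// helpers, not the real API; runnable stand-alone.
fn main() {
    let runtime = |n: u64| (1u64 << 63) | n; // AllocIdKind::Runtime(n)
    let function = |n: u64| n;               // AllocIdKind::Function(n)

    let id = runtime(5);
    assert_eq!(id >> 63, 1);                 // discriminant(): Runtime
    assert_eq!(id & ((1 << 63) - 1), 5);     // index()

    let id = function(7);
    assert_eq!(id >> 63, 0);                 // discriminant(): Function
    assert_eq!(id & ((1 << 63) - 1), 7);     // index()
}
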
-
-impl fmt::Display for AllocId {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{:?}", self.into_alloc_id_kind())
-    }
-}
-
-impl fmt::Debug for AllocId {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{:?}", self.into_alloc_id_kind())
-    }
-}
-
-#[derive(Debug)]
-pub struct Allocation<'tcx, M> {
-    /// The actual bytes of the allocation.
-    /// Note that the bytes of a pointer represent the offset of the pointer
-    pub bytes: Vec<u8>,
-    /// Maps from byte addresses to allocations.
-    /// Only the first byte of a pointer is inserted into the map.
-    pub relocations: BTreeMap<u64, AllocId>,
-    /// Denotes undefined memory. Reading from undefined memory is forbidden in miri
-    pub undef_mask: UndefMask,
-    /// The alignment of the allocation to detect unaligned reads.
-    pub align: u64,
-    /// Whether the allocation may be modified.
-    pub mutable: Mutability,
-    /// Use the `mark_static_initalized` method of `Memory` to ensure that an error occurs if the memory of this
-    /// allocation is modified or deallocated in the future.
-    /// Helps guarantee that stack allocations aren't deallocated via `rust_deallocate`
-    pub kind: MemoryKind<M>,
-    /// Memory regions that are locked by some function
-    locks: RangeMap<LockInfo<'tcx>>,
-}
-
-impl<'tcx, M> Allocation<'tcx, M> {
-    fn check_locks(
-        &self,
-        frame: Option<usize>,
-        offset: u64,
-        len: u64,
-        access: AccessKind,
-    ) -> Result<(), LockInfo<'tcx>> {
-        if len == 0 {
-            return Ok(());
-        }
-        for lock in self.locks.iter(offset, len) {
-            // Check if the lock is in conflict with the access.
-            if !lock.access_permitted(frame, access) {
-                return Err(lock.clone());
-            }
-        }
-        Ok(())
-    }
-}
-
-#[derive(Debug, PartialEq, Copy, Clone)]
-pub enum MemoryKind<T> {
-    /// Error if deallocated except during a stack pop
-    Stack,
-    /// Static in the process of being initialized.
-    /// The difference is important: an immutable static referring to a
-    /// mutable initialized static would freeze it immutably, and we would not
-    /// be able to distinguish already initialized statics from uninitialized ones
-    UninitializedStatic,
-    /// May never be deallocated
-    Static,
-    /// Additional memory kinds a machine wishes to distinguish from the builtin ones
-    Machine(T),
-}
-
-#[derive(Copy, Clone, Debug, Eq, PartialEq)]
-pub struct MemoryPointer {
-    pub alloc_id: AllocId,
-    pub offset: u64,
-}
-
-impl<'tcx> MemoryPointer {
-    pub fn new(alloc_id: AllocId, offset: u64) -> Self {
-        MemoryPointer { alloc_id, offset }
-    }
-
-    pub(crate) fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
-        MemoryPointer::new(
-            self.alloc_id,
-            cx.data_layout().wrapping_signed_offset(self.offset, i),
-        )
-    }
-
-    pub fn overflowing_signed_offset<C: HasDataLayout>(self, i: i128, cx: C) -> (Self, bool) {
-        let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset, i);
-        (MemoryPointer::new(self.alloc_id, res), over)
-    }
-
-    pub(crate) fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
-        Ok(MemoryPointer::new(
-            self.alloc_id,
-            cx.data_layout().signed_offset(self.offset, i)?,
-        ))
-    }
-
-    pub fn overflowing_offset<C: HasDataLayout>(self, i: u64, cx: C) -> (Self, bool) {
-        let (res, over) = cx.data_layout().overflowing_offset(self.offset, i);
-        (MemoryPointer::new(self.alloc_id, res), over)
-    }
-
-    pub fn offset<C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
-        Ok(MemoryPointer::new(
-            self.alloc_id,
-            cx.data_layout().offset(self.offset, i)?,
-        ))
-    }
-}
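
// Illustrative sketch: the offset helpers above bottom out in the data
// layout's checked arithmetic. Modelled here with u64::overflowing_add and a
// 64-bit pointer width assumed for simplicity; not the real API.
fn overflowing_offset(offset: u64, i: u64) -> (u64, bool) {
    let (res, over) = offset.overflowing_add(i);
    (res, over) // a 32-bit target would additionally truncate and check `res`
}

fn main() {
    assert_eq!(overflowing_offset(10, 4), (14, false));
    // Wrapping around the address space is reported, not silently accepted:
    assert_eq!(overflowing_offset(u64::MAX, 1), (0, true));
}
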
-
-////////////////////////////////////////////////////////////////////////////////
-// Top-level interpreter memory
-////////////////////////////////////////////////////////////////////////////////
-
-pub struct Memory<'a, 'tcx, M: Machine<'tcx>> {
-    /// Additional data required by the Machine
-    pub data: M::MemoryData,
-
-    /// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations).
-    alloc_map: HashMap<u64, Allocation<'tcx, M::MemoryKinds>>,
-
-    /// The AllocId to assign to the next new regular allocation. Always incremented, never gets smaller.
-    next_alloc_id: u64,
-
-    /// Number of virtual bytes allocated.
-    memory_usage: u64,
-
-    /// Maximum number of virtual bytes that may be allocated.
-    memory_size: u64,
-
-    /// Function "allocations". They exist solely so pointers have something to point to, and
-    /// we can figure out what they point to.
-    functions: Vec<Instance<'tcx>>,
-
-    /// Inverse map of `functions` so we don't allocate a new pointer every time we need one
-    function_alloc_cache: HashMap<Instance<'tcx>, AllocId>,
-
-    /// Target machine data layout to emulate.
-    pub layout: &'a TargetDataLayout,
-
-    /// A cache for basic byte allocations keyed by their contents. This is used to deduplicate
-    /// allocations for string and bytestring literals.
-    literal_alloc_cache: HashMap<Vec<u8>, AllocId>,
-
-    /// To avoid having to pass flags to every single memory access, we have some global state saying whether
-    /// alignment checking is currently enforced for read and/or write accesses.
-    reads_are_aligned: Cell<bool>,
-    writes_are_aligned: Cell<bool>,
-
-    /// The current stack frame.  Used to check accesses against locks.
-    pub(super) cur_frame: usize,
-}
-
-impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
-    pub fn new(layout: &'a TargetDataLayout, max_memory: u64, data: M::MemoryData) -> Self {
-        Memory {
-            data,
-            alloc_map: HashMap::new(),
-            functions: Vec::new(),
-            function_alloc_cache: HashMap::new(),
-            next_alloc_id: 0,
-            layout,
-            memory_size: max_memory,
-            memory_usage: 0,
-            literal_alloc_cache: HashMap::new(),
-            reads_are_aligned: Cell::new(true),
-            writes_are_aligned: Cell::new(true),
-            cur_frame: usize::max_value(),
-        }
-    }
-
-    pub fn allocations<'x>(
-        &'x self,
-    ) -> impl Iterator<Item = (AllocId, &'x Allocation<M::MemoryKinds>)> {
-        self.alloc_map.iter().map(|(&id, alloc)| {
-            (AllocIdKind::Runtime(id).into_alloc_id(), alloc)
-        })
-    }
-
-    pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> MemoryPointer {
-        if let Some(&alloc_id) = self.function_alloc_cache.get(&instance) {
-            return MemoryPointer::new(alloc_id, 0);
-        }
-        let id = self.functions.len();
-        debug!("creating fn ptr: {}", id);
-        self.functions.push(instance);
-        let alloc_id = AllocIdKind::Function(id).into_alloc_id();
-        self.function_alloc_cache.insert(instance, alloc_id);
-        MemoryPointer::new(alloc_id, 0)
-    }
-
-    pub fn allocate_cached(&mut self, bytes: &[u8]) -> EvalResult<'tcx, MemoryPointer> {
-        if let Some(&alloc_id) = self.literal_alloc_cache.get(bytes) {
-            return Ok(MemoryPointer::new(alloc_id, 0));
-        }
-
-        let ptr = self.allocate(
-            bytes.len() as u64,
-            1,
-            MemoryKind::UninitializedStatic,
-        )?;
-        self.write_bytes(ptr.into(), bytes)?;
-        self.mark_static_initalized(
-            ptr.alloc_id,
-            Mutability::Immutable,
-        )?;
-        self.literal_alloc_cache.insert(
-            bytes.to_vec(),
-            ptr.alloc_id,
-        );
-        Ok(ptr)
-    }
-
-    pub fn allocate(
-        &mut self,
-        size: u64,
-        align: u64,
-        kind: MemoryKind<M::MemoryKinds>,
-    ) -> EvalResult<'tcx, MemoryPointer> {
-        assert_ne!(align, 0);
-        assert!(align.is_power_of_two());
-
-        if self.memory_size - self.memory_usage < size {
-            return err!(OutOfMemory {
-                allocation_size: size,
-                memory_size: self.memory_size,
-                memory_usage: self.memory_usage,
-            });
-        }
-        self.memory_usage += size;
-        assert_eq!(size as usize as u64, size);
-        let alloc = Allocation {
-            bytes: vec![0; size as usize],
-            relocations: BTreeMap::new(),
-            undef_mask: UndefMask::new(size),
-            align,
-            kind,
-            mutable: Mutability::Mutable,
-            locks: RangeMap::new(),
-        };
-        let id = self.next_alloc_id;
-        self.next_alloc_id += 1;
-        self.alloc_map.insert(id, alloc);
-        Ok(MemoryPointer::new(
-            AllocIdKind::Runtime(id).into_alloc_id(),
-            0,
-        ))
-    }
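
// Illustrative sketch: the out-of-memory guard in `allocate`, reduced to its
// arithmetic. `fits` is an ad-hoc name. `memory_size - memory_usage` is the
// remaining budget; it cannot underflow because usage never exceeds the size.
fn fits(memory_size: u64, memory_usage: u64, request: u64) -> bool {
    memory_size - memory_usage >= request
}

fn main() {
    assert!(fits(100, 90, 10));
    assert!(!fits(100, 90, 11)); // would return an OutOfMemory error
}
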
-
-    pub fn reallocate(
-        &mut self,
-        ptr: MemoryPointer,
-        old_size: u64,
-        old_align: u64,
-        new_size: u64,
-        new_align: u64,
-        kind: MemoryKind<M::MemoryKinds>,
-    ) -> EvalResult<'tcx, MemoryPointer> {
-        use std::cmp::min;
-
-        if ptr.offset != 0 {
-            return err!(ReallocateNonBasePtr);
-        }
-        if let Ok(alloc) = self.get(ptr.alloc_id) {
-            if alloc.kind != kind {
-                return err!(ReallocatedWrongMemoryKind(
-                    format!("{:?}", alloc.kind),
-                    format!("{:?}", kind),
-                ));
-            }
-        }
-
-        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc"
-        let new_ptr = self.allocate(new_size, new_align, kind)?;
-        self.copy(
-            ptr.into(),
-            new_ptr.into(),
-            min(old_size, new_size),
-            min(old_align, new_align),
-            /*nonoverlapping*/
-            true,
-        )?;
-        self.deallocate(ptr, Some((old_size, old_align)), kind)?;
-
-        Ok(new_ptr)
-    }
-
-    pub fn deallocate(
-        &mut self,
-        ptr: MemoryPointer,
-        size_and_align: Option<(u64, u64)>,
-        kind: MemoryKind<M::MemoryKinds>,
-    ) -> EvalResult<'tcx> {
-        if ptr.offset != 0 {
-            return err!(DeallocateNonBasePtr);
-        }
-
-        let alloc_id = match ptr.alloc_id.into_alloc_id_kind() {
-            AllocIdKind::Function(_) => {
-                return err!(DeallocatedWrongMemoryKind(
-                    "function".to_string(),
-                    format!("{:?}", kind),
-                ))
-            }
-            AllocIdKind::Runtime(id) => id,
-        };
-
-        let alloc = match self.alloc_map.remove(&alloc_id) {
-            Some(alloc) => alloc,
-            None => return err!(DoubleFree),
-        };
-
-        // It is okay for us to still hold locks on deallocation -- for example, we could store data we own
-        // in a local, and the local could be deallocated (from StorageDead) before the function returns.
-        // However, we should check *something*.  For now, we make sure that there is no conflicting write
-        // lock by another frame.  We *have* to permit deallocation if we hold a read lock.
-        // TODO: Figure out the exact rules here.
-        alloc
-            .check_locks(
-                Some(self.cur_frame),
-                0,
-                alloc.bytes.len() as u64,
-                AccessKind::Read,
-            )
-            .map_err(|lock| {
-                EvalErrorKind::DeallocatedLockedMemory {
-                    ptr,
-                    lock: lock.active,
-                }
-            })?;
-
-        if alloc.kind != kind {
-            return err!(DeallocatedWrongMemoryKind(
-                format!("{:?}", alloc.kind),
-                format!("{:?}", kind),
-            ));
-        }
-        if let Some((size, align)) = size_and_align {
-            if size != alloc.bytes.len() as u64 || align != alloc.align {
-                return err!(IncorrectAllocationInformation);
-            }
-        }
-
-        self.memory_usage -= alloc.bytes.len() as u64;
-        debug!("deallocated : {}", ptr.alloc_id);
-
-        Ok(())
-    }
-
-    pub fn pointer_size(&self) -> u64 {
-        self.layout.pointer_size.bytes()
-    }
-
-    pub fn endianess(&self) -> layout::Endian {
-        self.layout.endian
-    }
-
-    /// Check that the pointer is aligned AND non-NULL.
-    pub fn check_align(&self, ptr: Pointer, align: u64, access: Option<AccessKind>) -> EvalResult<'tcx> {
-        // Check non-NULL/Undef, extract offset
-        let (offset, alloc_align) = match ptr.into_inner_primval() {
-            PrimVal::Ptr(ptr) => {
-                let alloc = self.get(ptr.alloc_id)?;
-                (ptr.offset, alloc.align)
-            }
-            PrimVal::Bytes(bytes) => {
-                let v = ((bytes as u128) % (1 << self.pointer_size())) as u64;
-                if v == 0 {
-                    return err!(InvalidNullPointerUsage);
-                }
-                (v, align) // the base address of the "integer allocation" is 0 and hence it is always aligned
-            }
-            PrimVal::Undef => return err!(ReadUndefBytes),
-        };
-        // See if alignment checking is disabled
-        let enforce_alignment = match access {
-            Some(AccessKind::Read) => self.reads_are_aligned.get(),
-            Some(AccessKind::Write) => self.writes_are_aligned.get(),
-            None => true,
-        };
-        if !enforce_alignment {
-            return Ok(());
-        }
-        // Check alignment
-        if alloc_align < align {
-            return err!(AlignmentCheckFailed {
-                has: alloc_align,
-                required: align,
-            });
-        }
-        if offset % align == 0 {
-            Ok(())
-        } else {
-            err!(AlignmentCheckFailed {
-                has: offset % align,
-                required: align,
-            })
-        }
-    }
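
// Illustrative sketch: the two alignment checks above, in isolation. The
// allocation itself must be at least as aligned as required, and the offset
// within it must be a multiple of the required alignment. `is_aligned` is an
// ad-hoc name; `Err` carries the alignment we actually `has`.
fn is_aligned(alloc_align: u64, offset: u64, required: u64) -> Result<(), u64> {
    if alloc_align < required {
        return Err(alloc_align); // AlignmentCheckFailed { has: alloc_align, .. }
    }
    if offset % required == 0 {
        Ok(())
    } else {
        Err(offset % required) // AlignmentCheckFailed { has: offset % required, .. }
    }
}

fn main() {
    assert_eq!(is_aligned(8, 16, 8), Ok(()));
    assert_eq!(is_aligned(8, 12, 8), Err(4));
    assert_eq!(is_aligned(4, 0, 8), Err(4));
}
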
-
-    pub fn check_bounds(&self, ptr: MemoryPointer, access: bool) -> EvalResult<'tcx> {
-        let alloc = self.get(ptr.alloc_id)?;
-        let allocation_size = alloc.bytes.len() as u64;
-        if ptr.offset > allocation_size {
-            return err!(PointerOutOfBounds {
-                ptr,
-                access,
-                allocation_size,
-            });
-        }
-        Ok(())
-    }
-}
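
// Illustrative sketch: `check_bounds` uses `>` rather than `>=`, so an offset
// equal to the allocation size -- the one-past-the-end pointer -- is accepted.
// Accesses still stay in bounds because callers first offset the pointer by
// the access size before checking. `in_bounds` is an ad-hoc name.
fn in_bounds(offset: u64, allocation_size: u64) -> bool {
    offset <= allocation_size
}

fn main() {
    assert!(in_bounds(16, 16));  // one past the end: ok
    assert!(!in_bounds(17, 16)); // PointerOutOfBounds
}
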
-
-/// Locking
-impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
-    pub(crate) fn check_locks(
-        &self,
-        ptr: MemoryPointer,
-        len: u64,
-        access: AccessKind,
-    ) -> EvalResult<'tcx> {
-        if len == 0 {
-            return Ok(());
-        }
-        let alloc = self.get(ptr.alloc_id)?;
-        let frame = self.cur_frame;
-        alloc
-            .check_locks(Some(frame), ptr.offset, len, access)
-            .map_err(|lock| {
-                EvalErrorKind::MemoryLockViolation {
-                    ptr,
-                    len,
-                    frame,
-                    access,
-                    lock: lock.active,
-                }.into()
-            })
-    }
-
-    /// Acquire the lock for the given lifetime
-    pub(crate) fn acquire_lock(
-        &mut self,
-        ptr: MemoryPointer,
-        len: u64,
-        region: Option<region::Scope>,
-        kind: AccessKind,
-    ) -> EvalResult<'tcx> {
-        let frame = self.cur_frame;
-        assert!(len > 0);
-        trace!(
-            "Frame {} acquiring {:?} lock at {:?}, size {} for region {:?}",
-            frame,
-            kind,
-            ptr,
-            len,
-            region
-        );
-        self.check_bounds(ptr.offset(len, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
-        let alloc = self.get_mut_unchecked(ptr.alloc_id)?;
-
-        // Iterate over our range and acquire the lock.  If the range is already split into pieces,
-        // we have to manipulate all of them.
-        let lifetime = DynamicLifetime { frame, region };
-        for lock in alloc.locks.iter_mut(ptr.offset, len) {
-            if !lock.access_permitted(None, kind) {
-                return err!(MemoryAcquireConflict {
-                    ptr,
-                    len,
-                    kind,
-                    lock: lock.active.clone(),
-                });
-            }
-            // See what we have to do
-            match (&mut lock.active, kind) {
-                (active @ &mut NoLock, AccessKind::Write) => {
-                    *active = WriteLock(lifetime);
-                }
-                (active @ &mut NoLock, AccessKind::Read) => {
-                    *active = ReadLock(vec![lifetime]);
-                }
-                (&mut ReadLock(ref mut lifetimes), AccessKind::Read) => {
-                    lifetimes.push(lifetime);
-                }
-                _ => bug!("We already checked that there is no conflicting lock"),
-            }
-        }
-        Ok(())
-    }
-
-    /// Release or suspend a write lock of the given lifetime prematurely.
-    /// When releasing, if there is a read lock or someone else's write lock, that's an error.
-    /// If no lock is held, that's fine.  This can happen when e.g. a local is initialized
-    /// from a constant, and then suspended.
-    /// When suspending, the same cases are fine; we just register an additional suspension.
-    pub(crate) fn suspend_write_lock(
-        &mut self,
-        ptr: MemoryPointer,
-        len: u64,
-        lock_path: &AbsLvalue<'tcx>,
-        suspend: Option<region::Scope>,
-    ) -> EvalResult<'tcx> {
-        assert!(len > 0);
-        let cur_frame = self.cur_frame;
-        let alloc = self.get_mut_unchecked(ptr.alloc_id)?;
-
-        'locks: for lock in alloc.locks.iter_mut(ptr.offset, len) {
-            let is_our_lock = match lock.active {
-                WriteLock(lft) =>
-                    // Double-check that we are holding the lock.
-                    // (Due to subtyping, checking the region would not make any sense.)
-                    lft.frame == cur_frame,
-                ReadLock(_) | NoLock => false,
-            };
-            if is_our_lock {
-                trace!("Releasing {:?}", lock.active);
-                // Disable the lock
-                lock.active = NoLock;
-            } else {
-                trace!(
-                    "Not touching {:?} as it is not our lock",
-                    lock.active,
-                );
-            }
-            // Check if we want to register a suspension
-            if let Some(suspend_region) = suspend {
-                let lock_id = WriteLockId {
-                    frame: cur_frame,
-                    path: lock_path.clone(),
-                };
-                trace!("Adding suspension to {:?}", lock_id);
-                let mut new_suspension = false;
-                lock.suspended
-                    .entry(lock_id)
-                    // Remember whether we added a new suspension or not
-                    .or_insert_with(|| { new_suspension = true; Vec::new() })
-                    .push(suspend_region);
-                // If the suspension is new, we should have held the lock.
-                // If there already was a suspension, we should NOT have held it.
-                if new_suspension == is_our_lock {
-                    // All is well
-                    continue 'locks;
-                }
-            } else {
-                if !is_our_lock {
-                    // All is well.
-                    continue 'locks;
-                }
-            }
-            // If we get here, releasing this is an error except for NoLock.
-            if lock.active != NoLock {
-                return err!(InvalidMemoryLockRelease {
-                    ptr,
-                    len,
-                    frame: cur_frame,
-                    lock: lock.active.clone(),
-                });
-            }
-        }
-
-        Ok(())
-    }
-
-    /// Release a suspension from the write lock.  If this is the last suspension or if there is no suspension, acquire the lock.
-    pub(crate) fn recover_write_lock(
-        &mut self,
-        ptr: MemoryPointer,
-        len: u64,
-        lock_path: &AbsLvalue<'tcx>,
-        lock_region: Option<region::Scope>,
-        suspended_region: region::Scope,
-    ) -> EvalResult<'tcx> {
-        assert!(len > 0);
-        let cur_frame = self.cur_frame;
-        let lock_id = WriteLockId {
-            frame: cur_frame,
-            path: lock_path.clone(),
-        };
-        let alloc = self.get_mut_unchecked(ptr.alloc_id)?;
-
-        for lock in alloc.locks.iter_mut(ptr.offset, len) {
-            // Check if we have a suspension here
-            let (got_the_lock, remove_suspension) = match lock.suspended.get_mut(&lock_id) {
-                None => {
-                    trace!("No suspension around, we can just acquire");
-                    (true, false)
-                }
-                Some(suspensions) => {
-                    trace!("Found suspension of {:?}, removing it", lock_id);
-                    // That's us!  Remove suspension (it should be in there).  The same suspension can
-                    // occur multiple times (when there are multiple shared borrows of this that have the same
-                    // lifetime); only remove one of them.
-                    let idx = match suspensions.iter().enumerate().find(|&(_, re)| re == &suspended_region) {
-                        None => // TODO: Can the user trigger this?
-                            bug!("We have this lock suspended, but not for the given region."),
-                        Some((idx, _)) => idx
-                    };
-                    suspensions.remove(idx);
-                    let got_lock = suspensions.is_empty();
-                    if got_lock {
-                        trace!("All suspensions are gone, we can have the lock again");
-                    }
-                    (got_lock, got_lock)
-                }
-            };
-            if remove_suspension {
-                // with NLL, we could do that up in the match above...
-                assert!(got_the_lock);
-                lock.suspended.remove(&lock_id);
-            }
-            if got_the_lock {
-                match lock.active {
-                    ref mut active @ NoLock => {
-                        *active = WriteLock(
-                            DynamicLifetime {
-                                frame: cur_frame,
-                                region: lock_region,
-                            }
-                        );
-                    }
-                    _ => {
-                        return err!(MemoryAcquireConflict {
-                            ptr,
-                            len,
-                            kind: AccessKind::Write,
-                            lock: lock.active.clone(),
-                        })
-                    }
-                }
-            }
-        }
-
-        Ok(())
-    }
-
-    pub(crate) fn locks_lifetime_ended(&mut self, ending_region: Option<region::Scope>) {
-        let cur_frame = self.cur_frame;
-        trace!(
-            "Releasing frame {} locks that expire at {:?}",
-            cur_frame,
-            ending_region
-        );
-        let has_ended = |lifetime: &DynamicLifetime| -> bool {
-            if lifetime.frame != cur_frame {
-                return false;
-            }
-            match ending_region {
-                None => true, // When a function ends, we end *all* its locks. It's okay for a function to still have lifetime-related locks
-                // when it returns; that can happen e.g. with NLL when a lifetime can, but does not have to, extend beyond the
-                // end of a function.  Same for a function still having recoveries.
-                Some(ending_region) => lifetime.region == Some(ending_region),
-            }
-        };
-
-        for alloc in self.alloc_map.values_mut() {
-            for lock in alloc.locks.iter_mut_all() {
-                // Delete everything that ends now -- i.e., keep only the other lifetimes.
-                let lock_ended = match lock.active {
-                    WriteLock(ref lft) => has_ended(lft),
-                    ReadLock(ref mut lfts) => {
-                        lfts.retain(|lft| !has_ended(lft));
-                        lfts.is_empty()
-                    }
-                    NoLock => false,
-                };
-                if lock_ended {
-                    lock.active = NoLock;
-                }
-                // Also clean up suspended write locks when the function returns
-                if ending_region.is_none() {
-                    lock.suspended.retain(|id, _suspensions| id.frame != cur_frame);
-                }
-            }
-            // Clean up the map
-            alloc.locks.retain(|lock| match lock.active {
-                NoLock => lock.suspended.len() > 0,
-                _ => true,
-            });
-        }
-    }
-}
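
// Illustrative sketch: the lock state machine used above, stripped of byte
// ranges, frames, and regions. Acquiring stacks read locks, write locks are
// exclusive, and a write lock can be suspended (e.g. for a shared reborrow)
// and recovered once all suspensions are gone. `Locked` is an ad-hoc model;
// a plain count stands in for the per-lifetime suspension lists.
#[derive(Debug, PartialEq)]
enum Lock {
    NoLock,
    ReadLock(usize), // number of readers
    WriteLock,
}

struct Locked {
    active: Lock,
    suspensions: usize,
}

impl Locked {
    fn acquire_read(&mut self) -> Result<(), ()> {
        match self.active {
            Lock::NoLock => { self.active = Lock::ReadLock(1); Ok(()) }
            Lock::ReadLock(ref mut n) => { *n += 1; Ok(()) }
            Lock::WriteLock => Err(()), // MemoryAcquireConflict
        }
    }

    /// Release our own write lock while remembering we may take it back.
    fn suspend_write(&mut self) {
        self.active = Lock::NoLock;
        self.suspensions += 1;
    }

    /// Drop one suspension; reacquire the lock once none remain.
    fn recover_write(&mut self) -> Result<(), ()> {
        self.suspensions -= 1;
        if self.suspensions == 0 {
            match self.active {
                Lock::NoLock => { self.active = Lock::WriteLock; Ok(()) }
                _ => Err(()), // someone else holds it in the meantime
            }
        } else {
            Ok(())
        }
    }
}

fn main() {
    let mut l = Locked { active: Lock::WriteLock, suspensions: 0 };
    l.suspend_write();               // hand the lock out for a shared reborrow
    assert!(l.acquire_read().is_ok());
    l.active = Lock::NoLock;         // the reader's lifetime ends
    assert!(l.recover_write().is_ok());
    assert_eq!(l.active, Lock::WriteLock);
}
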
-
-/// Allocation accessors
-impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
-    pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation<'tcx, M::MemoryKinds>> {
-        match id.into_alloc_id_kind() {
-            AllocIdKind::Function(_) => err!(DerefFunctionPointer),
-            AllocIdKind::Runtime(id) => {
-                match self.alloc_map.get(&id) {
-                    Some(alloc) => Ok(alloc),
-                    None => err!(DanglingPointerDeref),
-                }
-            }
-        }
-    }
-
-    fn get_mut_unchecked(
-        &mut self,
-        id: AllocId,
-    ) -> EvalResult<'tcx, &mut Allocation<'tcx, M::MemoryKinds>> {
-        match id.into_alloc_id_kind() {
-            AllocIdKind::Function(_) => err!(DerefFunctionPointer),
-            AllocIdKind::Runtime(id) => {
-                match self.alloc_map.get_mut(&id) {
-                    Some(alloc) => Ok(alloc),
-                    None => err!(DanglingPointerDeref),
-                }
-            }
-        }
-    }
-
-    fn get_mut(&mut self, id: AllocId) -> EvalResult<'tcx, &mut Allocation<'tcx, M::MemoryKinds>> {
-        let alloc = self.get_mut_unchecked(id)?;
-        if alloc.mutable == Mutability::Mutable {
-            Ok(alloc)
-        } else {
-            err!(ModifiedConstantMemory)
-        }
-    }
-
-    pub fn get_fn(&self, ptr: MemoryPointer) -> EvalResult<'tcx, Instance<'tcx>> {
-        if ptr.offset != 0 {
-            return err!(InvalidFunctionPointer);
-        }
-        debug!("reading fn ptr: {}", ptr.alloc_id);
-        match ptr.alloc_id.into_alloc_id_kind() {
-            AllocIdKind::Function(id) => Ok(self.functions[id]),
-            AllocIdKind::Runtime(_) => err!(ExecuteMemory),
-        }
-    }
-
-    /// For debugging, print an allocation and all allocations it points to, recursively.
-    pub fn dump_alloc(&self, id: AllocId) {
-        self.dump_allocs(vec![id]);
-    }
-
-    /// For debugging, print a list of allocations and all allocations they point to, recursively.
-    pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
-        use std::fmt::Write;
-        allocs.sort();
-        allocs.dedup();
-        let mut allocs_to_print = VecDeque::from(allocs);
-        let mut allocs_seen = HashSet::new();
-
-        while let Some(id) = allocs_to_print.pop_front() {
-            let mut msg = format!("Alloc {:<5} ", format!("{}:", id));
-            let prefix_len = msg.len();
-            let mut relocations = vec![];
-
-            let alloc = match id.into_alloc_id_kind() {
-                AllocIdKind::Function(id) => {
-                    trace!("{} {}", msg, self.functions[id]);
-                    continue;
-                }
-                AllocIdKind::Runtime(id) => {
-                    match self.alloc_map.get(&id) {
-                        Some(a) => a,
-                        None => {
-                            trace!("{} (deallocated)", msg);
-                            continue;
-                        }
-                    }
-                }
-            };
-
-            for i in 0..(alloc.bytes.len() as u64) {
-                if let Some(&target_id) = alloc.relocations.get(&i) {
-                    if allocs_seen.insert(target_id) {
-                        allocs_to_print.push_back(target_id);
-                    }
-                    relocations.push((i, target_id));
-                }
-                if alloc.undef_mask.is_range_defined(i, i + 1) {
-                    // this `as usize` is fine, since `i` came from a `usize`
-                    write!(msg, "{:02x} ", alloc.bytes[i as usize]).unwrap();
-                } else {
-                    msg.push_str("__ ");
-                }
-            }
-
-            let immutable = match (alloc.kind, alloc.mutable) {
-                (MemoryKind::UninitializedStatic, _) => {
-                    " (static in the process of initialization)".to_owned()
-                }
-                (MemoryKind::Static, Mutability::Mutable) => " (static mut)".to_owned(),
-                (MemoryKind::Static, Mutability::Immutable) => " (immutable)".to_owned(),
-                (MemoryKind::Machine(m), _) => format!(" ({:?})", m),
-                (MemoryKind::Stack, _) => " (stack)".to_owned(),
-            };
-            trace!(
-                "{}({} bytes, alignment {}){}",
-                msg,
-                alloc.bytes.len(),
-                alloc.align,
-                immutable
-            );
-
-            if !relocations.is_empty() {
-                msg.clear();
-                write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
-                let mut pos = 0;
-                let relocation_width = (self.pointer_size() - 1) * 3;
-                for (i, target_id) in relocations {
-                    // this `as usize` is fine, since we can't print more chars than `usize::MAX`
-                    write!(msg, "{:1$}", "", ((i - pos) * 3) as usize).unwrap();
-                    let target = format!("({})", target_id);
-                    // this `as usize` is fine, since we can't print more chars than `usize::MAX`
-                    write!(msg, "â””{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
-                    pos = i + self.pointer_size();
-                }
-                trace!("{}", msg);
-            }
-        }
-    }
-
-    pub fn leak_report(&self) -> usize {
-        trace!("### LEAK REPORT ###");
-        let leaks: Vec<_> = self.alloc_map
-            .iter()
-            .filter_map(|(&key, val)| if val.kind != MemoryKind::Static {
-                Some(AllocIdKind::Runtime(key).into_alloc_id())
-            } else {
-                None
-            })
-            .collect();
-        let n = leaks.len();
-        self.dump_allocs(leaks);
-        n
-    }
-}
-
-/// Byte accessors
-impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
-    fn get_bytes_unchecked(
-        &self,
-        ptr: MemoryPointer,
-        size: u64,
-        align: u64,
-    ) -> EvalResult<'tcx, &[u8]> {
-        // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
-        self.check_align(ptr.into(), align, Some(AccessKind::Read))?;
-        if size == 0 {
-            return Ok(&[]);
-        }
-        self.check_locks(ptr, size, AccessKind::Read)?;
-        self.check_bounds(ptr.offset(size, self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
-        let alloc = self.get(ptr.alloc_id)?;
-        assert_eq!(ptr.offset as usize as u64, ptr.offset);
-        assert_eq!(size as usize as u64, size);
-        let offset = ptr.offset as usize;
-        Ok(&alloc.bytes[offset..offset + size as usize])
-    }
-
-    fn get_bytes_unchecked_mut(
-        &mut self,
-        ptr: MemoryPointer,
-        size: u64,
-        align: u64,
-    ) -> EvalResult<'tcx, &mut [u8]> {
-        // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
-        self.check_align(ptr.into(), align, Some(AccessKind::Write))?;
-        if size == 0 {
-            return Ok(&mut []);
-        }
-        self.check_locks(ptr, size, AccessKind::Write)?;
-        self.check_bounds(ptr.offset(size, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
-        let alloc = self.get_mut(ptr.alloc_id)?;
-        assert_eq!(ptr.offset as usize as u64, ptr.offset);
-        assert_eq!(size as usize as u64, size);
-        let offset = ptr.offset as usize;
-        Ok(&mut alloc.bytes[offset..offset + size as usize])
-    }
-
-    fn get_bytes(&self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> {
-        assert_ne!(size, 0);
-        if self.relocations(ptr, size)?.count() != 0 {
-            return err!(ReadPointerAsBytes);
-        }
-        self.check_defined(ptr, size)?;
-        self.get_bytes_unchecked(ptr, size, align)
-    }
-
-    fn get_bytes_mut(
-        &mut self,
-        ptr: MemoryPointer,
-        size: u64,
-        align: u64,
-    ) -> EvalResult<'tcx, &mut [u8]> {
-        assert_ne!(size, 0);
-        self.clear_relocations(ptr, size)?;
-        self.mark_definedness(ptr.into(), size, true)?;
-        self.get_bytes_unchecked_mut(ptr, size, align)
-    }
-}
-
-/// Reading and writing
-impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
-    /// Mark an allocation pointed to by a static as static and initialized
-    fn mark_inner_allocation_initialized(
-        &mut self,
-        alloc: AllocId,
-        mutability: Mutability,
-    ) -> EvalResult<'tcx> {
-        // relocations into other statics are not "inner allocations"
-        if self.get(alloc).ok().map_or(false, |alloc| {
-            alloc.kind != MemoryKind::UninitializedStatic
-        })
-        {
-            self.mark_static_initalized(alloc, mutability)?;
-        }
-        Ok(())
-    }
-
-    /// Mark an allocation as static and initialized, either mutable or not
-    pub fn mark_static_initalized(
-        &mut self,
-        alloc_id: AllocId,
-        mutability: Mutability,
-    ) -> EvalResult<'tcx> {
-        trace!(
-            "mark_static_initalized {:?}, mutability: {:?}",
-            alloc_id,
-            mutability
-        );
-        // do not use `self.get_mut(alloc_id)` here, because we might have already marked a
-        // sub-element or have circular pointers (e.g. `Rc`-cycles)
-        let alloc_id = match alloc_id.into_alloc_id_kind() {
-            AllocIdKind::Function(_) => return Ok(()),
-            AllocIdKind::Runtime(id) => id,
-        };
-        let relocations = match self.alloc_map.get_mut(&alloc_id) {
-            Some(&mut Allocation {
-                     ref mut relocations,
-                     ref mut kind,
-                     ref mut mutable,
-                     ..
-                 }) => {
-                match *kind {
-                    // const eval results can refer to "locals".
-                    // E.g. `const Foo: &u32 = &1;` refers to the temp local that stores the `1`
-                    MemoryKind::Stack |
-                    // The entire point of this function
-                    MemoryKind::UninitializedStatic => {},
-                    MemoryKind::Machine(m) => M::mark_static_initialized(m)?,
-                    MemoryKind::Static => {
-                        trace!("mark_static_initalized: skipping already initialized static referred to by static currently being initialized");
-                        return Ok(());
-                    },
-                }
-                *kind = MemoryKind::Static;
-                *mutable = mutability;
-                // take out the relocations vector to free the borrow on self, so we can call
-                // mark recursively
-                mem::replace(relocations, Default::default())
-            }
-            None => return err!(DanglingPointerDeref),
-        };
-        // recurse into inner allocations
-        for &alloc in relocations.values() {
-            self.mark_inner_allocation_initialized(alloc, mutability)?;
-        }
-        // put back the relocations
-        self.alloc_map
-            .get_mut(&alloc_id)
-            .expect("checked above")
-            .relocations = relocations;
-        Ok(())
-    }
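
// Illustrative sketch of why the `Rc`-cycle comment above holds: the
// recursion flips the kind to Static *before* following relocations, so
// revisiting an allocation terminates instead of looping. Modelled on a plain
// graph; `mark_static` and the bool/Vec pair are ad-hoc stand-ins.
use std::collections::HashMap;

fn mark_static(id: u64, allocs: &mut HashMap<u64, (bool, Vec<u64>)>) {
    let targets = match allocs.get_mut(&id) {
        Some((is_static, targets)) => {
            if *is_static {
                return; // already marked: a cycle terminates here
            }
            *is_static = true;
            targets.clone() // like taking the relocations out before recursing
        }
        None => return,
    };
    for target in targets {
        mark_static(target, allocs);
    }
}

fn main() {
    let mut allocs = HashMap::new();
    allocs.insert(0, (false, vec![1]));
    allocs.insert(1, (false, vec![0])); // a cycle, as with `Rc`-cycles
    mark_static(0, &mut allocs);
    assert!(allocs[&0].0 && allocs[&1].0);
}
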
-
-    pub fn copy(
-        &mut self,
-        src: Pointer,
-        dest: Pointer,
-        size: u64,
-        align: u64,
-        nonoverlapping: bool,
-    ) -> EvalResult<'tcx> {
-        // Empty accesses don't need to be valid pointers, but they should still be aligned
-        self.check_align(src, align, Some(AccessKind::Read))?;
-        self.check_align(dest, align, Some(AccessKind::Write))?;
-        if size == 0 {
-            return Ok(());
-        }
-        let src = src.to_ptr()?;
-        let dest = dest.to_ptr()?;
-        self.check_relocation_edges(src, size)?;
-
-        // first copy the relocations to a temporary buffer, because
-        // `get_bytes_mut` will clear the relocations, which is correct,
-        // since we don't want to keep any relocations at the target.
-
-        let relocations: Vec<_> = self.relocations(src, size)?
-            .map(|(&offset, &alloc_id)| {
-                // Update relocation offsets for the new positions in the destination allocation.
-                (offset + dest.offset - src.offset, alloc_id)
-            })
-            .collect();
-
-        let src_bytes = self.get_bytes_unchecked(src, size, align)?.as_ptr();
-        let dest_bytes = self.get_bytes_mut(dest, size, align)?.as_mut_ptr();
-
-        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
-        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
-        // `dest` could possibly overlap.
-        unsafe {
-            assert_eq!(size as usize as u64, size);
-            if src.alloc_id == dest.alloc_id {
-                if nonoverlapping {
-                    if (src.offset <= dest.offset && src.offset + size > dest.offset) ||
-                        (dest.offset <= src.offset && dest.offset + size > src.offset)
-                    {
-                        return err!(Intrinsic(
-                            format!("copy_nonoverlapping called on overlapping ranges"),
-                        ));
-                    }
-                }
-                ptr::copy(src_bytes, dest_bytes, size as usize);
-            } else {
-                ptr::copy_nonoverlapping(src_bytes, dest_bytes, size as usize);
-            }
-        }
-
-        self.copy_undef_mask(src, dest, size)?;
-        // copy back the relocations
-        self.get_mut(dest.alloc_id)?.relocations.extend(relocations);
-
-        Ok(())
-    }
-
-    pub fn read_c_str(&self, ptr: MemoryPointer) -> EvalResult<'tcx, &[u8]> {
-        let alloc = self.get(ptr.alloc_id)?;
-        assert_eq!(ptr.offset as usize as u64, ptr.offset);
-        let offset = ptr.offset as usize;
-        match alloc.bytes[offset..].iter().position(|&c| c == 0) {
-            Some(size) => {
-                if self.relocations(ptr, (size + 1) as u64)?.count() != 0 {
-                    return err!(ReadPointerAsBytes);
-                }
-                self.check_defined(ptr, (size + 1) as u64)?;
-                self.check_locks(ptr, (size + 1) as u64, AccessKind::Read)?;
-                Ok(&alloc.bytes[offset..offset + size])
-            }
-            None => err!(UnterminatedCString(ptr)),
-        }
-    }
-
-    pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> {
-        // Empty accesses don't need to be valid pointers, but they should still be non-NULL
-        self.check_align(ptr, 1, Some(AccessKind::Read))?;
-        if size == 0 {
-            return Ok(&[]);
-        }
-        self.get_bytes(ptr.to_ptr()?, size, 1)
-    }
-
-    pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<'tcx> {
-        // Empty accesses don't need to be valid pointers, but they should still be non-NULL
-        self.check_align(ptr, 1, Some(AccessKind::Write))?;
-        if src.is_empty() {
-            return Ok(());
-        }
-        let bytes = self.get_bytes_mut(ptr.to_ptr()?, src.len() as u64, 1)?;
-        bytes.clone_from_slice(src);
-        Ok(())
-    }
-
-    pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx> {
-        // Empty accesses don't need to be valid pointers, but they should still be non-NULL
-        self.check_align(ptr, 1, Some(AccessKind::Write))?;
-        if count == 0 {
-            return Ok(());
-        }
-        let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, 1)?;
-        for b in bytes {
-            *b = val;
-        }
-        Ok(())
-    }
-
-    pub fn read_primval(&self, ptr: MemoryPointer, size: u64, signed: bool) -> EvalResult<'tcx, PrimVal> {
-        self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer
-        let endianess = self.endianess();
-        let bytes = self.get_bytes_unchecked(ptr, size, self.int_align(size))?;
-        // Undef check happens *after* we established that the alignment is correct.
-        // We must not return Ok() for unaligned pointers!
-        if self.check_defined(ptr, size).is_err() {
-            return Ok(PrimVal::Undef.into());
-        }
-        // Now we do the actual reading
-        let bytes = if signed {
-            read_target_int(endianess, bytes).unwrap() as u128
-        } else {
-            read_target_uint(endianess, bytes).unwrap()
-        };
-        // See if we got a pointer
-        if size != self.pointer_size() {
-            if self.relocations(ptr, size)?.count() != 0 {
-                return err!(ReadPointerAsBytes);
-            }
-        } else {
-            let alloc = self.get(ptr.alloc_id)?;
-            match alloc.relocations.get(&ptr.offset) {
-                Some(&alloc_id) => return Ok(PrimVal::Ptr(MemoryPointer::new(alloc_id, bytes as u64))),
-                None => {},
-            }
-        }
-        // We don't. Just return the bytes.
-        Ok(PrimVal::Bytes(bytes))
-    }
-
-    pub fn read_ptr_sized_unsigned(&self, ptr: MemoryPointer) -> EvalResult<'tcx, PrimVal> {
-        self.read_primval(ptr, self.pointer_size(), false)
-    }
-
-    pub fn write_primval(&mut self, ptr: MemoryPointer, val: PrimVal, size: u64, signed: bool) -> EvalResult<'tcx> {
-        let endianess = self.endianess();
-
-        let bytes = match val {
-            PrimVal::Ptr(val) => {
-                assert_eq!(size, self.pointer_size());
-                val.offset as u128
-            }
-
-            PrimVal::Bytes(bytes) => {
-                // We need to mask here, or the byteorder crate can die when given a u64 larger
-                // than fits in an integer of the requested size.
-                let mask = match size {
-                    1 => !0u8 as u128,
-                    2 => !0u16 as u128,
-                    4 => !0u32 as u128,
-                    8 => !0u64 as u128,
-                    16 => !0,
-                    n => bug!("unexpected PrimVal::Bytes size: {}", n),
-                };
-                bytes & mask
-            }
-
-            PrimVal::Undef => {
-                self.mark_definedness(PrimVal::Ptr(ptr).into(), size, false)?;
-                return Ok(());
-            }
-        };
-
-        {
-            let align = self.int_align(size);
-            let dst = self.get_bytes_mut(ptr, size, align)?;
-            if signed {
-                write_target_int(endianess, dst, bytes as i128).unwrap();
-            } else {
-                write_target_uint(endianess, dst, bytes).unwrap();
-            }
-        }
-
-        // See if we have to also write a relocation
-        match val {
-            PrimVal::Ptr(val) => {
-                self.get_mut(ptr.alloc_id)?.relocations.insert(
-                    ptr.offset,
-                    val.alloc_id,
-                );
-            }
-            _ => {}
-        }
-
-        Ok(())
-    }
-
-    pub fn write_ptr_sized_unsigned(&mut self, ptr: MemoryPointer, val: PrimVal) -> EvalResult<'tcx> {
-        let ptr_size = self.pointer_size();
-        self.write_primval(ptr, val, ptr_size, false)
-    }
-
-    fn int_align(&self, size: u64) -> u64 {
-        // We assume pointer-sized integers have the same alignment as pointers.
-        // We also assume signed and unsigned integers of the same size have the same alignment.
-        match size {
-            1 => self.layout.i8_align.abi(),
-            2 => self.layout.i16_align.abi(),
-            4 => self.layout.i32_align.abi(),
-            8 => self.layout.i64_align.abi(),
-            16 => self.layout.i128_align.abi(),
-            _ => bug!("bad integer size: {}", size),
-        }
-    }
-}
-
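As a standalone check of the masking step in `write_primval` above, a minimal sketch (the `truncate_to_size` helper is hypothetical, not part of this codebase):

    fn truncate_to_size(bytes: u128, size: u64) -> u128 {
        // Same mask table as write_primval: keep only the low `size` bytes so
        // the byteorder writer never sees a value wider than requested.
        let mask = match size {
            1 => !0u8 as u128,
            2 => !0u16 as u128,
            4 => !0u32 as u128,
            8 => !0u64 as u128,
            16 => !0,
            n => panic!("unexpected size: {}", n),
        };
        bytes & mask
    }

    fn main() {
        assert_eq!(truncate_to_size(0x100, 1), 0); // 256 does not fit in one byte
        assert_eq!(truncate_to_size(!0u128, 8), u64::max_value() as u128);
    }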
-/// Relocations
-impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
-    fn relocations(
-        &self,
-        ptr: MemoryPointer,
-        size: u64,
-    ) -> EvalResult<'tcx, btree_map::Range<u64, AllocId>> {
-        let start = ptr.offset.saturating_sub(self.pointer_size() - 1);
-        let end = ptr.offset + size;
-        Ok(self.get(ptr.alloc_id)?.relocations.range(start..end))
-    }
-
-    fn clear_relocations(&mut self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
-        // Find all relocations overlapping the given range.
-        let keys: Vec<_> = self.relocations(ptr, size)?.map(|(&k, _)| k).collect();
-        if keys.is_empty() {
-            return Ok(());
-        }
-
-        // Find the start and end of the given range and its outermost relocations.
-        let start = ptr.offset;
-        let end = start + size;
-        let first = *keys.first().unwrap();
-        let last = *keys.last().unwrap() + self.pointer_size();
-
-        let alloc = self.get_mut(ptr.alloc_id)?;
-
-        // Mark parts of the outermost relocations as undefined if they partially fall outside the
-        // given range.
-        if first < start {
-            alloc.undef_mask.set_range(first, start, false);
-        }
-        if last > end {
-            alloc.undef_mask.set_range(end, last, false);
-        }
-
-        // Forget all the relocations.
-        for k in keys {
-            alloc.relocations.remove(&k);
-        }
-
-        Ok(())
-    }
-
-    fn check_relocation_edges(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
-        let overlapping_start = self.relocations(ptr, 0)?.count();
-        let overlapping_end = self.relocations(ptr.offset(size, self.layout)?, 0)?.count();
-        if overlapping_start + overlapping_end != 0 {
-            return err!(ReadPointerAsBytes);
-        }
-        Ok(())
-    }
-}
-
-/// Undefined bytes
-impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
-    // FIXME(solson): This is a very naive, slow version.
-    fn copy_undef_mask(
-        &mut self,
-        src: MemoryPointer,
-        dest: MemoryPointer,
-        size: u64,
-    ) -> EvalResult<'tcx> {
-        // The bits have to be saved locally before writing to dest in case src and dest overlap.
-        assert_eq!(size as usize as u64, size);
-        let mut v = Vec::with_capacity(size as usize);
-        for i in 0..size {
-            let defined = self.get(src.alloc_id)?.undef_mask.get(src.offset + i);
-            v.push(defined);
-        }
-        for (i, defined) in v.into_iter().enumerate() {
-            self.get_mut(dest.alloc_id)?.undef_mask.set(
-                dest.offset + i as u64,
-                defined,
-            );
-        }
-        Ok(())
-    }
-
-    fn check_defined(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
-        let alloc = self.get(ptr.alloc_id)?;
-        if !alloc.undef_mask.is_range_defined(
-            ptr.offset,
-            ptr.offset + size,
-        )
-        {
-            return err!(ReadUndefBytes);
-        }
-        Ok(())
-    }
-
-    pub fn mark_definedness(
-        &mut self,
-        ptr: Pointer,
-        size: u64,
-        new_state: bool,
-    ) -> EvalResult<'tcx> {
-        if size == 0 {
-            return Ok(());
-        }
-        let ptr = ptr.to_ptr()?;
-        let alloc = self.get_mut(ptr.alloc_id)?;
-        alloc.undef_mask.set_range(
-            ptr.offset,
-            ptr.offset + size,
-            new_state,
-        );
-        Ok(())
-    }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Methods to access integers in the target endianness
-////////////////////////////////////////////////////////////////////////////////
-
-fn write_target_uint(
-    endianess: layout::Endian,
-    mut target: &mut [u8],
-    data: u128,
-) -> Result<(), io::Error> {
-    let len = target.len();
-    match endianess {
-        layout::Endian::Little => target.write_uint128::<LittleEndian>(data, len),
-        layout::Endian::Big => target.write_uint128::<BigEndian>(data, len),
-    }
-}
-fn write_target_int(
-    endianess: layout::Endian,
-    mut target: &mut [u8],
-    data: i128,
-) -> Result<(), io::Error> {
-    let len = target.len();
-    match endianess {
-        layout::Endian::Little => target.write_int128::<LittleEndian>(data, len),
-        layout::Endian::Big => target.write_int128::<BigEndian>(data, len),
-    }
-}
-
-fn read_target_uint(endianess: layout::Endian, mut source: &[u8]) -> Result<u128, io::Error> {
-    match endianess {
-        layout::Endian::Little => source.read_uint128::<LittleEndian>(source.len()),
-        layout::Endian::Big => source.read_uint128::<BigEndian>(source.len()),
-    }
-}
-
-fn read_target_int(endianess: layout::Endian, mut source: &[u8]) -> Result<i128, io::Error> {
-    match endianess {
-        layout::Endian::Little => source.read_int128::<LittleEndian>(source.len()),
-        layout::Endian::Big => source.read_int128::<BigEndian>(source.len()),
-    }
-}
-
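The same truncating encode/decode can be sketched with only the standard library; this little-endian-only version assumes `u128::to_le_bytes`/`from_le_bytes` (the real helpers above delegate to the `byteorder` crate and also handle big-endian targets):

    fn write_le(target: &mut [u8], data: u128) {
        // Little-endian puts the low-order bytes first, so truncating to
        // `target.len()` bytes means taking a prefix of the LE encoding.
        let len = target.len();
        target.copy_from_slice(&data.to_le_bytes()[..len]);
    }

    fn read_le(source: &[u8]) -> u128 {
        let mut buf = [0u8; 16];
        buf[..source.len()].copy_from_slice(source);
        u128::from_le_bytes(buf)
    }

    fn main() {
        let mut buf = [0u8; 4];
        write_le(&mut buf, 0xDEAD_BEEF);
        assert_eq!(read_le(&buf), 0xDEAD_BEEF);
    }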
-////////////////////////////////////////////////////////////////////////////////
-// Undefined byte tracking
-////////////////////////////////////////////////////////////////////////////////
-
-type Block = u64;
-const BLOCK_SIZE: u64 = 64;
-
-#[derive(Clone, Debug)]
-pub struct UndefMask {
-    blocks: Vec<Block>,
-    len: u64,
-}
-
-impl UndefMask {
-    fn new(size: u64) -> Self {
-        let mut m = UndefMask {
-            blocks: vec![],
-            len: 0,
-        };
-        m.grow(size, false);
-        m
-    }
-
-    /// Check whether the range `start..end` (end-exclusive) is entirely defined.
-    pub fn is_range_defined(&self, start: u64, end: u64) -> bool {
-        if end > self.len {
-            return false;
-        }
-        for i in start..end {
-            if !self.get(i) {
-                return false;
-            }
-        }
-        true
-    }
-
-    fn set_range(&mut self, start: u64, end: u64, new_state: bool) {
-        let len = self.len;
-        if end > len {
-            self.grow(end - len, new_state);
-        }
-        self.set_range_inbounds(start, end, new_state);
-    }
-
-    fn set_range_inbounds(&mut self, start: u64, end: u64, new_state: bool) {
-        for i in start..end {
-            self.set(i, new_state);
-        }
-    }
-
-    fn get(&self, i: u64) -> bool {
-        let (block, bit) = bit_index(i);
-        (self.blocks[block] & 1 << bit) != 0
-    }
-
-    fn set(&mut self, i: u64, new_state: bool) {
-        let (block, bit) = bit_index(i);
-        if new_state {
-            self.blocks[block] |= 1 << bit;
-        } else {
-            self.blocks[block] &= !(1 << bit);
-        }
-    }
-
-    fn grow(&mut self, amount: u64, new_state: bool) {
-        let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len;
-        if amount > unused_trailing_bits {
-            let additional_blocks = amount / BLOCK_SIZE + 1;
-            assert_eq!(additional_blocks as usize as u64, additional_blocks);
-            self.blocks.extend(
-                iter::repeat(0).take(additional_blocks as usize),
-            );
-        }
-        let start = self.len;
-        self.len += amount;
-        self.set_range_inbounds(start, start + amount, new_state);
-    }
-}
-
-fn bit_index(bits: u64) -> (usize, usize) {
-    let a = bits / BLOCK_SIZE;
-    let b = bits % BLOCK_SIZE;
-    assert_eq!(a as usize as u64, a);
-    assert_eq!(b as usize as u64, b);
-    (a as usize, b as usize)
-}
-
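A standalone demonstration of the block/bit indexing used by `UndefMask` (same math as `bit_index`, nothing here is taken from the interpreter itself):

    const BLOCK_SIZE: u64 = 64;

    fn bit_index(bits: u64) -> (usize, usize) {
        // Byte `i` is tracked in block i / 64, at bit i % 64.
        ((bits / BLOCK_SIZE) as usize, (bits % BLOCK_SIZE) as usize)
    }

    fn main() {
        let mut blocks = vec![0u64; 2]; // covers 128 bytes of definedness
        let (block, bit) = bit_index(70);
        assert_eq!((block, bit), (1, 6)); // 70 = 1 * 64 + 6
        blocks[block] |= 1 << bit; // mark byte 70 as defined
        assert_ne!(blocks[1] & (1 << 6), 0);
    }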
-////////////////////////////////////////////////////////////////////////////////
-// Unaligned accesses
-////////////////////////////////////////////////////////////////////////////////
-
-pub trait HasMemory<'a, 'tcx, M: Machine<'tcx>> {
-    fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M>;
-    fn memory(&self) -> &Memory<'a, 'tcx, M>;
-
-    // These are not supposed to be overridden.
-    fn read_maybe_aligned<F, T>(&self, aligned: bool, f: F) -> EvalResult<'tcx, T>
-    where
-        F: FnOnce(&Self) -> EvalResult<'tcx, T>,
-    {
-        let old = self.memory().reads_are_aligned.get();
-        // Do alignment checking if *all* nested calls say it has to be aligned.
-        self.memory().reads_are_aligned.set(old && aligned);
-        let t = f(self);
-        self.memory().reads_are_aligned.set(old);
-        t
-    }
-
-    fn read_maybe_aligned_mut<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
-    where
-        F: FnOnce(&mut Self) -> EvalResult<'tcx, T>,
-    {
-        let old = self.memory().reads_are_aligned.get();
-        // Do alignment checking if *all* nested calls say it has to be aligned.
-        self.memory().reads_are_aligned.set(old && aligned);
-        let t = f(self);
-        self.memory().reads_are_aligned.set(old);
-        t
-    }
-
-    fn write_maybe_aligned_mut<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
-    where
-        F: FnOnce(&mut Self) -> EvalResult<'tcx, T>,
-    {
-        let old = self.memory().writes_are_aligned.get();
-        // Do alignment checking if *all* nested calls say it has to be aligned.
-        self.memory().writes_are_aligned.set(old && aligned);
-        let t = f(self);
-        self.memory().writes_are_aligned.set(old);
-        t
-    }
-}
-
-impl<'a, 'tcx, M: Machine<'tcx>> HasMemory<'a, 'tcx, M> for Memory<'a, 'tcx, M> {
-    #[inline]
-    fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M> {
-        self
-    }
-
-    #[inline]
-    fn memory(&self) -> &Memory<'a, 'tcx, M> {
-        self
-    }
-}
-
-impl<'a, 'tcx, M: Machine<'tcx>> HasMemory<'a, 'tcx, M> for EvalContext<'a, 'tcx, M> {
-    #[inline]
-    fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M> {
-        &mut self.memory
-    }
-
-    #[inline]
-    fn memory(&self) -> &Memory<'a, 'tcx, M> {
-        &self.memory
-    }
-}
-
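The save/AND/restore pattern shared by these three methods can be captured in a standalone sketch using a plain `Cell<bool>` flag (hypothetical helper, not part of this codebase):

    use std::cell::Cell;

    // Standalone model of reads_are_aligned/writes_are_aligned: AND the flag
    // with each nested request, then restore the old value afterwards.
    fn with_alignment<T>(flag: &Cell<bool>, aligned: bool, f: impl FnOnce() -> T) -> T {
        let old = flag.get();
        flag.set(old && aligned); // aligned only if *all* nested calls require it
        let t = f();
        flag.set(old);
        t
    }

    fn main() {
        let reads_are_aligned = Cell::new(true);
        with_alignment(&reads_are_aligned, false, || {
            assert!(!reads_are_aligned.get()); // unaligned inside the closure
        });
        assert!(reads_are_aligned.get()); // restored afterwards
    }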
-////////////////////////////////////////////////////////////////////////////////
-// Pointer arithmetic
-////////////////////////////////////////////////////////////////////////////////
-
-pub trait PointerArithmetic: layout::HasDataLayout {
-    // These are not supposed to be overridden.
-
-    /// Truncate the given value to the pointer size; also return whether there was an overflow.
-    fn truncate_to_ptr(self, val: u128) -> (u64, bool) {
-        let max_ptr_plus_1 = 1u128 << self.data_layout().pointer_size.bits();
-        ((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1)
-    }
-
-    // Overflow checking only works properly for offsets whose magnitude fits in a u64.
-    fn overflowing_signed_offset(self, val: u64, i: i128) -> (u64, bool) {
-        // FIXME: is it possible to over/underflow here?
-        if i < 0 {
-            // trickery to ensure that i64::min_value() works fine
-            // this formula only works for true negative values, it panics for zero!
-            let n = u64::max_value() - (i as u64) + 1;
-            val.overflowing_sub(n)
-        } else {
-            self.overflowing_offset(val, i as u64)
-        }
-    }
-
-    fn overflowing_offset(self, val: u64, i: u64) -> (u64, bool) {
-        let (res, over1) = val.overflowing_add(i);
-        let (res, over2) = self.truncate_to_ptr(res as u128);
-        (res, over1 || over2)
-    }
-
-    fn signed_offset<'tcx>(self, val: u64, i: i64) -> EvalResult<'tcx, u64> {
-        let (res, over) = self.overflowing_signed_offset(val, i as i128);
-        if over { err!(OverflowingMath) } else { Ok(res) }
-    }
-
-    fn offset<'tcx>(self, val: u64, i: u64) -> EvalResult<'tcx, u64> {
-        let (res, over) = self.overflowing_offset(val, i);
-        if over { err!(OverflowingMath) } else { Ok(res) }
-    }
-
-    fn wrapping_signed_offset(self, val: u64, i: i64) -> u64 {
-        self.overflowing_signed_offset(val, i as i128).0
-    }
-}
-
-impl<T: layout::HasDataLayout> PointerArithmetic for T {}
-
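A worked check of the negative-offset formula in `overflowing_signed_offset`, using plain integer arithmetic:

    fn main() {
        // For i < 0, n = u64::max_value() - (i as u64) + 1 computes |i| via
        // wrapping casts, and stays correct even for i64::min_value()...
        let i: i128 = -3;
        assert_eq!(u64::max_value() - (i as u64) + 1, 3);

        let i: i128 = i64::min_value() as i128;
        assert_eq!(u64::max_value() - (i as u64) + 1, 1u64 << 63);

        // ...but it would overflow (and panic in debug builds) for i == 0,
        // which is why the formula is only used on the negative branch.
    }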
-impl<'a, 'tcx, M: Machine<'tcx>> layout::HasDataLayout for &'a Memory<'a, 'tcx, M> {
-    #[inline]
-    fn data_layout(&self) -> &TargetDataLayout {
-        self.layout
-    }
-}
-impl<'a, 'tcx, M: Machine<'tcx>> layout::HasDataLayout for &'a EvalContext<'a, 'tcx, M> {
-    #[inline]
-    fn data_layout(&self) -> &TargetDataLayout {
-        self.memory().layout
-    }
-}
-
-impl<'c, 'b, 'a, 'tcx, M: Machine<'tcx>> layout::HasDataLayout
-    for &'c &'b mut EvalContext<'a, 'tcx, M> {
-    #[inline]
-    fn data_layout(&self) -> &TargetDataLayout {
-        self.memory().layout
-    }
-}
diff --git a/src/librustc_mir/interpret/mod.rs b/src/librustc_mir/interpret/mod.rs
deleted file mode 100644 (file)
index 08837c4..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-//! An interpreter for MIR used in CTFE and by miri
-
-#[macro_export]
-macro_rules! err {
-    ($($tt:tt)*) => { Err($crate::interpret::EvalErrorKind::$($tt)*.into()) };
-}
-
-mod cast;
-mod const_eval;
-mod error;
-mod eval_context;
-mod lvalue;
-mod validation;
-mod machine;
-mod memory;
-mod operator;
-mod range_map;
-mod step;
-mod terminator;
-mod traits;
-mod value;
-
-pub use self::error::{EvalError, EvalResult, EvalErrorKind};
-
-pub use self::eval_context::{EvalContext, Frame, ResourceLimits, StackPopCleanup, DynamicLifetime,
-                             TyAndPacked, PtrAndAlign, ValTy};
-
-pub use self::lvalue::{Lvalue, LvalueExtra, GlobalId};
-
-pub use self::memory::{AllocId, Memory, MemoryPointer, MemoryKind, HasMemory, AccessKind, AllocIdKind};
-
-use self::memory::{PointerArithmetic, Lock};
-
-use self::range_map::RangeMap;
-
-pub use self::value::{PrimVal, PrimValKind, Value, Pointer};
-
-pub use self::const_eval::{eval_body_as_integer, eval_body_as_primval};
-
-pub use self::machine::Machine;
-
-pub use self::validation::{ValidationQuery, AbsLvalue};
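For reference, a call site of the `err!` macro defined at the top of this module expands roughly as follows (illustrative expansion, not compiler output):

    // return err!(Unimplemented(msg));
    // ...expands to:
    // return Err($crate::interpret::EvalErrorKind::Unimplemented(msg).into());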
diff --git a/src/librustc_mir/interpret/operator.rs b/src/librustc_mir/interpret/operator.rs
deleted file mode 100644 (file)
index 7fe4691..0000000
+++ /dev/null
@@ -1,268 +0,0 @@
-use rustc::mir;
-use rustc::ty::Ty;
-use rustc_const_math::ConstFloat;
-use syntax::ast::FloatTy;
-use std::cmp::Ordering;
-
-use super::{EvalResult, EvalContext, Lvalue, Machine, ValTy};
-
-use super::value::{PrimVal, PrimValKind, Value, bytes_to_f32, bytes_to_f64, f32_to_bytes,
-                   f64_to_bytes};
-
-impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
-    fn binop_with_overflow(
-        &mut self,
-        op: mir::BinOp,
-        left: ValTy<'tcx>,
-        right: ValTy<'tcx>,
-    ) -> EvalResult<'tcx, (PrimVal, bool)> {
-        let left_val = self.value_to_primval(left)?;
-        let right_val = self.value_to_primval(right)?;
-        self.binary_op(op, left_val, left.ty, right_val, right.ty)
-    }
-
-    /// Applies the binary operation `op` to the two operands and writes a tuple of the result
-    /// and a boolean signifying the potential overflow to the destination.
-    pub fn intrinsic_with_overflow(
-        &mut self,
-        op: mir::BinOp,
-        left: ValTy<'tcx>,
-        right: ValTy<'tcx>,
-        dest: Lvalue,
-        dest_ty: Ty<'tcx>,
-    ) -> EvalResult<'tcx> {
-        let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
-        let val = Value::ByValPair(val, PrimVal::from_bool(overflowed));
-        let valty = ValTy {
-            value: val,
-            ty: dest_ty,
-        };
-        self.write_value(valty, dest)
-    }
-
-    /// Applies the binary operation `op` to the arguments and writes the result to the
-    /// destination. Returns `true` if the operation overflowed.
-    pub fn intrinsic_overflowing(
-        &mut self,
-        op: mir::BinOp,
-        left: ValTy<'tcx>,
-        right: ValTy<'tcx>,
-        dest: Lvalue,
-        dest_ty: Ty<'tcx>,
-    ) -> EvalResult<'tcx, bool> {
-        let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
-        self.write_primval(dest, val, dest_ty)?;
-        Ok(overflowed)
-    }
-}
-
-macro_rules! overflow {
-    ($op:ident, $l:expr, $r:expr) => ({
-        let (val, overflowed) = $l.$op($r);
-        let primval = PrimVal::Bytes(val as u128);
-        Ok((primval, overflowed))
-    })
-}
-
-macro_rules! int_arithmetic {
-    ($kind:expr, $int_op:ident, $l:expr, $r:expr) => ({
-        let l = $l;
-        let r = $r;
-        use super::PrimValKind::*;
-        match $kind {
-            I8  => overflow!($int_op, l as i8,  r as i8),
-            I16 => overflow!($int_op, l as i16, r as i16),
-            I32 => overflow!($int_op, l as i32, r as i32),
-            I64 => overflow!($int_op, l as i64, r as i64),
-            I128 => overflow!($int_op, l as i128, r as i128),
-            U8  => overflow!($int_op, l as u8,  r as u8),
-            U16 => overflow!($int_op, l as u16, r as u16),
-            U32 => overflow!($int_op, l as u32, r as u32),
-            U64 => overflow!($int_op, l as u64, r as u64),
-            U128 => overflow!($int_op, l as u128, r as u128),
-            _ => bug!("int_arithmetic should only be called on int primvals"),
-        }
-    })
-}
-
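`overflow!` bottoms out in the standard `overflowing_*` family, which returns the wrapped result together with an overflow flag; for instance:

    fn main() {
        // The (value, overflowed) pairs int_arithmetic! ultimately produces:
        assert_eq!(255u8.overflowing_add(1), (0, true));
        assert_eq!(127i8.overflowing_add(1), (-128, true));
        assert_eq!(i8::min_value().overflowing_div(-1), (i8::min_value(), true));
    }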
-macro_rules! int_shift {
-    ($kind:expr, $int_op:ident, $l:expr, $r:expr) => ({
-        let l = $l;
-        let r = $r;
-        let r_wrapped = r as u32;
-        match $kind {
-            I8  => overflow!($int_op, l as i8,  r_wrapped),
-            I16 => overflow!($int_op, l as i16, r_wrapped),
-            I32 => overflow!($int_op, l as i32, r_wrapped),
-            I64 => overflow!($int_op, l as i64, r_wrapped),
-            I128 => overflow!($int_op, l as i128, r_wrapped),
-            U8  => overflow!($int_op, l as u8,  r_wrapped),
-            U16 => overflow!($int_op, l as u16, r_wrapped),
-            U32 => overflow!($int_op, l as u32, r_wrapped),
-            U64 => overflow!($int_op, l as u64, r_wrapped),
-            U128 => overflow!($int_op, l as u128, r_wrapped),
-            _ => bug!("int_shift should only be called on int primvals"),
-        }.map(|(val, over)| (val, over || r != r_wrapped as u128))
-    })
-}
-
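The extra `r != r_wrapped as u128` term matters because the standard `overflowing_shl`/`overflowing_shr` take a `u32` shift amount and mask it to the bit width; a quick illustration:

    fn main() {
        assert_eq!(1u8.overflowing_shl(1), (2, false));
        // A shift of 9 on a u8 is masked to 9 % 8 == 1 and flagged as overflow:
        assert_eq!(1u8.overflowing_shl(9), (2, true));
    }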
-impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
-    /// Returns the result of the specified operation and whether it overflowed.
-    pub fn binary_op(
-        &self,
-        bin_op: mir::BinOp,
-        left: PrimVal,
-        left_ty: Ty<'tcx>,
-        right: PrimVal,
-        right_ty: Ty<'tcx>,
-    ) -> EvalResult<'tcx, (PrimVal, bool)> {
-        use rustc::mir::BinOp::*;
-        use super::PrimValKind::*;
-
-        let left_kind = self.ty_to_primval_kind(left_ty)?;
-        let right_kind = self.ty_to_primval_kind(right_ty)?;
-        //trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind);
-
-        // I: Handle operations that support pointers
-        if !left_kind.is_float() && !right_kind.is_float() {
-            if let Some(handled) = M::try_ptr_op(self, bin_op, left, left_ty, right, right_ty)? {
-                return Ok(handled);
-            }
-        }
-
-        // II: From now on, everything must be bytes, no pointers
-        let l = left.to_bytes()?;
-        let r = right.to_bytes()?;
-
-        // These ops can have an RHS with a different numeric type.
-        if right_kind.is_int() && (bin_op == Shl || bin_op == Shr) {
-            return match bin_op {
-                Shl => int_shift!(left_kind, overflowing_shl, l, r),
-                Shr => int_shift!(left_kind, overflowing_shr, l, r),
-                _ => bug!("it has already been checked that this is a shift op"),
-            };
-        }
-
-        if left_kind != right_kind {
-            let msg = format!(
-                "unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})",
-                bin_op,
-                left,
-                left_kind,
-                right,
-                right_kind
-            );
-            return err!(Unimplemented(msg));
-        }
-
-        let float_op = |op, l, r, ty| {
-            let l = ConstFloat {
-                bits: l,
-                ty,
-            };
-            let r = ConstFloat {
-                bits: r,
-                ty,
-            };
-            match op {
-                Eq => PrimVal::from_bool(l.try_cmp(r).unwrap() == Ordering::Equal),
-                Ne => PrimVal::from_bool(l.try_cmp(r).unwrap() != Ordering::Equal),
-                Lt => PrimVal::from_bool(l.try_cmp(r).unwrap() == Ordering::Less),
-                Le => PrimVal::from_bool(l.try_cmp(r).unwrap() != Ordering::Greater),
-                Gt => PrimVal::from_bool(l.try_cmp(r).unwrap() == Ordering::Greater),
-                Ge => PrimVal::from_bool(l.try_cmp(r).unwrap() != Ordering::Less),
-                Add => PrimVal::Bytes((l + r).unwrap().bits),
-                Sub => PrimVal::Bytes((l - r).unwrap().bits),
-                Mul => PrimVal::Bytes((l * r).unwrap().bits),
-                Div => PrimVal::Bytes((l / r).unwrap().bits),
-                Rem => PrimVal::Bytes((l % r).unwrap().bits),
-                _ => bug!("invalid float op: `{:?}`", op),
-            }
-        };
-
-        let val = match (bin_op, left_kind) {
-            (_, F32) => float_op(bin_op, l, r, FloatTy::F32),
-            (_, F64) => float_op(bin_op, l, r, FloatTy::F64),
-
-            (Eq, _) => PrimVal::from_bool(l == r),
-            (Ne, _) => PrimVal::from_bool(l != r),
-
-            (Lt, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) < (r as i128)),
-            (Lt, _) => PrimVal::from_bool(l < r),
-            (Le, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) <= (r as i128)),
-            (Le, _) => PrimVal::from_bool(l <= r),
-            (Gt, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) > (r as i128)),
-            (Gt, _) => PrimVal::from_bool(l > r),
-            (Ge, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) >= (r as i128)),
-            (Ge, _) => PrimVal::from_bool(l >= r),
-
-            (BitOr, _) => PrimVal::Bytes(l | r),
-            (BitAnd, _) => PrimVal::Bytes(l & r),
-            (BitXor, _) => PrimVal::Bytes(l ^ r),
-
-            (Add, k) if k.is_int() => return int_arithmetic!(k, overflowing_add, l, r),
-            (Sub, k) if k.is_int() => return int_arithmetic!(k, overflowing_sub, l, r),
-            (Mul, k) if k.is_int() => return int_arithmetic!(k, overflowing_mul, l, r),
-            (Div, k) if k.is_int() => return int_arithmetic!(k, overflowing_div, l, r),
-            (Rem, k) if k.is_int() => return int_arithmetic!(k, overflowing_rem, l, r),
-
-            _ => {
-                let msg = format!(
-                    "unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})",
-                    bin_op,
-                    left,
-                    left_kind,
-                    right,
-                    right_kind
-                );
-                return err!(Unimplemented(msg));
-            }
-        };
-
-        Ok((val, false))
-    }
-}
-
-pub fn unary_op<'tcx>(
-    un_op: mir::UnOp,
-    val: PrimVal,
-    val_kind: PrimValKind,
-) -> EvalResult<'tcx, PrimVal> {
-    use rustc::mir::UnOp::*;
-    use super::PrimValKind::*;
-
-    let bytes = val.to_bytes()?;
-
-    let result_bytes = match (un_op, val_kind) {
-        (Not, Bool) => !val.to_bool()? as u128,
-
-        (Not, U8) => !(bytes as u8) as u128,
-        (Not, U16) => !(bytes as u16) as u128,
-        (Not, U32) => !(bytes as u32) as u128,
-        (Not, U64) => !(bytes as u64) as u128,
-        (Not, U128) => !bytes,
-
-        (Not, I8) => !(bytes as i8) as u128,
-        (Not, I16) => !(bytes as i16) as u128,
-        (Not, I32) => !(bytes as i32) as u128,
-        (Not, I64) => !(bytes as i64) as u128,
-        (Not, I128) => !(bytes as i128) as u128,
-
-        (Neg, I8) => -(bytes as i8) as u128,
-        (Neg, I16) => -(bytes as i16) as u128,
-        (Neg, I32) => -(bytes as i32) as u128,
-        (Neg, I64) => -(bytes as i64) as u128,
-        (Neg, I128) => -(bytes as i128) as u128,
-
-        (Neg, F32) => f32_to_bytes(-bytes_to_f32(bytes)),
-        (Neg, F64) => f64_to_bytes(-bytes_to_f64(bytes)),
-
-        _ => {
-            let msg = format!("unimplemented unary op: {:?}, {:?}", un_op, val);
-            return err!(Unimplemented(msg));
-        }
-    };
-
-    Ok(PrimVal::Bytes(result_bytes))
-}
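The cast dance in `unary_op` (narrow, operate, re-widen) relies on sign extension through the signed types; a standalone check:

    fn main() {
        let bytes: u128 = 1;
        // Negating through i8 sign-extends on the way back up to u128...
        assert_eq!(-(bytes as i8) as u128, u128::max_value()); // -1 as all ones
        // ...while unsigned ops stay confined to the narrow width.
        assert_eq!(!(bytes as u8) as u128, 0xFE);
    }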
diff --git a/src/librustc_mir/interpret/range_map.rs b/src/librustc_mir/interpret/range_map.rs
deleted file mode 100644 (file)
index 5cdcbe3..0000000
+++ /dev/null
@@ -1,250 +0,0 @@
-//! Implements a map from integer indices to data.
-//! Rather than storing data for every index, this internally maps entire ranges to the data.
-//! To this end, the APIs all work on ranges, not on individual integers. Ranges are split as
-//! necessary (e.g. when [0,5) is first associated with X, and then [1,2) is mutated).
-//! Users must not depend on whether a range is coalesced or not, even though this is observable
-//! via the iteration APIs.
-use std::collections::BTreeMap;
-use std::ops;
-
-#[derive(Clone, Debug)]
-pub struct RangeMap<T> {
-    map: BTreeMap<Range, T>,
-}
-
-// The derived `Ord` impl sorts first by the first field, then, if the fields are the same,
-// by the second field.
-// This is exactly what we need for our purposes: a range query on a BTreeSet/BTreeMap will give us all
-// `Range`s whose `start` is <= the one we're looking for, but not beyond the end of the range we're checking.
-// At the same time, the `end` is irrelevant for the sorting and range searching, but is used for the overlap check.
-// This kind of search breaks if `end < start`, so don't do that!
-#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
-struct Range {
-    start: u64,
-    end: u64, // Invariant: end > start
-}
-
-impl Range {
-    fn range(offset: u64, len: u64) -> ops::Range<Range> {
-        assert!(len > 0);
-        // We select all elements that are within
-        // the range given by the offset into the allocation and the length.
-        // This is sound if all ranges that intersect with the argument range are
-        // included in the resulting range of ranges.
-        let left = Range {
-            // lowest range to include `offset`
-            start: 0,
-            end: offset + 1,
-        };
-        let right = Range {
-            // lowest (valid) range not to include `offset+len`
-            start: offset + len,
-            end: offset + len + 1,
-        };
-        left..right
-    }
-
-    /// Tests whether this range overlaps the (end-exclusive) range [offset, offset+len).
-    fn overlaps(&self, offset: u64, len: u64) -> bool {
-        assert!(len > 0);
-        offset < self.end && offset + len >= self.start
-    }
-}
-
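A standalone illustration of the query trick described above, with a plain `BTreeMap` and the bounds `Range::range(4, 2)` would produce:

    use std::collections::BTreeMap;

    #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
    struct Range { start: u64, end: u64 }

    fn main() {
        let mut map = BTreeMap::new();
        map.insert(Range { start: 0, end: 5 }, "a");
        map.insert(Range { start: 5, end: 9 }, "b");
        // Query bytes [4, 6): left is the lowest range including offset 4,
        // right the lowest valid range not including offset 6.
        let left = Range { start: 0, end: 5 };
        let right = Range { start: 6, end: 7 };
        let hits: Vec<_> = map.range(left..right).map(|(_, v)| *v).collect();
        assert_eq!(hits, vec!["a", "b"]); // both overlapping entries are found
    }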
-impl<T> RangeMap<T> {
-    pub fn new() -> RangeMap<T> {
-        RangeMap { map: BTreeMap::new() }
-    }
-
-    fn iter_with_range<'a>(
-        &'a self,
-        offset: u64,
-        len: u64,
-    ) -> impl Iterator<Item = (&'a Range, &'a T)> + 'a {
-        assert!(len > 0);
-        self.map.range(Range::range(offset, len)).filter_map(
-            move |(range, data)| {
-                if range.overlaps(offset, len) {
-                    Some((range, data))
-                } else {
-                    None
-                }
-            },
-        )
-    }
-
-    pub fn iter<'a>(&'a self, offset: u64, len: u64) -> impl Iterator<Item = &'a T> + 'a {
-        self.iter_with_range(offset, len).map(|(_, data)| data)
-    }
-
-    fn split_entry_at(&mut self, offset: u64)
-    where
-        T: Clone,
-    {
-        let range = match self.iter_with_range(offset, 1).next() {
-            Some((&range, _)) => range,
-            None => return,
-        };
-        assert!(
-            range.start <= offset && range.end > offset,
-            "We got a range that doesn't even contain what we asked for."
-        );
-        // There is an entry overlapping this position, see if we have to split it
-        if range.start < offset {
-            let data = self.map.remove(&range).unwrap();
-            let old = self.map.insert(
-                Range {
-                    start: range.start,
-                    end: offset,
-                },
-                data.clone(),
-            );
-            assert!(old.is_none());
-            let old = self.map.insert(
-                Range {
-                    start: offset,
-                    end: range.end,
-                },
-                data,
-            );
-            assert!(old.is_none());
-        }
-    }
-
-    pub fn iter_mut_all<'a>(&'a mut self) -> impl Iterator<Item = &'a mut T> + 'a {
-        self.map.values_mut()
-    }
-
-    /// Provide mutable iteration over everything in the given range.  As a side-effect,
-    /// this will split entries in the map that are only partially hit by the given range,
-    /// to make sure that when they are mutated, the effect is constrained to the given range.
-    pub fn iter_mut_with_gaps<'a>(
-        &'a mut self,
-        offset: u64,
-        len: u64,
-    ) -> impl Iterator<Item = &'a mut T> + 'a
-    where
-        T: Clone,
-    {
-        assert!(len > 0);
-        // Preparation: Split first and last entry as needed.
-        self.split_entry_at(offset);
-        self.split_entry_at(offset + len);
-        // Now we can provide a mutable iterator
-        self.map.range_mut(Range::range(offset, len)).filter_map(
-            move |(&range, data)| {
-                if range.overlaps(offset, len) {
-                    assert!(
-                        offset <= range.start && offset + len >= range.end,
-                        "The splitting went wrong"
-                    );
-                    Some(data)
-                } else {
-                    // Skip this one
-                    None
-                }
-            },
-        )
-    }
-
-    /// Provide a mutable iterator over everything in the given range, with the same side-effects as
-    /// iter_mut_with_gaps.  Furthermore, if there are gaps between ranges, fill them with the given default.
-    /// This is also how you insert.
-    pub fn iter_mut<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator<Item = &'a mut T> + 'a
-    where
-        T: Clone + Default,
-    {
-        // Do a first iteration to collect the gaps
-        let mut gaps = Vec::new();
-        let mut last_end = offset;
-        for (range, _) in self.iter_with_range(offset, len) {
-            if last_end < range.start {
-                gaps.push(Range {
-                    start: last_end,
-                    end: range.start,
-                });
-            }
-            last_end = range.end;
-        }
-        if last_end < offset + len {
-            gaps.push(Range {
-                start: last_end,
-                end: offset + len,
-            });
-        }
-
-        // Add default for all gaps
-        for gap in gaps {
-            let old = self.map.insert(gap, Default::default());
-            assert!(old.is_none());
-        }
-
-        // Now provide mutable iteration
-        self.iter_mut_with_gaps(offset, len)
-    }
-
-    pub fn retain<F>(&mut self, mut f: F)
-    where
-        F: FnMut(&T) -> bool,
-    {
-        let mut remove = Vec::new();
-        for (range, data) in self.map.iter() {
-            if !f(data) {
-                remove.push(*range);
-            }
-        }
-
-        for range in remove {
-            self.map.remove(&range);
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    /// Query the map at every offset in the range and collect the results.
-    fn to_vec<T: Copy>(map: &RangeMap<T>, offset: u64, len: u64) -> Vec<T> {
-        (offset..offset + len)
-            .into_iter()
-            .map(|i| *map.iter(i, 1).next().unwrap())
-            .collect()
-    }
-
-    #[test]
-    fn basic_insert() {
-        let mut map = RangeMap::<i32>::new();
-        // Insert
-        for x in map.iter_mut(10, 1) {
-            *x = 42;
-        }
-        // Check
-        assert_eq!(to_vec(&map, 10, 1), vec![42]);
-    }
-
-    #[test]
-    fn gaps() {
-        let mut map = RangeMap::<i32>::new();
-        for x in map.iter_mut(11, 1) {
-            *x = 42;
-        }
-        for x in map.iter_mut(15, 1) {
-            *x = 42;
-        }
-
-        // Now request a range that needs three gaps filled
-        for x in map.iter_mut(10, 10) {
-            if *x != 42 {
-                *x = 23;
-            }
-        }
-
-        assert_eq!(
-            to_vec(&map, 10, 10),
-            vec![23, 42, 23, 23, 23, 42, 23, 23, 23, 23]
-        );
-        assert_eq!(to_vec(&map, 13, 5), vec![23, 23, 42, 23, 23]);
-    }
-}
diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs
deleted file mode 100644 (file)
index c701ebf..0000000
+++ /dev/null
@@ -1,402 +0,0 @@
-//! This module contains the `EvalContext` methods for executing a single step of the interpreter.
-//!
-//! The main entry point is the `step` method.
-
-use rustc::hir::def_id::DefId;
-use rustc::hir;
-use rustc::mir::visit::{Visitor, LvalueContext};
-use rustc::mir;
-use rustc::traits::Reveal;
-use rustc::ty;
-use rustc::ty::layout::Layout;
-use rustc::ty::subst::Substs;
-use rustc::middle::const_val::ConstVal;
-
-use super::{EvalResult, EvalContext, StackPopCleanup, PtrAndAlign, GlobalId, Lvalue,
-            MemoryKind, Machine, PrimVal};
-
-use syntax::codemap::Span;
-use syntax::ast::Mutability;
-
-impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
-    pub fn inc_step_counter_and_check_limit(&mut self, n: u64) -> EvalResult<'tcx> {
-        self.steps_remaining = self.steps_remaining.saturating_sub(n);
-        if self.steps_remaining > 0 {
-            Ok(())
-        } else {
-            err!(ExecutionTimeLimitReached)
-        }
-    }
-
-    /// Returns true as long as there are more things to do.
-    pub fn step(&mut self) -> EvalResult<'tcx, bool> {
-        self.inc_step_counter_and_check_limit(1)?;
-        if self.stack.is_empty() {
-            return Ok(false);
-        }
-
-        let block = self.frame().block;
-        let stmt_id = self.frame().stmt;
-        let mir = self.mir();
-        let basic_block = &mir.basic_blocks()[block];
-
-        if let Some(stmt) = basic_block.statements.get(stmt_id) {
-            let mut new = Ok(0);
-            ConstantExtractor {
-                span: stmt.source_info.span,
-                instance: self.frame().instance,
-                ecx: self,
-                mir,
-                new_constants: &mut new,
-            }.visit_statement(
-                block,
-                stmt,
-                mir::Location {
-                    block,
-                    statement_index: stmt_id,
-                },
-            );
-            // if ConstantExtractor added new frames, we don't execute anything here
-            // but await the next call to step
-            if new? == 0 {
-                self.statement(stmt)?;
-            }
-            return Ok(true);
-        }
-
-        let terminator = basic_block.terminator();
-        let mut new = Ok(0);
-        ConstantExtractor {
-            span: terminator.source_info.span,
-            instance: self.frame().instance,
-            ecx: self,
-            mir,
-            new_constants: &mut new,
-        }.visit_terminator(
-            block,
-            terminator,
-            mir::Location {
-                block,
-                statement_index: stmt_id,
-            },
-        );
-        // if ConstantExtractor added new frames, we don't execute anything here
-        // but await the next call to step
-        if new? == 0 {
-            self.terminator(terminator)?;
-        }
-        Ok(true)
-    }
-
-    fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> {
-        trace!("{:?}", stmt);
-
-        use rustc::mir::StatementKind::*;
-
-        // Some statements (e.g. box) push new stack frames.  We have to record the stack frame number
-        // *before* executing the statement.
-        let frame_idx = self.cur_frame();
-
-        match stmt.kind {
-            Assign(ref lvalue, ref rvalue) => self.eval_rvalue_into_lvalue(rvalue, lvalue)?,
-
-            SetDiscriminant {
-                ref lvalue,
-                variant_index,
-            } => {
-                let dest = self.eval_lvalue(lvalue)?;
-                let dest_ty = self.lvalue_ty(lvalue);
-                let dest_layout = self.type_layout(dest_ty)?;
-
-                match *dest_layout {
-                    Layout::General { discr, .. } => {
-                        let discr_size = discr.size().bytes();
-                        let dest_ptr = self.force_allocation(dest)?.to_ptr()?;
-                        self.memory.write_primval(
-                            dest_ptr,
-                            PrimVal::Bytes(variant_index as u128),
-                            discr_size,
-                            false
-                        )?
-                    }
-
-                    Layout::RawNullablePointer { nndiscr, .. } => {
-                        if variant_index as u64 != nndiscr {
-                            self.write_null(dest, dest_ty)?;
-                        }
-                    }
-
-                    Layout::StructWrappedNullablePointer {
-                        nndiscr,
-                        ref discrfield_source,
-                        ..
-                    } => {
-                        if variant_index as u64 != nndiscr {
-                            self.write_struct_wrapped_null_pointer(
-                                dest_ty,
-                                nndiscr,
-                                discrfield_source,
-                                dest,
-                            )?;
-                        }
-                    }
-
-                    _ => {
-                        bug!(
-                            "SetDiscriminant on {} represented as {:#?}",
-                            dest_ty,
-                            dest_layout
-                        )
-                    }
-                }
-            }
-
-            // Mark locals as alive
-            StorageLive(local) => {
-                let old_val = self.frame_mut().storage_live(local)?;
-                self.deallocate_local(old_val)?;
-            }
-
-            // Mark locals as dead
-            StorageDead(local) => {
-                let old_val = self.frame_mut().storage_dead(local)?;
-                self.deallocate_local(old_val)?;
-            }
-
-            // Validity checks.
-            Validate(op, ref lvalues) => {
-                for operand in lvalues {
-                    self.validation_op(op, operand)?;
-                }
-            }
-            EndRegion(ce) => {
-                self.end_region(Some(ce))?;
-            }
-
-            // Defined to do nothing. These are added by optimization passes, to avoid changing the
-            // size of MIR constantly.
-            Nop => {}
-
-            InlineAsm { .. } => return err!(InlineAsm),
-        }
-
-        self.stack[frame_idx].stmt += 1;
-        Ok(())
-    }
-
-    fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> EvalResult<'tcx> {
-        trace!("{:?}", terminator.kind);
-        self.eval_terminator(terminator)?;
-        if !self.stack.is_empty() {
-            trace!("// {:?}", self.frame().block);
-        }
-        Ok(())
-    }
-
-    /// returns `true` if a stackframe was pushed
-    fn global_item(
-        &mut self,
-        def_id: DefId,
-        substs: &'tcx Substs<'tcx>,
-        span: Span,
-        mutability: Mutability,
-    ) -> EvalResult<'tcx, bool> {
-        let instance = self.resolve_associated_const(def_id, substs);
-        let cid = GlobalId {
-            instance,
-            promoted: None,
-        };
-        if self.globals.contains_key(&cid) {
-            return Ok(false);
-        }
-        if self.tcx.has_attr(def_id, "linkage") {
-            M::global_item_with_linkage(self, cid.instance, mutability)?;
-            return Ok(false);
-        }
-        let mir = self.load_mir(instance.def)?;
-        let size = self.type_size_with_substs(mir.return_ty, substs)?.expect(
-            "unsized global",
-        );
-        let align = self.type_align_with_substs(mir.return_ty, substs)?;
-        let ptr = self.memory.allocate(
-            size,
-            align,
-            MemoryKind::UninitializedStatic,
-        )?;
-        let aligned = !self.is_packed(mir.return_ty)?;
-        self.globals.insert(
-            cid,
-            PtrAndAlign {
-                ptr: ptr.into(),
-                aligned,
-            },
-        );
-        let internally_mutable = !mir.return_ty.is_freeze(
-            self.tcx,
-            ty::ParamEnv::empty(Reveal::All),
-            span,
-        );
-        let mutability = if mutability == Mutability::Mutable || internally_mutable {
-            Mutability::Mutable
-        } else {
-            Mutability::Immutable
-        };
-        let cleanup = StackPopCleanup::MarkStatic(mutability);
-        let name = ty::tls::with(|tcx| tcx.item_path_str(def_id));
-        trace!("pushing stack frame for global: {}", name);
-        self.push_stack_frame(
-            instance,
-            span,
-            mir,
-            Lvalue::from_ptr(ptr),
-            cleanup,
-        )?;
-        Ok(true)
-    }
-}
-
-// WARNING: This code pushes new stack frames.  Make sure that any methods implemented on this
-// type don't ever access ecx.stack[ecx.cur_frame()], as that will change. This includes, e.g.,
-// using the current stack frame's substitution.
-// Basically don't call anything other than `load_mir`, `alloc_ptr`, `push_stack_frame`.
-struct ConstantExtractor<'a, 'b: 'a, 'tcx: 'b, M: Machine<'tcx> + 'a> {
-    span: Span,
-    ecx: &'a mut EvalContext<'b, 'tcx, M>,
-    mir: &'tcx mir::Mir<'tcx>,
-    instance: ty::Instance<'tcx>,
-    new_constants: &'a mut EvalResult<'tcx, u64>,
-}
-
-impl<'a, 'b, 'tcx, M: Machine<'tcx>> ConstantExtractor<'a, 'b, 'tcx, M> {
-    fn try<F: FnOnce(&mut Self) -> EvalResult<'tcx, bool>>(&mut self, f: F) {
-        // previous constant errored
-        let n = match *self.new_constants {
-            Ok(n) => n,
-            Err(_) => return,
-        };
-        match f(self) {
-            // everything ok + a new stackframe
-            Ok(true) => *self.new_constants = Ok(n + 1),
-            // constant correctly evaluated, but no new stackframe
-            Ok(false) => {}
-            // constant eval errored
-            Err(err) => *self.new_constants = Err(err),
-        }
-    }
-}
-
-impl<'a, 'b, 'tcx, M: Machine<'tcx>> Visitor<'tcx> for ConstantExtractor<'a, 'b, 'tcx, M> {
-    fn visit_constant(&mut self, constant: &mir::Constant<'tcx>, location: mir::Location) {
-        self.super_constant(constant, location);
-        match constant.literal {
-            // already computed by rustc
-            mir::Literal::Value { value: &ty::Const { val: ConstVal::Unevaluated(def_id, substs), .. } } => {
-                self.try(|this| {
-                    this.ecx.global_item(
-                        def_id,
-                        substs,
-                        constant.span,
-                        Mutability::Immutable,
-                    )
-                });
-            }
-            mir::Literal::Value { .. } => {}
-            mir::Literal::Promoted { index } => {
-                let cid = GlobalId {
-                    instance: self.instance,
-                    promoted: Some(index),
-                };
-                if self.ecx.globals.contains_key(&cid) {
-                    return;
-                }
-                let mir = &self.mir.promoted[index];
-                self.try(|this| {
-                    let size = this.ecx
-                        .type_size_with_substs(mir.return_ty, this.instance.substs)?
-                        .expect("unsized global");
-                    let align = this.ecx.type_align_with_substs(
-                        mir.return_ty,
-                        this.instance.substs,
-                    )?;
-                    let ptr = this.ecx.memory.allocate(
-                        size,
-                        align,
-                        MemoryKind::UninitializedStatic,
-                    )?;
-                    let aligned = !this.ecx.is_packed(mir.return_ty)?;
-                    this.ecx.globals.insert(
-                        cid,
-                        PtrAndAlign {
-                            ptr: ptr.into(),
-                            aligned,
-                        },
-                    );
-                    trace!("pushing stack frame for {:?}", index);
-                    this.ecx.push_stack_frame(
-                        this.instance,
-                        constant.span,
-                        mir,
-                        Lvalue::from_ptr(ptr),
-                        StackPopCleanup::MarkStatic(Mutability::Immutable),
-                    )?;
-                    Ok(true)
-                });
-            }
-        }
-    }
-
-    fn visit_lvalue(
-        &mut self,
-        lvalue: &mir::Lvalue<'tcx>,
-        context: LvalueContext<'tcx>,
-        location: mir::Location,
-    ) {
-        self.super_lvalue(lvalue, context, location);
-        if let mir::Lvalue::Static(ref static_) = *lvalue {
-            let def_id = static_.def_id;
-            let substs = self.ecx.tcx.intern_substs(&[]);
-            let span = self.span;
-            if let Some(node_item) = self.ecx.tcx.hir.get_if_local(def_id) {
-                if let hir::map::Node::NodeItem(&hir::Item { ref node, .. }) = node_item {
-                    if let hir::ItemStatic(_, m, _) = *node {
-                        self.try(|this| {
-                            this.ecx.global_item(
-                                def_id,
-                                substs,
-                                span,
-                                if m == hir::MutMutable {
-                                    Mutability::Mutable
-                                } else {
-                                    Mutability::Immutable
-                                },
-                            )
-                        });
-                        return;
-                    } else {
-                        bug!("static def id doesn't point to static");
-                    }
-                } else {
-                    bug!("static def id doesn't point to item");
-                }
-            } else {
-                let def = self.ecx.tcx.describe_def(def_id).expect("static not found");
-                if let hir::def::Def::Static(_, mutable) = def {
-                    self.try(|this| {
-                        this.ecx.global_item(
-                            def_id,
-                            substs,
-                            span,
-                            if mutable {
-                                Mutability::Mutable
-                            } else {
-                                Mutability::Immutable
-                            },
-                        )
-                    });
-                } else {
-                    bug!("static found but isn't a static: {:?}", def);
-                }
-            }
-        }
-    }
-}
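The `step` API lends itself to a simple driver loop; a hypothetical sketch in terms of this crate's types (not code from this commit):

    fn run_to_completion<'a, 'tcx, M: Machine<'tcx>>(
        ecx: &mut EvalContext<'a, 'tcx, M>,
    ) -> EvalResult<'tcx> {
        // Keep stepping until the stack empties or evaluation errors out;
        // each step either executes a statement/terminator or pushes frames
        // for constants discovered by ConstantExtractor.
        while ecx.step()? {}
        Ok(())
    }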
diff --git a/src/librustc_mir/interpret/terminator/drop.rs b/src/librustc_mir/interpret/terminator/drop.rs
deleted file mode 100644 (file)
index 6596cf9..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-use rustc::mir::BasicBlock;
-use rustc::ty::{self, Ty};
-use syntax::codemap::Span;
-
-use interpret::{EvalResult, EvalContext, Lvalue, LvalueExtra, PrimVal, Value,
-                Machine, ValTy};
-
-impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
-    pub(crate) fn drop_lvalue(
-        &mut self,
-        lval: Lvalue,
-        instance: ty::Instance<'tcx>,
-        ty: Ty<'tcx>,
-        span: Span,
-        target: BasicBlock,
-    ) -> EvalResult<'tcx> {
-        trace!("drop_lvalue: {:#?}", lval);
-        // We take the address of the object.  This may well be unaligned, which is fine for us here.
-        // However, unaligned accesses will probably make the actual drop implementation fail -- a problem shared
-        // by rustc.
-        let val = match self.force_allocation(lval)? {
-            Lvalue::Ptr {
-                ptr,
-                extra: LvalueExtra::Vtable(vtable),
-            } => ptr.ptr.to_value_with_vtable(vtable),
-            Lvalue::Ptr {
-                ptr,
-                extra: LvalueExtra::Length(len),
-            } => ptr.ptr.to_value_with_len(len),
-            Lvalue::Ptr {
-                ptr,
-                extra: LvalueExtra::None,
-            } => ptr.ptr.to_value(),
-            _ => bug!("force_allocation broken"),
-        };
-        self.drop(val, instance, ty, span, target)
-    }
-
-    fn drop(
-        &mut self,
-        arg: Value,
-        instance: ty::Instance<'tcx>,
-        ty: Ty<'tcx>,
-        span: Span,
-        target: BasicBlock,
-    ) -> EvalResult<'tcx> {
-        trace!("drop: {:#?}, {:?}, {:?}", arg, ty.sty, instance.def);
-
-        let instance = match ty.sty {
-            ty::TyDynamic(..) => {
-                let vtable = match arg {
-                    Value::ByValPair(_, PrimVal::Ptr(vtable)) => vtable,
-                    _ => bug!("expected fat ptr, got {:?}", arg),
-                };
-                match self.read_drop_type_from_vtable(vtable)? {
-                    Some(func) => func,
-                    // no drop fn -> bail out
-                    None => {
-                        self.goto_block(target);
-                        return Ok(())
-                    },
-                }
-            }
-            _ => instance,
-        };
-
-        // the drop function expects a reference to the value
-        let valty = ValTy {
-            value: arg,
-            ty: self.tcx.mk_mut_ptr(ty),
-        };
-
-        let fn_sig = self.tcx.fn_sig(instance.def_id()).skip_binder().clone();
-
-        self.eval_fn_call(
-            instance,
-            Some((Lvalue::undef(), target)),
-            &vec![valty],
-            span,
-            fn_sig,
-        )
-    }
-}
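
In drop() above, a trait object's drop glue is found through the vtable: the fat pointer's second word points at a table whose slot 0 holds the drop-glue function pointer, and a null slot means the concrete type has nothing to drop, so execution jumps straight to the target block. A rough sketch with the vtable modeled as a bare word array (the slot meaning comes from get_vtable in traits.rs further down; the helper itself is illustrative, not interpreter API):

// Slot 0 of a vtable image holds the drop-glue pointer; 0 means "no drop".
fn drop_fn_from_vtable(vtable: &[usize]) -> Option<usize> {
    match vtable.first() {
        Some(&0) | None => None,       // null drop slot: skip the call entirely
        Some(&fn_ptr) => Some(fn_ptr), // miri resolves this via memory.get_fn
    }
}

fn main() {
    let no_drop = [0usize, 8, 8];        // drop = null, size = 8, align = 8
    let with_drop = [0x1000usize, 8, 8];
    assert_eq!(drop_fn_from_vtable(&no_drop), None);
    assert_eq!(drop_fn_from_vtable(&with_drop), Some(0x1000));
}
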
diff --git a/src/librustc_mir/interpret/terminator/mod.rs b/src/librustc_mir/interpret/terminator/mod.rs
deleted file mode 100644 (file)
index e01777c..0000000
+++ /dev/null
@@ -1,411 +0,0 @@
-use rustc::mir;
-use rustc::ty::{self, TypeVariants};
-use rustc::ty::layout::Layout;
-use syntax::codemap::Span;
-use syntax::abi::Abi;
-
-use super::{EvalResult, EvalContext, eval_context,
-            PtrAndAlign, Lvalue, PrimVal, Value, Machine, ValTy};
-
-use rustc_data_structures::indexed_vec::Idx;
-
-mod drop;
-
-impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
-    pub fn goto_block(&mut self, target: mir::BasicBlock) {
-        self.frame_mut().block = target;
-        self.frame_mut().stmt = 0;
-    }
-
-    pub(super) fn eval_terminator(
-        &mut self,
-        terminator: &mir::Terminator<'tcx>,
-    ) -> EvalResult<'tcx> {
-        use rustc::mir::TerminatorKind::*;
-        match terminator.kind {
-            Return => {
-                self.dump_local(self.frame().return_lvalue);
-                self.pop_stack_frame()?
-            }
-
-            Goto { target } => self.goto_block(target),
-
-            SwitchInt {
-                ref discr,
-                ref values,
-                ref targets,
-                ..
-            } => {
-                // FIXME(CTFE): forbid branching
-                let discr_val = self.eval_operand(discr)?;
-                let discr_prim = self.value_to_primval(discr_val)?;
-
-                // Branch to the `otherwise` case by default, if no match is found.
-                let mut target_block = targets[targets.len() - 1];
-
-                for (index, const_int) in values.iter().enumerate() {
-                    let prim = PrimVal::Bytes(const_int.to_u128_unchecked());
-                    if discr_prim.to_bytes()? == prim.to_bytes()? {
-                        target_block = targets[index];
-                        break;
-                    }
-                }
-
-                self.goto_block(target_block);
-            }
-
-            Call {
-                ref func,
-                ref args,
-                ref destination,
-                ..
-            } => {
-                let destination = match *destination {
-                    Some((ref lv, target)) => Some((self.eval_lvalue(lv)?, target)),
-                    None => None,
-                };
-
-                let func_ty = self.operand_ty(func);
-                let (fn_def, sig) = match func_ty.sty {
-                    ty::TyFnPtr(sig) => {
-                        let fn_ptr = self.eval_operand_to_primval(func)?.to_ptr()?;
-                        let instance = self.memory.get_fn(fn_ptr)?;
-                        let instance_ty = instance.def.def_ty(self.tcx);
-                        let instance_ty = self.monomorphize(instance_ty, instance.substs);
-                        match instance_ty.sty {
-                            ty::TyFnDef(..) => {
-                                let real_sig = instance_ty.fn_sig(self.tcx);
-                                let sig = self.tcx.erase_late_bound_regions_and_normalize(&sig);
-                                let real_sig = self.tcx.erase_late_bound_regions_and_normalize(&real_sig);
-                                if !self.check_sig_compat(sig, real_sig)? {
-                                    return err!(FunctionPointerTyMismatch(real_sig, sig));
-                                }
-                            }
-                            ref other => bug!("instance def ty: {:?}", other),
-                        }
-                        (instance, sig)
-                    }
-                    ty::TyFnDef(def_id, substs) => (
-                        eval_context::resolve(self.tcx, def_id, substs),
-                        func_ty.fn_sig(self.tcx),
-                    ),
-                    _ => {
-                        let msg = format!("can't handle callee of type {:?}", func_ty);
-                        return err!(Unimplemented(msg));
-                    }
-                };
-                let args = self.operands_to_args(args)?;
-                let sig = self.tcx.erase_late_bound_regions_and_normalize(&sig);
-                self.eval_fn_call(
-                    fn_def,
-                    destination,
-                    &args,
-                    terminator.source_info.span,
-                    sig,
-                )?;
-            }
-
-            Drop {
-                ref location,
-                target,
-                ..
-            } => {
-                // FIXME(CTFE): forbid drop in const eval
-                let lval = self.eval_lvalue(location)?;
-                let ty = self.lvalue_ty(location);
-                let ty = eval_context::apply_param_substs(self.tcx, self.substs(), &ty);
-                trace!("TerminatorKind::drop: {:?}, type {}", location, ty);
-
-                let instance = eval_context::resolve_drop_in_place(self.tcx, ty);
-                self.drop_lvalue(
-                    lval,
-                    instance,
-                    ty,
-                    terminator.source_info.span,
-                    target,
-                )?;
-            }
-
-            Assert {
-                ref cond,
-                expected,
-                ref msg,
-                target,
-                ..
-            } => {
-                let cond_val = self.eval_operand_to_primval(cond)?.to_bool()?;
-                if expected == cond_val {
-                    self.goto_block(target);
-                } else {
-                    use rustc::mir::AssertMessage::*;
-                    return match *msg {
-                        BoundsCheck { ref len, ref index } => {
-                            let span = terminator.source_info.span;
-                            let len = self.eval_operand_to_primval(len)
-                                .expect("can't eval len")
-                                .to_u64()?;
-                            let index = self.eval_operand_to_primval(index)
-                                .expect("can't eval index")
-                                .to_u64()?;
-                            err!(ArrayIndexOutOfBounds(span, len, index))
-                        }
-                        Math(ref err) => {
-                            err!(Math(terminator.source_info.span, err.clone()))
-                        }
-                        GeneratorResumedAfterReturn |
-                        GeneratorResumedAfterPanic => unimplemented!(),
-                    };
-                }
-            }
-
-            Yield { .. } => unimplemented!("{:#?}", terminator.kind),
-            GeneratorDrop => unimplemented!(),
-            DropAndReplace { .. } => unimplemented!(),
-            Resume => unimplemented!(),
-            Unreachable => return err!(Unreachable),
-        }
-
-        Ok(())
-    }
-
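
In the SwitchInt arm above, `targets` always has exactly one more entry than `values`: the trailing entry is the `otherwise` block taken when no constant matches the discriminant. A minimal standalone restatement, with plain integers standing in for PrimVal bytes and basic-block indices:

fn switch_target(discr: u128, values: &[u128], targets: &[usize]) -> usize {
    values
        .iter()
        .position(|&v| v == discr)
        .map(|i| targets[i])
        .unwrap_or(targets[targets.len() - 1]) // fall through to `otherwise`
}

fn main() {
    assert_eq!(switch_target(1, &[0, 1], &[10, 11, 12]), 11);
    assert_eq!(switch_target(7, &[0, 1], &[10, 11, 12]), 12); // no match: otherwise
}
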
-    /// Decides whether it is okay to call the method with signature `real_sig` using signature `sig`.
-    /// FIXME: This should take into account the platform-dependent ABI description.
-    fn check_sig_compat(
-        &mut self,
-        sig: ty::FnSig<'tcx>,
-        real_sig: ty::FnSig<'tcx>,
-    ) -> EvalResult<'tcx, bool> {
-        fn check_ty_compat<'tcx>(ty: ty::Ty<'tcx>, real_ty: ty::Ty<'tcx>) -> bool {
-            if ty == real_ty {
-                return true;
-            } // This is actually a fast pointer comparison
-            return match (&ty.sty, &real_ty.sty) {
-                // Permit changing the pointer type of raw pointers and references as well as
-                // mutability of raw pointers.
-                // TODO: Should not be allowed when fat pointers are involved.
-                (&TypeVariants::TyRawPtr(_), &TypeVariants::TyRawPtr(_)) => true,
-                (&TypeVariants::TyRef(_, _), &TypeVariants::TyRef(_, _)) => {
-                    ty.is_mutable_pointer() == real_ty.is_mutable_pointer()
-                }
-                // rule out everything else
-                _ => false,
-            };
-        }
-
-        if sig.abi == real_sig.abi && sig.variadic == real_sig.variadic &&
-            sig.inputs_and_output.len() == real_sig.inputs_and_output.len() &&
-            sig.inputs_and_output
-                .iter()
-                .zip(real_sig.inputs_and_output)
-                .all(|(ty, real_ty)| check_ty_compat(ty, real_ty))
-        {
-            // Definitely good.
-            return Ok(true);
-        }
-
-        if sig.variadic || real_sig.variadic {
-            // We're not touching this
-            return Ok(false);
-        }
-
-        // We need to allow what comes up when a non-capturing closure is cast to a fn().
-        match (sig.abi, real_sig.abi) {
-            (Abi::Rust, Abi::RustCall) // check the ABIs.  This makes the test here non-symmetric.
-                if check_ty_compat(sig.output(), real_sig.output()) && real_sig.inputs_and_output.len() == 3 => {
-                // First argument of real_sig must be a ZST
-                let fst_ty = real_sig.inputs_and_output[0];
-                let layout = self.type_layout(fst_ty)?;
-                let size = layout.size(&self.tcx.data_layout).bytes();
-                if size == 0 {
-                    // Second argument must be a tuple matching the argument list of sig
-                    let snd_ty = real_sig.inputs_and_output[1];
-                    match snd_ty.sty {
-                        TypeVariants::TyTuple(tys, _) if sig.inputs().len() == tys.len() =>
-                            if sig.inputs().iter().zip(tys).all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) {
-                                return Ok(true)
-                            },
-                        _ => {}
-                    }
-                }
-            }
-            _ => {}
-        };
-
-        // Nope, this doesn't work.
-        return Ok(false);
-    }
-
-    fn eval_fn_call(
-        &mut self,
-        instance: ty::Instance<'tcx>,
-        destination: Option<(Lvalue, mir::BasicBlock)>,
-        args: &[ValTy<'tcx>],
-        span: Span,
-        sig: ty::FnSig<'tcx>,
-    ) -> EvalResult<'tcx> {
-        trace!("eval_fn_call: {:#?}", instance);
-        match instance.def {
-            ty::InstanceDef::Intrinsic(..) => {
-                let (ret, target) = match destination {
-                    Some(dest) => dest,
-                    _ => return err!(Unreachable),
-                };
-                let ty = sig.output();
-                let layout = self.type_layout(ty)?;
-                M::call_intrinsic(self, instance, args, ret, ty, layout, target)?;
-                self.dump_local(ret);
-                Ok(())
-            }
-            // FIXME: figure out why we can't just go through the shim
-            ty::InstanceDef::ClosureOnceShim { .. } => {
-                if M::eval_fn_call(self, instance, destination, args, span, sig)? {
-                    return Ok(());
-                }
-                let mut arg_locals = self.frame().mir.args_iter();
-                match sig.abi {
-                    // closure as closure once
-                    Abi::RustCall => {
-                        for (arg_local, &valty) in arg_locals.zip(args) {
-                            let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
-                            self.write_value(valty, dest)?;
-                        }
-                    }
-                    // non-capturing closure as fn ptr
-                    // need to inject a ZST ptr for the closure object (aka do nothing)
-                    // and need to pack arguments
-                    Abi::Rust => {
-                        trace!(
-                            "arg_locals: {:?}",
-                            self.frame().mir.args_iter().collect::<Vec<_>>()
-                        );
-                        trace!("args: {:?}", args);
-                        let local = arg_locals.nth(1).unwrap();
-                        for (i, &valty) in args.into_iter().enumerate() {
-                            let dest = self.eval_lvalue(&mir::Lvalue::Local(local).field(
-                                mir::Field::new(i),
-                                valty.ty,
-                            ))?;
-                            self.write_value(valty, dest)?;
-                        }
-                    }
-                    _ => bug!("bad ABI for ClosureOnceShim: {:?}", sig.abi),
-                }
-                Ok(())
-            }
-            ty::InstanceDef::FnPtrShim(..) |
-            ty::InstanceDef::DropGlue(..) |
-            ty::InstanceDef::CloneShim(..) |
-            ty::InstanceDef::Item(_) => {
-                // Push the stack frame, and potentially be entirely done if the call got hooked
-                if M::eval_fn_call(self, instance, destination, args, span, sig)? {
-                    return Ok(());
-                }
-
-                // Pass the arguments
-                let mut arg_locals = self.frame().mir.args_iter();
-                trace!("ABI: {:?}", sig.abi);
-                trace!(
-                    "arg_locals: {:?}",
-                    self.frame().mir.args_iter().collect::<Vec<_>>()
-                );
-                trace!("args: {:?}", args);
-                match sig.abi {
-                    Abi::RustCall => {
-                        assert_eq!(args.len(), 2);
-
-                        {
-                            // write first argument
-                            let first_local = arg_locals.next().unwrap();
-                            let dest = self.eval_lvalue(&mir::Lvalue::Local(first_local))?;
-                            self.write_value(args[0], dest)?;
-                        }
-
-                        // unpack and write all other args
-                        let layout = self.type_layout(args[1].ty)?;
-                        if let (&ty::TyTuple(fields, _),
-                                &Layout::Univariant { ref variant, .. }) = (&args[1].ty.sty, layout)
-                        {
-                            trace!("fields: {:?}", fields);
-                            if self.frame().mir.args_iter().count() == fields.len() + 1 {
-                                let offsets = variant.offsets.iter().map(|s| s.bytes());
-                                match args[1].value {
-                                    Value::ByRef(PtrAndAlign { ptr, aligned }) => {
-                                        assert!(
-                                            aligned,
-                                            "Unaligned ByRef-values cannot occur as function arguments"
-                                        );
-                                        for ((offset, ty), arg_local) in
-                                            offsets.zip(fields).zip(arg_locals)
-                                        {
-                                            let arg = Value::by_ref(ptr.offset(offset, &self)?);
-                                            let dest =
-                                                self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
-                                            trace!(
-                                                "writing arg {:?} to {:?} (type: {})",
-                                                arg,
-                                                dest,
-                                                ty
-                                            );
-                                            let valty = ValTy {
-                                                value: arg,
-                                                ty,
-                                            };
-                                            self.write_value(valty, dest)?;
-                                        }
-                                    }
-                                    Value::ByVal(PrimVal::Undef) => {}
-                                    other => {
-                                        assert_eq!(fields.len(), 1);
-                                        let dest = self.eval_lvalue(&mir::Lvalue::Local(
-                                            arg_locals.next().unwrap(),
-                                        ))?;
-                                        let valty = ValTy {
-                                            value: other,
-                                            ty: fields[0],
-                                        };
-                                        self.write_value(valty, dest)?;
-                                    }
-                                }
-                            } else {
-                                trace!("manual impl of rust-call ABI");
-                                // called a manual impl of a rust-call function
-                                let dest = self.eval_lvalue(
-                                    &mir::Lvalue::Local(arg_locals.next().unwrap()),
-                                )?;
-                                self.write_value(args[1], dest)?;
-                            }
-                        } else {
-                            bug!(
-                                "rust-call ABI tuple argument was {:#?}, {:#?}",
-                                args[1].ty,
-                                layout
-                            );
-                        }
-                    }
-                    _ => {
-                        for (arg_local, &valty) in arg_locals.zip(args) {
-                            let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
-                            self.write_value(valty, dest)?;
-                        }
-                    }
-                }
-                Ok(())
-            }
-            // cannot use the shim here, because that will only result in infinite recursion
-            ty::InstanceDef::Virtual(_, idx) => {
-                let ptr_size = self.memory.pointer_size();
-                let (ptr, vtable) = args[0].into_ptr_vtable_pair(&self.memory)?;
-                let fn_ptr = self.memory.read_ptr_sized_unsigned(
-                    vtable.offset(ptr_size * (idx as u64 + 3), &self)?
-                )?.to_ptr()?;
-                let instance = self.memory.get_fn(fn_ptr)?;
-                let mut args = args.to_vec();
-                let ty = self.get_field_ty(args[0].ty, 0)?.ty; // TODO: packed flag is ignored
-                args[0].ty = ty;
-                args[0].value = ptr.to_value();
-                // recurse with concrete function
-                self.eval_fn_call(instance, destination, &args, span, sig)
-            }
-        }
-    }
-}
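
One subtlety in check_sig_compat above is worth restating on its own: when a non-capturing closure is cast to a function pointer, the caller sees an Abi::Rust signature (A, B) -> R while the real item has the Abi::RustCall signature (Env, (A, B)) -> R, and the call is accepted when Env is a ZST and the tuple's element types line up with the caller's argument list. A shape-level sketch under those assumptions (string type names are stand-ins, nothing here is interpreter API):

// Accept an Abi::Rust caller signature against an Abi::RustCall callee:
// the closure environment must be zero-sized and the packed argument
// tuple must mirror the caller's argument list.
fn rust_call_shape_ok(env_size: u64, caller_args: &[&str], callee_tuple: &[&str]) -> bool {
    env_size == 0 && caller_args == callee_tuple
}

fn main() {
    assert!(rust_call_shape_ok(0, &["i32", "u8"], &["i32", "u8"]));
    assert!(!rust_call_shape_ok(8, &["i32"], &["i32"]));       // capturing closure: env not a ZST
    assert!(!rust_call_shape_ok(0, &["i32"], &["i32", "u8"])); // arity mismatch
}
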
diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs
deleted file mode 100644 (file)
index 3f7e10a..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-use rustc::traits::{self, Reveal};
-use rustc::hir::def_id::DefId;
-use rustc::ty::subst::Substs;
-use rustc::ty::{self, Ty};
-use syntax::codemap::DUMMY_SP;
-use syntax::ast::{self, Mutability};
-
-use super::{EvalResult, EvalContext, eval_context, MemoryPointer, MemoryKind, Value, PrimVal,
-            Machine};
-
-impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
-    pub(crate) fn fulfill_obligation(
-        &self,
-        trait_ref: ty::PolyTraitRef<'tcx>,
-    ) -> traits::Vtable<'tcx, ()> {
-        // Do the initial selection for the obligation. This yields the shallow result we are
-        // looking for -- that is, what specific impl.
-        self.tcx.infer_ctxt().enter(|infcx| {
-            let mut selcx = traits::SelectionContext::new(&infcx);
-
-            let obligation = traits::Obligation::new(
-                traits::ObligationCause::misc(DUMMY_SP, ast::DUMMY_NODE_ID),
-                ty::ParamEnv::empty(Reveal::All),
-                trait_ref.to_poly_trait_predicate(),
-            );
-            let selection = selcx.select(&obligation).unwrap().unwrap();
-
-            // Currently, we use a fulfillment context to completely resolve all nested obligations.
-            // This is because they can inform the inference of the impl's type parameters.
-            let mut fulfill_cx = traits::FulfillmentContext::new();
-            let vtable = selection.map(|predicate| {
-                fulfill_cx.register_predicate_obligation(&infcx, predicate);
-            });
-            infcx.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &vtable)
-        })
-    }
-
-    /// Creates a dynamic vtable for the given type and vtable origin. This is used only for
-    /// objects.
-    ///
-    /// The `trait_ref` encodes the erased self type. Hence if we are
-    /// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
-    /// `trait_ref` would map `T: Trait`.
-    pub fn get_vtable(
-        &mut self,
-        ty: Ty<'tcx>,
-        trait_ref: ty::PolyTraitRef<'tcx>,
-    ) -> EvalResult<'tcx, MemoryPointer> {
-        debug!("get_vtable(trait_ref={:?})", trait_ref);
-
-        let size = self.type_size(trait_ref.self_ty())?.expect(
-            "can't create a vtable for an unsized type",
-        );
-        let align = self.type_align(trait_ref.self_ty())?;
-
-        let ptr_size = self.memory.pointer_size();
-        let methods = ::rustc::traits::get_vtable_methods(self.tcx, trait_ref);
-        let vtable = self.memory.allocate(
-            ptr_size * (3 + methods.count() as u64),
-            ptr_size,
-            MemoryKind::UninitializedStatic,
-        )?;
-
-        let drop = eval_context::resolve_drop_in_place(self.tcx, ty);
-        let drop = self.memory.create_fn_alloc(drop);
-        self.memory.write_ptr_sized_unsigned(vtable, PrimVal::Ptr(drop))?;
-
-        let size_ptr = vtable.offset(ptr_size, &self)?;
-        self.memory.write_ptr_sized_unsigned(size_ptr, PrimVal::Bytes(size as u128))?;
-        let align_ptr = vtable.offset(ptr_size * 2, &self)?;
-        self.memory.write_ptr_sized_unsigned(align_ptr, PrimVal::Bytes(align as u128))?;
-
-        for (i, method) in ::rustc::traits::get_vtable_methods(self.tcx, trait_ref).enumerate() {
-            if let Some((def_id, substs)) = method {
-                let instance = eval_context::resolve(self.tcx, def_id, substs);
-                let fn_ptr = self.memory.create_fn_alloc(instance);
-                let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?;
-                self.memory.write_ptr_sized_unsigned(method_ptr, PrimVal::Ptr(fn_ptr))?;
-            }
-        }
-
-        self.memory.mark_static_initalized(
-            vtable.alloc_id,
-            Mutability::Mutable,
-        )?;
-
-        Ok(vtable)
-    }
-
-    pub fn read_drop_type_from_vtable(
-        &self,
-        vtable: MemoryPointer,
-    ) -> EvalResult<'tcx, Option<ty::Instance<'tcx>>> {
-        // we don't care about the pointee type, we just want a pointer
-        match self.read_ptr(vtable, self.tcx.mk_nil_ptr())? {
-            // some values don't need to call a drop impl, so the value is null
-            Value::ByVal(PrimVal::Bytes(0)) => Ok(None),
-            Value::ByVal(PrimVal::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some),
-            _ => err!(ReadBytesAsPointer),
-        }
-    }
-
-    pub fn read_size_and_align_from_vtable(
-        &self,
-        vtable: MemoryPointer,
-    ) -> EvalResult<'tcx, (u64, u64)> {
-        let pointer_size = self.memory.pointer_size();
-        let size = self.memory.read_ptr_sized_unsigned(vtable.offset(pointer_size, self)?)?.to_bytes()? as u64;
-        let align = self.memory.read_ptr_sized_unsigned(
-            vtable.offset(pointer_size * 2, self)?
-        )?.to_bytes()? as u64;
-        Ok((size, align))
-    }
-
-    pub(crate) fn resolve_associated_const(
-        &self,
-        def_id: DefId,
-        substs: &'tcx Substs<'tcx>,
-    ) -> ty::Instance<'tcx> {
-        if let Some(trait_id) = self.tcx.trait_of_item(def_id) {
-            let trait_ref = ty::Binder(ty::TraitRef::new(trait_id, substs));
-            let vtable = self.fulfill_obligation(trait_ref);
-            if let traits::VtableImpl(vtable_impl) = vtable {
-                let name = self.tcx.item_name(def_id);
-                let assoc_const_opt = self.tcx.associated_items(vtable_impl.impl_def_id).find(
-                    |item| {
-                        item.kind == ty::AssociatedKind::Const && item.name == name
-                    },
-                );
-                if let Some(assoc_const) = assoc_const_opt {
-                    return ty::Instance::new(assoc_const.def_id, vtable_impl.substs);
-                }
-            }
-        }
-        ty::Instance::new(def_id, substs)
-    }
-}
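
get_vtable and read_size_and_align_from_vtable above are two views of one layout: [drop_fn, size, align, method0, method1, ...], which is also why the InstanceDef::Virtual arm in terminator/mod.rs loads method `idx` at offset `ptr_size * (idx + 3)`. A compact sketch that builds such a table as a plain word array and reads it back (illustrative only; miri writes these words through its Memory abstraction):

// Build a vtable image: [drop_fn, size, align, method0, method1, ...].
fn build_vtable(drop_fn: usize, size: usize, align: usize, methods: &[usize]) -> Vec<usize> {
    let mut table = vec![drop_fn, size, align];
    table.extend_from_slice(methods);
    table
}

// Mirrors read_size_and_align_from_vtable: slots 1 and 2.
fn size_and_align(vtable: &[usize]) -> (usize, usize) {
    (vtable[1], vtable[2])
}

fn main() {
    let vt = build_vtable(0, 16, 8, &[0xdead, 0xbeef]);
    assert_eq!(size_and_align(&vt), (16, 8));
    assert_eq!(vt[3 + 1], 0xbeef); // method slot 1, as in the Virtual call arm
}
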
diff --git a/src/librustc_mir/interpret/validation.rs b/src/librustc_mir/interpret/validation.rs
deleted file mode 100644 (file)
index 9be9341..0000000
+++ /dev/null
@@ -1,727 +0,0 @@
-use rustc::hir::{self, Mutability};
-use rustc::hir::Mutability::*;
-use rustc::mir::{self, ValidationOp, ValidationOperand};
-use rustc::ty::{self, Ty, TypeFoldable, TyCtxt};
-use rustc::ty::subst::{Substs, Subst};
-use rustc::traits;
-use rustc::infer::InferCtxt;
-use rustc::traits::Reveal;
-use rustc::middle::region;
-use rustc_data_structures::indexed_vec::Idx;
-
-use super::{EvalError, EvalResult, EvalErrorKind, EvalContext, DynamicLifetime, AccessKind, Value,
-            Lvalue, LvalueExtra, Machine, ValTy};
-
-pub type ValidationQuery<'tcx> = ValidationOperand<'tcx, (AbsLvalue<'tcx>, Lvalue)>;
-
-#[derive(Copy, Clone, Debug, PartialEq)]
-enum ValidationMode {
-    Acquire,
-    /// Recover because the given region ended
-    Recover(region::Scope),
-    ReleaseUntil(Option<region::Scope>),
-}
-
-impl ValidationMode {
-    fn acquiring(self) -> bool {
-        use self::ValidationMode::*;
-        match self {
-            Acquire | Recover(_) => true,
-            ReleaseUntil(_) => false,
-        }
-    }
-}
-
-// Abstract lvalues
-#[derive(Clone, Debug, PartialEq, Eq, Hash)]
-pub enum AbsLvalue<'tcx> {
-    Local(mir::Local),
-    Static(hir::def_id::DefId),
-    Projection(Box<AbsLvalueProjection<'tcx>>),
-}
-
-type AbsLvalueProjection<'tcx> = mir::Projection<'tcx, AbsLvalue<'tcx>, u64, ()>;
-type AbsLvalueElem<'tcx> = mir::ProjectionElem<'tcx, u64, ()>;
-
-impl<'tcx> AbsLvalue<'tcx> {
-    pub fn field(self, f: mir::Field) -> AbsLvalue<'tcx> {
-        self.elem(mir::ProjectionElem::Field(f, ()))
-    }
-
-    pub fn deref(self) -> AbsLvalue<'tcx> {
-        self.elem(mir::ProjectionElem::Deref)
-    }
-
-    pub fn downcast(self, adt_def: &'tcx ty::AdtDef, variant_index: usize) -> AbsLvalue<'tcx> {
-        self.elem(mir::ProjectionElem::Downcast(adt_def, variant_index))
-    }
-
-    pub fn index(self, index: u64) -> AbsLvalue<'tcx> {
-        self.elem(mir::ProjectionElem::Index(index))
-    }
-
-    fn elem(self, elem: AbsLvalueElem<'tcx>) -> AbsLvalue<'tcx> {
-        AbsLvalue::Projection(Box::new(AbsLvalueProjection {
-            base: self,
-            elem,
-        }))
-    }
-}
-
-impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
-    fn abstract_lvalue_projection(&self, proj: &mir::LvalueProjection<'tcx>) -> EvalResult<'tcx, AbsLvalueProjection<'tcx>> {
-        use self::mir::ProjectionElem::*;
-
-        let elem = match proj.elem {
-            Deref => Deref,
-            Field(f, _) => Field(f, ()),
-            Index(v) => {
-                let value = self.frame().get_local(v)?;
-                let ty = self.tcx.types.usize;
-                let n = self.value_to_primval(ValTy { value, ty })?.to_u64()?;
-                Index(n)
-            },
-            ConstantIndex { offset, min_length, from_end } =>
-                ConstantIndex { offset, min_length, from_end },
-            Subslice { from, to } =>
-                Subslice { from, to },
-            Downcast(adt, sz) => Downcast(adt, sz),
-        };
-        Ok(AbsLvalueProjection {
-            base: self.abstract_lvalue(&proj.base)?,
-            elem
-        })
-    }
-
-    fn abstract_lvalue(&self, lval: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, AbsLvalue<'tcx>> {
-        Ok(match lval {
-            &mir::Lvalue::Local(l) => AbsLvalue::Local(l),
-            &mir::Lvalue::Static(ref s) => AbsLvalue::Static(s.def_id),
-            &mir::Lvalue::Projection(ref p) =>
-                AbsLvalue::Projection(Box::new(self.abstract_lvalue_projection(&*p)?)),
-        })
-    }
-
-    // Validity checks
-    pub(crate) fn validation_op(
-        &mut self,
-        op: ValidationOp,
-        operand: &ValidationOperand<'tcx, mir::Lvalue<'tcx>>,
-    ) -> EvalResult<'tcx> {
-        // If mir-emit-validate is set to 0 (i.e., disabled), we may still see validation commands
-        // because other crates may have been compiled with mir-emit-validate > 0.  Ignore those
-        // commands.  This makes mir-emit-validate also a flag to control whether miri will do
-        // validation or not.
-        if self.tcx.sess.opts.debugging_opts.mir_emit_validate == 0 {
-            return Ok(());
-        }
-        debug_assert!(self.memory.cur_frame == self.cur_frame());
-
-        // HACK: Determine if this method is whitelisted and hence we do not perform any validation.
-        // We currently insta-UB on anything passing around uninitialized memory, so we have to whitelist
-        // the places that are allowed to do that.
-        // The second group is stuff libstd does that is forbidden even under relaxed validation.
-        {
-            // The regexp we use for filtering
-            use regex::Regex;
-            lazy_static! {
-                static ref RE: Regex = Regex::new("^(\
-                    (std|alloc::heap::__core)::mem::(uninitialized|forget)::|\
-                    <(std|alloc)::heap::Heap as (std::heap|alloc::allocator)::Alloc>::|\
-                    <(std|alloc::heap::__core)::mem::ManuallyDrop<T>><.*>::new$|\
-                    <(std|alloc::heap::__core)::mem::ManuallyDrop<T> as std::ops::DerefMut><.*>::deref_mut$|\
-                    (std|alloc::heap::__core)::ptr::read::|\
-                    \
-                    <std::sync::Arc<T>><.*>::inner$|\
-                    <std::sync::Arc<T>><.*>::drop_slow$|\
-                    (std::heap|alloc::allocator)::Layout::for_value::|\
-                    (std|alloc::heap::__core)::mem::(size|align)_of_val::\
-                )").unwrap();
-            }
-            // Now test
-            let name = self.stack[self.cur_frame()].instance.to_string();
-            if RE.is_match(&name) {
-                return Ok(());
-            }
-        }
-
-        // We need to monomorphize ty *without* erasing lifetimes
-        let ty = operand.ty.subst(self.tcx, self.substs());
-        let lval = self.eval_lvalue(&operand.lval)?;
-        let abs_lval = self.abstract_lvalue(&operand.lval)?;
-        let query = ValidationQuery {
-            lval: (abs_lval, lval),
-            ty,
-            re: operand.re,
-            mutbl: operand.mutbl,
-        };
-
-        // Check the mode, and also perform mode-specific operations
-        let mode = match op {
-            ValidationOp::Acquire => ValidationMode::Acquire,
-            ValidationOp::Release => ValidationMode::ReleaseUntil(None),
-            ValidationOp::Suspend(scope) => {
-                if query.mutbl == MutMutable {
-                    let lft = DynamicLifetime {
-                        frame: self.cur_frame(),
-                        region: Some(scope), // Notably, we only ever suspend things for given regions.
-                        // Suspending for the entire function does not make any sense.
-                    };
-                    trace!("Suspending {:?} until {:?}", query, scope);
-                    self.suspended.entry(lft).or_insert_with(Vec::new).push(
-                        query.clone(),
-                    );
-                }
-                ValidationMode::ReleaseUntil(Some(scope))
-            }
-        };
-        self.validate(query, mode)
-    }
-
-    /// Releases locks and executes suspensions of the given region (or the entire fn, in case of None).
-    pub(crate) fn end_region(&mut self, scope: Option<region::Scope>) -> EvalResult<'tcx> {
-        debug_assert!(self.memory.cur_frame == self.cur_frame());
-        self.memory.locks_lifetime_ended(scope);
-        match scope {
-            Some(scope) => {
-                // Recover suspended lvals
-                let lft = DynamicLifetime {
-                    frame: self.cur_frame(),
-                    region: Some(scope),
-                };
-                if let Some(queries) = self.suspended.remove(&lft) {
-                    for query in queries {
-                        trace!("Recovering {:?} from suspension", query);
-                        self.validate(query, ValidationMode::Recover(scope))?;
-                    }
-                }
-            }
-            None => {
-                // Clean suspension table of current frame
-                let cur_frame = self.cur_frame();
-                self.suspended.retain(|lft, _| {
-                    lft.frame != cur_frame // keep only what is in the other (lower) frames
-                });
-            }
-        }
-        Ok(())
-    }
-
-    fn normalize_type_unerased(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
-        return normalize_associated_type(self.tcx, &ty);
-
-        use syntax::codemap::{Span, DUMMY_SP};
-
-        // We copy a bunch of stuff from rustc/infer/mod.rs to be able to tweak its behavior
-        fn normalize_projections_in<'a, 'gcx, 'tcx, T>(
-            self_: &InferCtxt<'a, 'gcx, 'tcx>,
-            param_env: ty::ParamEnv<'tcx>,
-            value: &T,
-        ) -> T::Lifted
-        where
-            T: TypeFoldable<'tcx> + ty::Lift<'gcx>,
-        {
-            let mut selcx = traits::SelectionContext::new(self_);
-            let cause = traits::ObligationCause::dummy();
-            let traits::Normalized {
-                value: result,
-                obligations,
-            } = traits::normalize(&mut selcx, param_env, cause, value);
-
-            let mut fulfill_cx = traits::FulfillmentContext::new();
-
-            for obligation in obligations {
-                fulfill_cx.register_predicate_obligation(self_, obligation);
-            }
-
-            drain_fulfillment_cx_or_panic(self_, DUMMY_SP, &mut fulfill_cx, &result)
-        }
-
-        fn drain_fulfillment_cx_or_panic<'a, 'gcx, 'tcx, T>(
-            self_: &InferCtxt<'a, 'gcx, 'tcx>,
-            span: Span,
-            fulfill_cx: &mut traits::FulfillmentContext<'tcx>,
-            result: &T,
-        ) -> T::Lifted
-        where
-            T: TypeFoldable<'tcx> + ty::Lift<'gcx>,
-        {
-            // In principle, we only need to do this so long as `result`
-            // contains unbound type parameters. It could be a slight
-            // optimization to stop iterating early.
-            match fulfill_cx.select_all_or_error(self_) {
-                Ok(()) => { }
-                Err(errors) => {
-                    span_bug!(
-                        span,
-                        "Encountered errors `{:?}` resolving bounds after type-checking",
-                        errors
-                    );
-                }
-            }
-
-            let result = self_.resolve_type_vars_if_possible(result);
-            let result = self_.tcx.fold_regions(
-                &result,
-                &mut false,
-                |r, _| match *r {
-                    ty::ReVar(_) => self_.tcx.types.re_erased,
-                    _ => r,
-                },
-            );
-
-            match self_.tcx.lift_to_global(&result) {
-                Some(result) => result,
-                None => {
-                    span_bug!(span, "Uninferred types/regions in `{:?}`", result);
-                }
-            }
-        }
-
-        trait MyTransNormalize<'gcx>: TypeFoldable<'gcx> {
-            fn my_trans_normalize<'a, 'tcx>(
-                &self,
-                infcx: &InferCtxt<'a, 'gcx, 'tcx>,
-                param_env: ty::ParamEnv<'tcx>,
-            ) -> Self;
-        }
-
-        macro_rules! items { ($($item:item)+) => ($($item)+) }
-        macro_rules! impl_trans_normalize {
-            ($lt_gcx:tt, $($ty:ty),+) => {
-                items!($(impl<$lt_gcx> MyTransNormalize<$lt_gcx> for $ty {
-                    fn my_trans_normalize<'a, 'tcx>(&self,
-                                                infcx: &InferCtxt<'a, $lt_gcx, 'tcx>,
-                                                param_env: ty::ParamEnv<'tcx>)
-                                                -> Self {
-                        normalize_projections_in(infcx, param_env, self)
-                    }
-                })+);
-            }
-        }
-
-        impl_trans_normalize!('gcx,
-            Ty<'gcx>,
-            &'gcx Substs<'gcx>,
-            ty::FnSig<'gcx>,
-            ty::PolyFnSig<'gcx>,
-            ty::ClosureSubsts<'gcx>,
-            ty::PolyTraitRef<'gcx>,
-            ty::ExistentialTraitRef<'gcx>
-        );
-
-        fn normalize_associated_type<'a, 'tcx, T>(self_: TyCtxt<'a, 'tcx, 'tcx>, value: &T) -> T
-        where
-            T: MyTransNormalize<'tcx>,
-        {
-            let param_env = ty::ParamEnv::empty(Reveal::All);
-
-            if !value.has_projections() {
-                return value.clone();
-            }
-
-            self_.infer_ctxt().enter(|infcx| {
-                value.my_trans_normalize(&infcx, param_env)
-            })
-        }
-    }
-
-    fn validate_variant(
-        &mut self,
-        query: ValidationQuery<'tcx>,
-        variant: &ty::VariantDef,
-        subst: &ty::subst::Substs<'tcx>,
-        mode: ValidationMode,
-    ) -> EvalResult<'tcx> {
-        // TODO: Maybe take visibility/privacy into account.
-        for (idx, field_def) in variant.fields.iter().enumerate() {
-            let field_ty = field_def.ty(self.tcx, subst);
-            let field = mir::Field::new(idx);
-            let field_lvalue = self.lvalue_field(query.lval.1, field, query.ty, field_ty)?;
-            self.validate(
-                ValidationQuery {
-                    lval: (query.lval.0.clone().field(field), field_lvalue),
-                    ty: field_ty,
-                    ..query
-                },
-                mode,
-            )?;
-        }
-        Ok(())
-    }
-
-    fn validate_ptr(
-        &mut self,
-        val: Value,
-        abs_lval: AbsLvalue<'tcx>,
-        pointee_ty: Ty<'tcx>,
-        re: Option<region::Scope>,
-        mutbl: Mutability,
-        mode: ValidationMode,
-    ) -> EvalResult<'tcx> {
-        // Check alignment and non-NULLness
-        let (_, align) = self.size_and_align_of_dst(pointee_ty, val)?;
-        let ptr = val.into_ptr(&self.memory)?;
-        self.memory.check_align(ptr, align, None)?;
-
-        // Recurse
-        let pointee_lvalue = self.val_to_lvalue(val, pointee_ty)?;
-        self.validate(
-            ValidationQuery {
-                lval: (abs_lval.deref(), pointee_lvalue),
-                ty: pointee_ty,
-                re,
-                mutbl,
-            },
-            mode,
-        )
-    }
-
-    /// Validates the lvalue at the given type. If the mode is not acquiring, this only releases or suspends write locks.
-    fn validate(
-        &mut self,
-        mut query: ValidationQuery<'tcx>,
-        mode: ValidationMode,
-    ) -> EvalResult<'tcx> {
-        use rustc::ty::TypeVariants::*;
-        use rustc::ty::RegionKind::*;
-        use rustc::ty::AdtKind;
-
-        // No point releasing shared stuff.
-        if !mode.acquiring() && query.mutbl == MutImmutable {
-            return Ok(());
-        }
-        // When we recover, we may see data whose validity *just* ended.  Do not acquire it.
-        if let ValidationMode::Recover(ending_ce) = mode {
-            if query.re == Some(ending_ce) {
-                return Ok(());
-            }
-        }
-
-        query.ty = self.normalize_type_unerased(&query.ty);
-        trace!("{:?} on {:?}", mode, query);
-
-        // Decide whether this type *owns* the memory it covers (like integers), or whether it
-        // just assembles pieces (that each own their memory) into a larger whole.
-        // TODO: Currently, we don't acquire locks for padding and discriminants. We should.
-        let is_owning = match query.ty.sty {
-            TyInt(_) | TyUint(_) | TyRawPtr(_) | TyBool | TyFloat(_) | TyChar | TyStr |
-            TyRef(..) | TyFnPtr(..) | TyFnDef(..) | TyNever => true,
-            TyAdt(adt, _) if adt.is_box() => true,
-            TySlice(_) | TyAdt(_, _) | TyTuple(..) | TyClosure(..) | TyArray(..) |
-            TyDynamic(..) | TyGenerator(..) => false,
-            TyParam(_) | TyInfer(_) | TyProjection(_) | TyAnon(..) | TyError => {
-                bug!("I got an incomplete/unnormalized type for validation")
-            }
-        };
-        if is_owning {
-            // We need to lock.  So we need memory.  So we have to call force_allocation.
-            // Tracking the same state for locals not backed by memory would just duplicate too
-            // much machinery.
-            // FIXME: We ignore alignment.
-            let (ptr, extra) = self.force_allocation(query.lval.1)?.to_ptr_extra_aligned();
-            // Determine the size
-            // FIXME: Can we reuse size_and_align_of_dst for Lvalues?
-            let len = match self.type_size(query.ty)? {
-                Some(size) => {
-                    assert_eq!(extra, LvalueExtra::None, "Got a fat ptr to a sized type");
-                    size
-                }
-                None => {
-                    // The only unsized type we consider "owning" is TyStr.
-                    assert_eq!(
-                        query.ty.sty,
-                        TyStr,
-                        "Found a surprising unsized owning type"
-                    );
-                    // The extra must be the length, in bytes.
-                    match extra {
-                        LvalueExtra::Length(len) => len,
-                        _ => bug!("TyStr must have a length as extra"),
-                    }
-                }
-            };
-            // Handle locking
-            if len > 0 {
-                let ptr = ptr.to_ptr()?;
-                match query.mutbl {
-                    MutImmutable => {
-                        if mode.acquiring() {
-                            self.memory.acquire_lock(
-                                ptr,
-                                len,
-                                query.re,
-                                AccessKind::Read,
-                            )?;
-                        }
-                    }
-                    // No releasing of read locks, ever.
-                    MutMutable => {
-                        match mode {
-                            ValidationMode::Acquire => {
-                                self.memory.acquire_lock(
-                                    ptr,
-                                    len,
-                                    query.re,
-                                    AccessKind::Write,
-                                )?
-                            }
-                            ValidationMode::Recover(ending_ce) => {
-                                self.memory.recover_write_lock(
-                                    ptr,
-                                    len,
-                                    &query.lval.0,
-                                    query.re,
-                                    ending_ce,
-                                )?
-                            }
-                            ValidationMode::ReleaseUntil(suspended_ce) => {
-                                self.memory.suspend_write_lock(
-                                    ptr,
-                                    len,
-                                    &query.lval.0,
-                                    suspended_ce,
-                                )?
-                            }
-                        }
-                    }
-                }
-            }
-        }
-
-        let res = do catch {
-            match query.ty.sty {
-                TyInt(_) | TyUint(_) | TyRawPtr(_) => {
-                    if mode.acquiring() {
-                        // Make sure we can read this.
-                        let val = self.read_lvalue(query.lval.1)?;
-                        self.follow_by_ref_value(val, query.ty)?;
-                        // FIXME: It would be great to rule out Undef here, but that doesn't actually work.
-                        // Passing around undef data is a thing that e.g. Vec::extend_with does.
-                    }
-                    Ok(())
-                }
-                TyBool | TyFloat(_) | TyChar => {
-                    if mode.acquiring() {
-                        let val = self.read_lvalue(query.lval.1)?;
-                        let val = self.value_to_primval(ValTy { value: val, ty: query.ty })?;
-                        val.to_bytes()?;
-                        // TODO: Check if these are valid bool/float/codepoint/UTF-8
-                    }
-                    Ok(())
-                }
-                TyNever => err!(ValidationFailure(format!("The empty type is never valid."))),
-                TyRef(region,
-                    ty::TypeAndMut {
-                        ty: pointee_ty,
-                        mutbl,
-                    }) => {
-                    let val = self.read_lvalue(query.lval.1)?;
-                    // Sharing restricts our context
-                    if mutbl == MutImmutable {
-                        query.mutbl = MutImmutable;
-                    }
-                    // Inner lifetimes *outlive* outer ones, so only if we have no lifetime restriction yet
-                    // do we record the region of this borrow in the context.
-                    if query.re == None {
-                        match *region {
-                            ReScope(scope) => query.re = Some(scope),
-                            // It is possible for us to encounter erased lifetimes here because the lifetimes in
-                            // this function's Substs will be erased.
-                            _ => {}
-                        }
-                    }
-                    self.validate_ptr(val, query.lval.0, pointee_ty, query.re, query.mutbl, mode)
-                }
-                TyAdt(adt, _) if adt.is_box() => {
-                    let val = self.read_lvalue(query.lval.1)?;
-                    self.validate_ptr(val, query.lval.0, query.ty.boxed_ty(), query.re, query.mutbl, mode)
-                }
-                TyFnPtr(_sig) => {
-                    let ptr = self.read_lvalue(query.lval.1)?
-                        .into_ptr(&self.memory)?
-                        .to_ptr()?;
-                    self.memory.get_fn(ptr)?;
-                    // TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?).
-                    Ok(())
-                }
-                TyFnDef(..) => {
-                    // This is a zero-sized type with all relevant data sitting in the type.
-                    // There is nothing to validate.
-                    Ok(())
-                }
-
-                // Compound types
-                TyStr => {
-                    // TODO: Validate strings
-                    Ok(())
-                }
-                TySlice(elem_ty) => {
-                    let len = match query.lval.1 {
-                        Lvalue::Ptr { extra: LvalueExtra::Length(len), .. } => len,
-                        _ => {
-                            bug!(
-                                "acquire_valid of a TySlice given non-slice lvalue: {:?}",
-                                query.lval
-                            )
-                        }
-                    };
-                    for i in 0..len {
-                        let inner_lvalue = self.lvalue_index(query.lval.1, query.ty, i)?;
-                        self.validate(
-                            ValidationQuery {
-                                lval: (query.lval.0.clone().index(i), inner_lvalue),
-                                ty: elem_ty,
-                                ..query
-                            },
-                            mode,
-                        )?;
-                    }
-                    Ok(())
-                }
-                TyArray(elem_ty, len) => {
-                    let len = len.val.to_const_int().unwrap().to_u64().unwrap();
-                    for i in 0..len {
-                        let inner_lvalue = self.lvalue_index(query.lval.1, query.ty, i as u64)?;
-                        self.validate(
-                            ValidationQuery {
-                                lval: (query.lval.0.clone().index(i as u64), inner_lvalue),
-                                ty: elem_ty,
-                                ..query
-                            },
-                            mode,
-                        )?;
-                    }
-                    Ok(())
-                }
-                TyDynamic(_data, _region) => {
-                    // Check that this is a valid vtable
-                    let vtable = match query.lval.1 {
-                        Lvalue::Ptr { extra: LvalueExtra::Vtable(vtable), .. } => vtable,
-                        _ => {
-                            bug!(
-                                "acquire_valid of a TyDynamic given non-trait-object lvalue: {:?}",
-                                query.lval
-                            )
-                        }
-                    };
-                    self.read_size_and_align_from_vtable(vtable)?;
-                    // TODO: Check that the vtable contains all the function pointers we expect it to have.
-                    // Trait objects cannot have any operations performed
-                    // on them directly.  We cannot, in general, even acquire any locks as the trait object *could*
-                    // contain an UnsafeCell.  If we call functions to get access to data, we will validate
-                    // their return values.  So, it doesn't seem like there's anything else to do.
-                    Ok(())
-                }
-                TyAdt(adt, subst) => {
-                    if Some(adt.did) == self.tcx.lang_items().unsafe_cell_type() &&
-                        query.mutbl == MutImmutable
-                    {
-                        // No locks for shared unsafe cells.  Also no other validation, the only field is private anyway.
-                        return Ok(());
-                    }
-
-                    match adt.adt_kind() {
-                        AdtKind::Enum => {
-                            // TODO: Can we get the discriminant without forcing an allocation?
-                            let ptr = self.force_allocation(query.lval.1)?.to_ptr()?;
-                            let discr = self.read_discriminant_value(ptr, query.ty)?;
-
-                            // Get variant index for discriminant
-                            let variant_idx = adt.discriminants(self.tcx).position(|variant_discr| {
-                                variant_discr.to_u128_unchecked() == discr
-                            });
-                            let variant_idx = match variant_idx {
-                                Some(val) => val,
-                                None => return err!(InvalidDiscriminant),
-                            };
-                            let variant = &adt.variants[variant_idx];
-
-                            if variant.fields.len() > 0 {
-                                // Downcast to this variant, if needed
-                                let lval = if adt.variants.len() > 1 {
-                                    (
-                                        query.lval.0.downcast(adt, variant_idx),
-                                        self.eval_lvalue_projection(
-                                            query.lval.1,
-                                            query.ty,
-                                            &mir::ProjectionElem::Downcast(adt, variant_idx),
-                                        )?,
-                                    )
-                                } else {
-                                    query.lval
-                                };
-
-                                // Recursively validate the fields
-                                self.validate_variant(
-                                    ValidationQuery { lval, ..query },
-                                    variant,
-                                    subst,
-                                    mode,
-                                )
-                            } else {
-                                // No fields, nothing left to check.  Downcasting may fail, e.g. in case of a CEnum.
-                                Ok(())
-                            }
-                        }
-                        AdtKind::Struct => {
-                            self.validate_variant(query, adt.struct_variant(), subst, mode)
-                        }
-                        AdtKind::Union => {
-                            // No guarantees are provided for union types.
-                            // TODO: Make sure that all access to union fields is unsafe; otherwise, we may have some checking to do (but what exactly?)
-                            Ok(())
-                        }
-                    }
-                }
-                TyTuple(ref types, _) => {
-                    for (idx, field_ty) in types.iter().enumerate() {
-                        let field = mir::Field::new(idx);
-                        let field_lvalue = self.lvalue_field(query.lval.1, field, query.ty, field_ty)?;
-                        self.validate(
-                            ValidationQuery {
-                                lval: (query.lval.0.clone().field(field), field_lvalue),
-                                ty: field_ty,
-                                ..query
-                            },
-                            mode,
-                        )?;
-                    }
-                    Ok(())
-                }
-                TyClosure(def_id, ref closure_substs) => {
-                    for (idx, field_ty) in closure_substs.upvar_tys(def_id, self.tcx).enumerate() {
-                        let field = mir::Field::new(idx);
-                        let field_lvalue = self.lvalue_field(query.lval.1, field, query.ty, field_ty)?;
-                        self.validate(
-                            ValidationQuery {
-                                lval: (query.lval.0.clone().field(field), field_lvalue),
-                                ty: field_ty,
-                                ..query
-                            },
-                            mode,
-                        )?;
-                    }
-                    // TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?).
-                    // Are there other things we can/should check?  Like vtable pointers?
-                    Ok(())
-                }
-                // FIXME: generators aren't validated right now
-                TyGenerator(..) => Ok(()),
-                _ => bug!("We already established that this is a type we support. ({})", query.ty),
-            }
-        };
-        match res {
-            // ReleaseUntil(None) of an uninitialized variable is a NOP.  This is needed because
-            // we have to release the return value of a function; due to destination-passing
-            // style, the callee may write directly there.
-            // TODO: Ideally we would know whether the destination is already initialized, and only
-            // release if it is.  But of course that can't even always be statically determined.
-            Err(EvalError { kind: EvalErrorKind::ReadUndefBytes, .. })
-                if mode == ValidationMode::ReleaseUntil(None) => {
-                return Ok(());
-            }
-            res => res,
-        }
-    }
-}
diff --git a/src/librustc_mir/interpret/value.rs b/src/librustc_mir/interpret/value.rs
deleted file mode 100644 (file)
index e052ec1..0000000
+++ /dev/null
@@ -1,405 +0,0 @@
-#![allow(unknown_lints)]
-
-use rustc::ty::layout::HasDataLayout;
-
-use super::{EvalResult, Memory, MemoryPointer, HasMemory, PointerArithmetic, Machine, PtrAndAlign};
-
-pub(super) fn bytes_to_f32(bytes: u128) -> f32 {
-    f32::from_bits(bytes as u32)
-}
-
-pub(super) fn bytes_to_f64(bytes: u128) -> f64 {
-    f64::from_bits(bytes as u64)
-}
-
-pub(super) fn f32_to_bytes(f: f32) -> u128 {
-    f.to_bits() as u128
-}
-
-pub(super) fn f64_to_bytes(f: f64) -> u128 {
-    f.to_bits() as u128
-}
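-
-// Illustrative sketch of the exact bit-level round trip these helpers provide
-// (values chosen for illustration only):
-//
-//     assert_eq!(f32_to_bytes(1.5), 0x3FC0_0000);
-//     assert_eq!(bytes_to_f32(f32_to_bytes(1.5)), 1.5);
-//     assert_eq!(bytes_to_f64(f64_to_bytes(-0.0)).to_bits(), (-0.0f64).to_bits());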
-
-/// A `Value` represents a single self-contained Rust value.
-///
-/// A `Value` can either refer to a block of memory inside an allocation (`ByRef`) or to a primitive
-/// value held directly, outside of any allocation (`ByVal`).  For `ByRef`-values, we remember
-/// whether the pointer is supposed to be aligned or not (also see Lvalue).
-///
-/// For optimization of a few very common cases, there is also a representation for a pair of
-/// primitive values (`ByValPair`). It allows Miri to avoid making allocations for checked binary
-/// operations and fat pointers. This idea was taken from rustc's trans.
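-///
-/// For illustration (hypothetical values, not tied to any real allocation):
-///
-/// ```ignore
-/// let direct = Value::ByVal(PrimVal::from_bool(true));
-/// let by_ref = Value::by_ref(Pointer::null());
-/// // A fat pointer to a slice: data pointer plus a length of 16.
-/// let slice = Value::ByValPair(PrimVal::Bytes(0x1000), PrimVal::from_u128(16));
-/// ```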
-#[derive(Clone, Copy, Debug)]
-pub enum Value {
-    ByRef(PtrAndAlign),
-    ByVal(PrimVal),
-    ByValPair(PrimVal, PrimVal),
-}
-
-/// A wrapper type around `PrimVal` that cannot be turned back into a `PrimVal` accidentally.
-/// This type clears up a few APIs where it was unclear whether a `PrimVal` argument was meant
-/// to be an integer pointer or a pointer into an allocation.
-///
-/// I (@oli-obk) believe this makes it harder to mix up generic primvals and primvals that are
-/// just the representation of pointers. Also, all the sites that convert between primvals and
-/// pointers are now explicit (and rare!).
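-///
-/// A short sketch of the intended use (illustrative only):
-///
-/// ```ignore
-/// let p = Pointer::null();
-/// assert!(p.is_null().unwrap());
-/// // Converting back to a `PrimVal` has to be spelled out explicitly:
-/// let primval = p.into_inner_primval();
-/// ```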
-#[derive(Clone, Copy, Debug)]
-pub struct Pointer {
-    primval: PrimVal,
-}
-
-impl<'tcx> Pointer {
-    pub fn null() -> Self {
-        PrimVal::Bytes(0).into()
-    }
-    pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
-        self.primval.to_ptr()
-    }
-    pub fn into_inner_primval(self) -> PrimVal {
-        self.primval
-    }
-
-    pub fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
-        let layout = cx.data_layout();
-        match self.primval {
-            PrimVal::Bytes(b) => {
-                assert_eq!(b as u64 as u128, b);
-                Ok(Pointer::from(
-                    PrimVal::Bytes(layout.signed_offset(b as u64, i)? as u128),
-                ))
-            }
-            PrimVal::Ptr(ptr) => ptr.signed_offset(i, layout).map(Pointer::from),
-            PrimVal::Undef => err!(ReadUndefBytes),
-        }
-    }
-
-    pub fn offset<C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
-        let layout = cx.data_layout();
-        match self.primval {
-            PrimVal::Bytes(b) => {
-                assert_eq!(b as u64 as u128, b);
-                Ok(Pointer::from(
-                    PrimVal::Bytes(layout.offset(b as u64, i)? as u128),
-                ))
-            }
-            PrimVal::Ptr(ptr) => ptr.offset(i, layout).map(Pointer::from),
-            PrimVal::Undef => err!(ReadUndefBytes),
-        }
-    }
-
-    pub fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
-        let layout = cx.data_layout();
-        match self.primval {
-            PrimVal::Bytes(b) => {
-                assert_eq!(b as u64 as u128, b);
-                Ok(Pointer::from(PrimVal::Bytes(
-                    layout.wrapping_signed_offset(b as u64, i) as u128,
-                )))
-            }
-            PrimVal::Ptr(ptr) => Ok(Pointer::from(ptr.wrapping_signed_offset(i, layout))),
-            PrimVal::Undef => err!(ReadUndefBytes),
-        }
-    }
-
-    pub fn is_null(self) -> EvalResult<'tcx, bool> {
-        match self.primval {
-            PrimVal::Bytes(b) => Ok(b == 0),
-            PrimVal::Ptr(_) => Ok(false),
-            PrimVal::Undef => err!(ReadUndefBytes),
-        }
-    }
-
-    pub fn to_value_with_len(self, len: u64) -> Value {
-        Value::ByValPair(self.primval, PrimVal::from_u128(len as u128))
-    }
-
-    pub fn to_value_with_vtable(self, vtable: MemoryPointer) -> Value {
-        Value::ByValPair(self.primval, PrimVal::Ptr(vtable))
-    }
-
-    pub fn to_value(self) -> Value {
-        Value::ByVal(self.primval)
-    }
-}
-
-impl ::std::convert::From<PrimVal> for Pointer {
-    fn from(primval: PrimVal) -> Self {
-        Pointer { primval }
-    }
-}
-
-impl ::std::convert::From<MemoryPointer> for Pointer {
-    fn from(ptr: MemoryPointer) -> Self {
-        PrimVal::Ptr(ptr).into()
-    }
-}
-
-/// A `PrimVal` represents an immediate, primitive value existing outside of a
-/// `memory::Allocation`. It is in many ways like a small chunk of an `Allocation`, up to 16 bytes in
-/// size. Like a range of bytes in an `Allocation`, a `PrimVal` can either represent the raw bytes
-/// of a simple value, a pointer into another `Allocation`, or be undefined.
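-///
-/// For example (illustrative only), reading a `PrimVal` as the wrong variant is a hard error
-/// rather than a silent reinterpretation:
-///
-/// ```ignore
-/// assert_eq!(PrimVal::Bytes(42).to_bytes().unwrap(), 42);
-/// assert!(PrimVal::Bytes(42).to_ptr().is_err());   // ReadBytesAsPointer
-/// assert!(PrimVal::Undef.to_bytes().is_err());     // ReadUndefBytes
-/// ```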
-#[derive(Clone, Copy, Debug)]
-pub enum PrimVal {
-    /// The raw bytes of a simple value.
-    Bytes(u128),
-
-    /// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of
-    /// relocations, but a `PrimVal` is only large enough to contain one, so we just represent the
-    /// relocation and its associated offset together as a `MemoryPointer` here.
-    Ptr(MemoryPointer),
-
-    /// An undefined `PrimVal`, for representing values that aren't safe to examine, but are safe
-    /// to copy around, just like undefined bytes in an `Allocation`.
-    Undef,
-}
-
-#[derive(Clone, Copy, Debug, PartialEq)]
-pub enum PrimValKind {
-    I8, I16, I32, I64, I128,
-    U8, U16, U32, U64, U128,
-    F32, F64,
-    Ptr, FnPtr,
-    Bool,
-    Char,
-}
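-
-// For orientation (an illustrative, non-exhaustive mapping): `u8` corresponds to `U8`,
-// `char` to `Char`, thin pointers to `Ptr`, and function pointers to `FnPtr`.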
-
-impl<'a, 'tcx: 'a> Value {
-    #[inline]
-    pub fn by_ref(ptr: Pointer) -> Self {
-        Value::ByRef(PtrAndAlign { ptr, aligned: true })
-    }
-
-    /// Convert the value into a pointer (or a pointer-sized integer).  If the value is a ByRef,
-    /// this may have to perform a load.
-    pub fn into_ptr<M: Machine<'tcx>>(
-        &self,
-        mem: &Memory<'a, 'tcx, M>,
-    ) -> EvalResult<'tcx, Pointer> {
-        use self::Value::*;
-        Ok(match *self {
-            ByRef(PtrAndAlign { ptr, aligned }) => {
-                mem.read_maybe_aligned(aligned, |mem| mem.read_ptr_sized_unsigned(ptr.to_ptr()?))?
-            }
-            ByVal(ptr) |
-            ByValPair(ptr, _) => ptr,
-        }.into())
-    }
-
-    pub(super) fn into_ptr_vtable_pair<M: Machine<'tcx>>(
-        &self,
-        mem: &Memory<'a, 'tcx, M>,
-    ) -> EvalResult<'tcx, (Pointer, MemoryPointer)> {
-        use self::Value::*;
-        match *self {
-            ByRef(PtrAndAlign {
-                      ptr: ref_ptr,
-                      aligned,
-                  }) => {
-                mem.read_maybe_aligned(aligned, |mem| {
-                    let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?)?.into();
-                    let vtable = mem.read_ptr_sized_unsigned(
-                        ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?,
-                    )?.to_ptr()?;
-                    Ok((ptr, vtable))
-                })
-            }
-
-            ByValPair(ptr, vtable) => Ok((ptr.into(), vtable.to_ptr()?)),
-
-            ByVal(PrimVal::Undef) => err!(ReadUndefBytes),
-            _ => bug!("expected ptr and vtable, got {:?}", self),
-        }
-    }
-
-    pub(super) fn into_slice<M: Machine<'tcx>>(
-        &self,
-        mem: &Memory<'a, 'tcx, M>,
-    ) -> EvalResult<'tcx, (Pointer, u64)> {
-        use self::Value::*;
-        match *self {
-            ByRef(PtrAndAlign {
-                      ptr: ref_ptr,
-                      aligned,
-                  }) => {
-                mem.read_maybe_aligned(aligned, |mem| {
-                    let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?)?.into();
-                    let len = mem.read_ptr_sized_unsigned(
-                        ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?,
-                    )?.to_bytes()? as u64;
-                    Ok((ptr, len))
-                })
-            }
-            ByValPair(ptr, val) => {
-                let len = val.to_u128()?;
-                assert_eq!(len as u64 as u128, len);
-                Ok((ptr.into(), len as u64))
-            }
-            ByVal(PrimVal::Undef) => err!(ReadUndefBytes),
-            ByVal(_) => bug!("expected ptr and length, got {:?}", self),
-        }
-    }
-}
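-
-// For illustration: a `ByValPair` fat pointer can be split without touching memory,
-// while the `ByRef` case has to load both halves (hypothetical values; assumes some
-// `mem: Memory` in scope):
-//
-//     let v = Value::ByValPair(PrimVal::Bytes(0x1000), PrimVal::from_u128(16));
-//     let (_ptr, len) = v.into_slice(&mem)?;
-//     assert_eq!(len, 16);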
-
-impl<'tcx> PrimVal {
-    pub fn from_u128(n: u128) -> Self {
-        PrimVal::Bytes(n)
-    }
-
-    pub fn from_i128(n: i128) -> Self {
-        PrimVal::Bytes(n as u128)
-    }
-
-    pub fn from_f32(f: f32) -> Self {
-        PrimVal::Bytes(f32_to_bytes(f))
-    }
-
-    pub fn from_f64(f: f64) -> Self {
-        PrimVal::Bytes(f64_to_bytes(f))
-    }
-
-    pub fn from_bool(b: bool) -> Self {
-        PrimVal::Bytes(b as u128)
-    }
-
-    pub fn from_char(c: char) -> Self {
-        PrimVal::Bytes(c as u128)
-    }
-
-    pub fn to_bytes(self) -> EvalResult<'tcx, u128> {
-        match self {
-            PrimVal::Bytes(b) => Ok(b),
-            PrimVal::Ptr(_) => err!(ReadPointerAsBytes),
-            PrimVal::Undef => err!(ReadUndefBytes),
-        }
-    }
-
-    pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
-        match self {
-            PrimVal::Bytes(_) => err!(ReadBytesAsPointer),
-            PrimVal::Ptr(p) => Ok(p),
-            PrimVal::Undef => err!(ReadUndefBytes),
-        }
-    }
-
-    pub fn is_bytes(self) -> bool {
-        match self {
-            PrimVal::Bytes(_) => true,
-            _ => false,
-        }
-    }
-
-    pub fn is_ptr(self) -> bool {
-        match self {
-            PrimVal::Ptr(_) => true,
-            _ => false,
-        }
-    }
-
-    pub fn is_undef(self) -> bool {
-        match self {
-            PrimVal::Undef => true,
-            _ => false,
-        }
-    }
-
-    pub fn to_u128(self) -> EvalResult<'tcx, u128> {
-        self.to_bytes()
-    }
-
-    pub fn to_u64(self) -> EvalResult<'tcx, u64> {
-        self.to_bytes().map(|b| {
-            assert_eq!(b as u64 as u128, b);
-            b as u64
-        })
-    }
-
-    pub fn to_i32(self) -> EvalResult<'tcx, i32> {
-        self.to_bytes().map(|b| {
-            assert_eq!(b as i32 as u128, b);
-            b as i32
-        })
-    }
-
-    pub fn to_i128(self) -> EvalResult<'tcx, i128> {
-        self.to_bytes().map(|b| b as i128)
-    }
-
-    pub fn to_i64(self) -> EvalResult<'tcx, i64> {
-        self.to_bytes().map(|b| {
-            assert_eq!(b as i64 as u128, b);
-            b as i64
-        })
-    }
-
-    pub fn to_f32(self) -> EvalResult<'tcx, f32> {
-        self.to_bytes().map(bytes_to_f32)
-    }
-
-    pub fn to_f64(self) -> EvalResult<'tcx, f64> {
-        self.to_bytes().map(bytes_to_f64)
-    }
-
-    pub fn to_bool(self) -> EvalResult<'tcx, bool> {
-        match self.to_bytes()? {
-            0 => Ok(false),
-            1 => Ok(true),
-            _ => err!(InvalidBool),
-        }
-    }
-}
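-
-// Illustrative conversions (values for illustration only):
-//
-//     assert_eq!(PrimVal::from_bool(true).to_bool().unwrap(), true);
-//     assert!(PrimVal::from_u128(2).to_bool().is_err());   // InvalidBool
-//     assert_eq!(PrimVal::from_char('a').to_u64().unwrap(), 97);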
-
-impl PrimValKind {
-    pub fn is_int(self) -> bool {
-        use self::PrimValKind::*;
-        match self {
-            I8 | I16 | I32 | I64 | I128 | U8 | U16 | U32 | U64 | U128 => true,
-            _ => false,
-        }
-    }
-
-    pub fn is_signed_int(self) -> bool {
-        use self::PrimValKind::*;
-        match self {
-            I8 | I16 | I32 | I64 | I128 => true,
-            _ => false,
-        }
-    }
-
-    pub fn is_float(self) -> bool {
-        use self::PrimValKind::*;
-        match self {
-            F32 | F64 => true,
-            _ => false,
-        }
-    }
-
-    pub fn from_uint_size(size: u64) -> Self {
-        match size {
-            1 => PrimValKind::U8,
-            2 => PrimValKind::U16,
-            4 => PrimValKind::U32,
-            8 => PrimValKind::U64,
-            16 => PrimValKind::U128,
-            _ => bug!("can't make uint with size {}", size),
-        }
-    }
-
-    pub fn from_int_size(size: u64) -> Self {
-        match size {
-            1 => PrimValKind::I8,
-            2 => PrimValKind::I16,
-            4 => PrimValKind::I32,
-            8 => PrimValKind::I64,
-            16 => PrimValKind::I128,
-            _ => bug!("can't make int with size {}", size),
-        }
-    }
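-
-    // For illustration: `from_uint_size(4)` yields `U32` and `from_int_size(8)` yields `I64`;
-    // any other size (e.g. 3) is a caller bug and trips the `bug!` above.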
-
-    pub fn is_ptr(self) -> bool {
-        use self::PrimValKind::*;
-        match self {
-            Ptr | FnPtr => true,
-            _ => false,
-        }
-    }
-}