Rollup merge of #58301 - RalfJung:fat-ptr-eq, r=oli-obk
author Mazdak Farrokhzad <twingoow@gmail.com>
Thu, 14 Feb 2019 01:41:23 +0000 (02:41 +0100)
committer GitHub <noreply@github.com>
Thu, 14 Feb 2019 01:41:23 +0000 (02:41 +0100)
Enable comparing fat pointers

Also refactor our binops a bit to make that happen more easily.

r? @oli-obk
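
For illustration (this example is not part of the patch): equality on fat raw pointers compares both the data pointer and the metadata (the length for slices, the vtable pointer for trait objects). A minimal, hypothetical sketch of the kind of comparison the interpreter can now evaluate:

    // Hypothetical, minimal example (not from the PR): fat-pointer equality
    // compares both components of the pointer.
    fn main() {
        let s: &[u8] = &[1, 2, 3];
        let a = s as *const [u8];
        let b = s as *const [u8];       // same data pointer, same length
        let c = &s[..2] as *const [u8]; // same data pointer, shorter length
        assert!(a == b);
        assert!(a != c); // metadata (the slice length) differs
    }

For trait objects the metadata is the vtable pointer, which is why the `get_vtable` change below documents that vtables are deduplicated per (Type, Trait) pair.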

src/librustc_mir/const_eval.rs
src/librustc_mir/interpret/cast.rs
src/librustc_mir/interpret/intrinsics.rs
src/librustc_mir/interpret/machine.rs
src/librustc_mir/interpret/operand.rs
src/librustc_mir/interpret/operator.rs
src/librustc_mir/interpret/place.rs
src/librustc_mir/interpret/step.rs
src/librustc_mir/interpret/terminator.rs
src/librustc_mir/interpret/traits.rs
src/librustc_mir/transform/const_prop.rs

diff --git a/src/librustc_mir/const_eval.rs b/src/librustc_mir/const_eval.rs
index fb0c19f764c1367bdfeffc74b16e1e4865ce88fe..7be7f4b439289bc0615fc703f2711f9947fd0bd6 100644 (file)
@@ -11,7 +11,7 @@
 use rustc::mir::interpret::{ConstEvalErr, ErrorHandled};
 use rustc::mir;
 use rustc::ty::{self, TyCtxt, query::TyCtxtAt};
-use rustc::ty::layout::{self, LayoutOf, TyLayout, VariantIdx};
+use rustc::ty::layout::{self, LayoutOf, VariantIdx};
 use rustc::ty::subst::Subst;
 use rustc::traits::Reveal;
 use rustc_data_structures::fx::FxHashMap;
@@ -21,7 +21,8 @@
 use syntax::source_map::{Span, DUMMY_SP};
 
 use crate::interpret::{self,
-    PlaceTy, MPlaceTy, MemPlace, OpTy, Operand, Immediate, Scalar, RawConst, ConstValue, Pointer,
+    PlaceTy, MPlaceTy, MemPlace, OpTy, ImmTy, Operand, Immediate, Scalar, Pointer,
+    RawConst, ConstValue,
     EvalResult, EvalError, EvalErrorKind, GlobalId, EvalContext, StackPopCleanup,
     Allocation, AllocId, MemoryKind,
     snapshot, RefTracking,
@@ -77,7 +78,7 @@ pub fn op_to_const<'tcx>(
     let normalized_op = if normalize {
         ecx.try_read_immediate(op)?
     } else {
-        match op.op {
+        match *op {
             Operand::Indirect(mplace) => Err(mplace),
             Operand::Immediate(val) => Ok(val)
         }
@@ -105,15 +106,6 @@ pub fn op_to_const<'tcx>(
     Ok(ty::Const { val, ty: op.layout.ty })
 }
 
-pub fn lazy_const_to_op<'tcx>(
-    ecx: &CompileTimeEvalContext<'_, '_, 'tcx>,
-    cnst: ty::LazyConst<'tcx>,
-    ty: ty::Ty<'tcx>,
-) -> EvalResult<'tcx, OpTy<'tcx>> {
-    let op = ecx.const_value_to_op(cnst)?;
-    Ok(OpTy { op, layout: ecx.layout_of(ty)? })
-}
-
 fn eval_body_and_ecx<'a, 'mir, 'tcx>(
     tcx: TyCtxt<'a, 'tcx, 'tcx>,
     cid: GlobalId<'tcx>,
@@ -388,10 +380,8 @@ fn call_intrinsic(
     fn ptr_op(
         _ecx: &EvalContext<'a, 'mir, 'tcx, Self>,
         _bin_op: mir::BinOp,
-        _left: Scalar,
-        _left_layout: TyLayout<'tcx>,
-        _right: Scalar,
-        _right_layout: TyLayout<'tcx>,
+        _left: ImmTy<'tcx>,
+        _right: ImmTy<'tcx>,
     ) -> EvalResult<'tcx, (Scalar, bool)> {
         Err(
             ConstEvalError::NeedsRfc("pointer arithmetic or comparison".to_string()).into(),
@@ -486,7 +476,7 @@ pub fn const_field<'a, 'tcx>(
     let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env);
     let result = (|| {
         // get the operand again
-        let op = lazy_const_to_op(&ecx, ty::LazyConst::Evaluated(value), value.ty)?;
+        let op = ecx.lazy_const_to_op(ty::LazyConst::Evaluated(value), value.ty)?;
         // downcast
         let down = match variant {
             None => op,
@@ -512,7 +502,7 @@ pub fn const_variant_index<'a, 'tcx>(
 ) -> EvalResult<'tcx, VariantIdx> {
     trace!("const_variant_index: {:?}", val);
     let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env);
-    let op = lazy_const_to_op(&ecx, ty::LazyConst::Evaluated(val), val.ty)?;
+    let op = ecx.lazy_const_to_op(ty::LazyConst::Evaluated(val), val.ty)?;
     Ok(ecx.read_discriminant(op)?.1)
 }
 
diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs
index c3b71be8354daabcb1c6c1b6c0ce952876f740cd..ce62d79e585a8bbf32d5695216950f18f73424e0 100644 (file)
@@ -9,7 +9,7 @@
 use rustc::mir::CastKind;
 use rustc_apfloat::Float;
 
-use super::{EvalContext, Machine, PlaceTy, OpTy, Immediate};
+use super::{EvalContext, Machine, PlaceTy, OpTy, ImmTy, Immediate};
 
 impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
     fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool {
@@ -372,7 +372,7 @@ fn unsize_into(
                             assert_eq!(src.layout.fields.offset(i).bytes(), 0);
                             assert_eq!(src_field_layout.size, src.layout.size);
                             // just swap out the layout
-                            OpTy { op: src.op, layout: src_field_layout }
+                            OpTy::from(ImmTy { imm: src.to_immediate(), layout: src_field_layout })
                         }
                     };
                     if src_field.layout.ty == dst_field.layout.ty {
diff --git a/src/librustc_mir/interpret/intrinsics.rs b/src/librustc_mir/interpret/intrinsics.rs
index 78c5c0a6d751c8f1acec74ae6084e27010cc1d20..e002c3fd511d6d780a80e731f283b3f22ba53efb 100644 (file)
@@ -126,7 +126,7 @@ pub fn emulate_intrinsic(
                 let l = self.read_immediate(args[0])?;
                 let r = self.read_immediate(args[1])?;
                 let is_add = intrinsic_name == "saturating_add";
-                let (val, overflowed) = self.binary_op_imm(if is_add {
+                let (val, overflowed) = self.binary_op(if is_add {
                     BinOp::Add
                 } else {
                     BinOp::Sub
@@ -173,7 +173,7 @@ pub fn emulate_intrinsic(
                     "unchecked_shr" => BinOp::Shr,
                     _ => bug!("Already checked for int ops")
                 };
-                let (val, overflowed) = self.binary_op_imm(bin_op, l, r)?;
+                let (val, overflowed) = self.binary_op(bin_op, l, r)?;
                 if overflowed {
                     let layout = self.layout_of(substs.type_at(0))?;
                     let r_val =  r.to_scalar()?.to_bits(layout.size)?;
diff --git a/src/librustc_mir/interpret/machine.rs b/src/librustc_mir/interpret/machine.rs
index 8f34b832f0b41451e1f5dc83eb0481dd607f5376..7fb4c47d92acb4d4d12c64fd47a18a130f5a5149 100644 (file)
@@ -7,11 +7,11 @@
 
 use rustc::hir::{self, def_id::DefId};
 use rustc::mir;
-use rustc::ty::{self, layout::TyLayout, query::TyCtxtAt};
+use rustc::ty::{self, query::TyCtxtAt};
 
 use super::{
     Allocation, AllocId, EvalResult, Scalar, AllocationExtra,
-    EvalContext, PlaceTy, MPlaceTy, OpTy, Pointer, MemoryKind,
+    EvalContext, PlaceTy, MPlaceTy, OpTy, ImmTy, Pointer, MemoryKind,
 };
 
 /// Whether this kind of memory is allowed to leak
@@ -158,10 +158,8 @@ fn adjust_static_allocation<'b>(
     fn ptr_op(
         ecx: &EvalContext<'a, 'mir, 'tcx, Self>,
         bin_op: mir::BinOp,
-        left: Scalar<Self::PointerTag>,
-        left_layout: TyLayout<'tcx>,
-        right: Scalar<Self::PointerTag>,
-        right_layout: TyLayout<'tcx>,
+        left: ImmTy<'tcx, Self::PointerTag>,
+        right: ImmTy<'tcx, Self::PointerTag>,
     ) -> EvalResult<'tcx, (Scalar<Self::PointerTag>, bool)>;
 
     /// Heap allocations via the `box` keyword.
diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs
index c0b26442dd918c75a4b859916dbdab81c65bf989..7da907028eebfd542a5bd62032991f77294d7b07 100644 (file)
     ConstValue, Pointer, Scalar,
     EvalResult, EvalErrorKind,
 };
-use super::{EvalContext, Machine, MemPlace, MPlaceTy, MemoryKind};
+use super::{
+    EvalContext, Machine, AllocMap, Allocation, AllocationExtra,
+    MemPlace, MPlaceTy, PlaceTy, Place, MemoryKind,
+};
 pub use rustc::mir::interpret::ScalarMaybeUndef;
 
 /// A `Value` represents a single immediate self-contained Rust value.
@@ -41,6 +44,11 @@ pub fn with_default_tag<Tag>(self) -> Immediate<Tag>
 }
 
 impl<'tcx, Tag> Immediate<Tag> {
+    #[inline]
+    pub fn from_scalar(val: Scalar<Tag>) -> Self {
+        Immediate::Scalar(ScalarMaybeUndef::Scalar(val))
+    }
+
     #[inline]
     pub fn erase_tag(self) -> Immediate
     {
@@ -112,7 +120,7 @@ pub fn to_meta(self) -> EvalResult<'tcx, Option<Scalar<Tag>>> {
 // as input for binary and cast operations.
 #[derive(Copy, Clone, Debug)]
 pub struct ImmTy<'tcx, Tag=()> {
-    immediate: Immediate<Tag>,
+    pub imm: Immediate<Tag>,
     pub layout: TyLayout<'tcx>,
 }
 
@@ -120,7 +128,7 @@ impl<'tcx, Tag> ::std::ops::Deref for ImmTy<'tcx, Tag> {
     type Target = Immediate<Tag>;
     #[inline(always)]
     fn deref(&self) -> &Immediate<Tag> {
-        &self.immediate
+        &self.imm
     }
 }
 
@@ -180,7 +188,7 @@ pub fn to_immediate(self) -> Immediate<Tag>
 
 #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
 pub struct OpTy<'tcx, Tag=()> {
-    crate op: Operand<Tag>, // ideally we'd make this private, but const_prop needs this
+    op: Operand<Tag>,
     pub layout: TyLayout<'tcx>,
 }
 
@@ -206,12 +214,25 @@ impl<'tcx, Tag> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
     #[inline(always)]
     fn from(val: ImmTy<'tcx, Tag>) -> Self {
         OpTy {
-            op: Operand::Immediate(val.immediate),
+            op: Operand::Immediate(val.imm),
             layout: val.layout
         }
     }
 }
 
+impl<'tcx, Tag: Copy> ImmTy<'tcx, Tag>
+{
+    #[inline]
+    pub fn from_scalar(val: Scalar<Tag>, layout: TyLayout<'tcx>) -> Self {
+        ImmTy { imm: Immediate::from_scalar(val), layout }
+    }
+
+    #[inline]
+    pub fn to_bits(self) -> EvalResult<'tcx, u128> {
+        self.to_scalar()?.to_bits(self.layout.size)
+    }
+}
+
 impl<'tcx, Tag> OpTy<'tcx, Tag>
 {
     #[inline]
@@ -324,8 +345,8 @@ pub fn read_immediate(
         &self,
         op: OpTy<'tcx, M::PointerTag>
     ) -> EvalResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
-        if let Ok(immediate) = self.try_read_immediate(op)? {
-            Ok(ImmTy { immediate, layout: op.layout })
+        if let Ok(imm) = self.try_read_immediate(op)? {
+            Ok(ImmTy { imm, layout: op.layout })
         } else {
             bug!("primitive read failed for type: {:?}", op.layout.ty);
         }
@@ -469,6 +490,22 @@ pub fn access_local(
         Ok(OpTy { op, layout })
     }
 
+    /// Every place can be read from, so we can turn them into an operand
+    #[inline(always)]
+    pub fn place_to_op(
+        &self,
+        place: PlaceTy<'tcx, M::PointerTag>
+    ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        let op = match *place {
+            Place::Ptr(mplace) => {
+                Operand::Indirect(mplace)
+            }
+            Place::Local { frame, local } =>
+                *self.stack[frame].locals[local].access()?
+        };
+        Ok(OpTy { op, layout: place.layout })
+    }
+
     // Evaluate a place with the goal of reading from it.  This lets us sometimes
     // avoid allocations.
     fn eval_place_to_op(
@@ -531,10 +568,8 @@ pub(super) fn eval_operands(
             .collect()
     }
 
-    // Used when miri runs into a constant, and by CTFE.
-    // FIXME: CTFE should use allocations, then we can make this private (embed it into
-    // `eval_operand`, ideally).
-    pub(crate) fn const_value_to_op(
+    // Used when Miri runs into a constant, and (indirectly through lazy_const_to_op) by CTFE.
+    fn const_value_to_op(
         &self,
         val: ty::LazyConst<'tcx>,
     ) -> EvalResult<'tcx, Operand<M::PointerTag>> {
@@ -666,3 +701,21 @@ pub fn read_discriminant(
     }
 
 }
+
+impl<'a, 'mir, 'tcx, M> EvalContext<'a, 'mir, 'tcx, M>
+where
+    M: Machine<'a, 'mir, 'tcx, PointerTag=()>,
+    // FIXME: Working around https://github.com/rust-lang/rust/issues/24159
+    M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKinds>, Allocation<(), M::AllocExtra>)>,
+    M::AllocExtra: AllocationExtra<(), M::MemoryExtra>,
+{
+    // FIXME: CTFE should use allocations, then we can remove this.
+    pub(crate) fn lazy_const_to_op(
+        &self,
+        cnst: ty::LazyConst<'tcx>,
+        ty: ty::Ty<'tcx>,
+    ) -> EvalResult<'tcx, OpTy<'tcx>> {
+        let op = self.const_value_to_op(cnst)?;
+        Ok(OpTy { op, layout: self.layout_of(ty)? })
+    }
+}
diff --git a/src/librustc_mir/interpret/operator.rs b/src/librustc_mir/interpret/operator.rs
index 5e3335f4c721909cd90bec551d0e9e31c0efc6a2..b3b9c742d6c282bc020aaaa879d3a07d9029da3b 100644 (file)
@@ -18,7 +18,7 @@ pub fn binop_with_overflow(
         right: ImmTy<'tcx, M::PointerTag>,
         dest: PlaceTy<'tcx, M::PointerTag>,
     ) -> EvalResult<'tcx> {
-        let (val, overflowed) = self.binary_op_imm(op, left, right)?;
+        let (val, overflowed) = self.binary_op(op, left, right)?;
         let val = Immediate::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
         self.write_immediate(val, dest)
     }
@@ -32,7 +32,7 @@ pub fn binop_ignore_overflow(
         right: ImmTy<'tcx, M::PointerTag>,
         dest: PlaceTy<'tcx, M::PointerTag>,
     ) -> EvalResult<'tcx> {
-        let (val, _overflowed) = self.binary_op_imm(op, left, right)?;
+        let (val, _overflowed) = self.binary_op(op, left, right)?;
         self.write_scalar(val, dest)
     }
 }
@@ -272,69 +272,55 @@ fn binary_int_op(
         Ok((val, false))
     }
 
-    /// Convenience wrapper that's useful when keeping the layout together with the
-    /// immediate value.
+    /// Returns the result of the specified operation and whether it overflowed.
     #[inline]
-    pub fn binary_op_imm(
+    pub fn binary_op(
         &self,
         bin_op: mir::BinOp,
         left: ImmTy<'tcx, M::PointerTag>,
         right: ImmTy<'tcx, M::PointerTag>,
-    ) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
-        self.binary_op(
-            bin_op,
-            left.to_scalar()?, left.layout,
-            right.to_scalar()?, right.layout,
-        )
-    }
-
-    /// Returns the result of the specified operation and whether it overflowed.
-    pub fn binary_op(
-        &self,
-        bin_op: mir::BinOp,
-        left: Scalar<M::PointerTag>,
-        left_layout: TyLayout<'tcx>,
-        right: Scalar<M::PointerTag>,
-        right_layout: TyLayout<'tcx>,
     ) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
         trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
-            bin_op, left, left_layout.ty, right, right_layout.ty);
+            bin_op, *left, left.layout.ty, *right, right.layout.ty);
 
-        match left_layout.ty.sty {
+        match left.layout.ty.sty {
             ty::Char => {
-                assert_eq!(left_layout.ty, right_layout.ty);
-                let left = left.to_char()?;
-                let right = right.to_char()?;
+                assert_eq!(left.layout.ty, right.layout.ty);
+                let left = left.to_scalar()?.to_char()?;
+                let right = right.to_scalar()?.to_char()?;
                 self.binary_char_op(bin_op, left, right)
             }
             ty::Bool => {
-                assert_eq!(left_layout.ty, right_layout.ty);
-                let left = left.to_bool()?;
-                let right = right.to_bool()?;
+                assert_eq!(left.layout.ty, right.layout.ty);
+                let left = left.to_scalar()?.to_bool()?;
+                let right = right.to_scalar()?.to_bool()?;
                 self.binary_bool_op(bin_op, left, right)
             }
             ty::Float(fty) => {
-                assert_eq!(left_layout.ty, right_layout.ty);
-                let left = left.to_bits(left_layout.size)?;
-                let right = right.to_bits(right_layout.size)?;
+                assert_eq!(left.layout.ty, right.layout.ty);
+                let left = left.to_bits()?;
+                let right = right.to_bits()?;
                 self.binary_float_op(bin_op, fty, left, right)
             }
             _ => {
                 // Must be integer(-like) types.  Don't forget about == on fn pointers.
-                assert!(left_layout.ty.is_integral() || left_layout.ty.is_unsafe_ptr() ||
-                    left_layout.ty.is_fn());
-                assert!(right_layout.ty.is_integral() || right_layout.ty.is_unsafe_ptr() ||
-                    right_layout.ty.is_fn());
+                assert!(left.layout.ty.is_integral() || left.layout.ty.is_unsafe_ptr() ||
+                    left.layout.ty.is_fn());
+                assert!(right.layout.ty.is_integral() || right.layout.ty.is_unsafe_ptr() ||
+                    right.layout.ty.is_fn());
 
                 // Handle operations that support pointer values
-                if left.is_ptr() || right.is_ptr() || bin_op == mir::BinOp::Offset {
-                    return M::ptr_op(self, bin_op, left, left_layout, right, right_layout);
+                if left.to_scalar_ptr()?.is_ptr() ||
+                    right.to_scalar_ptr()?.is_ptr() ||
+                    bin_op == mir::BinOp::Offset
+                {
+                    return M::ptr_op(self, bin_op, left, right);
                 }
 
                 // Everything else only works with "proper" bits
-                let left = left.to_bits(left_layout.size).expect("we checked is_ptr");
-                let right = right.to_bits(right_layout.size).expect("we checked is_ptr");
-                self.binary_int_op(bin_op, left, left_layout, right, right_layout)
+                let l = left.to_bits().expect("we checked is_ptr");
+                let r = right.to_bits().expect("we checked is_ptr");
+                self.binary_int_op(bin_op, l, left.layout, r, right.layout)
             }
         }
     }
@@ -342,13 +328,14 @@ pub fn binary_op(
     pub fn unary_op(
         &self,
         un_op: mir::UnOp,
-        val: Scalar<M::PointerTag>,
-        layout: TyLayout<'tcx>,
+        val: ImmTy<'tcx, M::PointerTag>,
     ) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
         use rustc::mir::UnOp::*;
         use rustc_apfloat::ieee::{Single, Double};
         use rustc_apfloat::Float;
 
+        let layout = val.layout;
+        let val = val.to_scalar()?;
         trace!("Running unary op {:?}: {:?} ({:?})", un_op, val, layout.ty.sty);
 
         match layout.ty.sty {
diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs
index 004a11e34d6e1d4b02f07b872b5bc6e99deb6041..b29e09900f6b1e2e1dc0b67eeda4ce7bb8c46837 100644 (file)
@@ -244,10 +244,10 @@ pub(super) fn vtable(self) -> EvalResult<'tcx, Pointer<Tag>> {
     }
 }
 
-impl<'tcx, Tag: ::std::fmt::Debug> OpTy<'tcx, Tag> {
+impl<'tcx, Tag: ::std::fmt::Debug + Copy> OpTy<'tcx, Tag> {
     #[inline(always)]
     pub fn try_as_mplace(self) -> Result<MPlaceTy<'tcx, Tag>, Immediate<Tag>> {
-        match self.op {
+        match *self {
             Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }),
             Operand::Immediate(imm) => Err(imm),
         }
@@ -487,9 +487,9 @@ pub fn mplace_projection(
             Deref => self.deref_operand(base.into())?,
 
             Index(local) => {
-                let n = *self.frame().locals[local].access()?;
-                let n_layout = self.layout_of(self.tcx.types.usize)?;
-                let n = self.read_scalar(OpTy { op: n, layout: n_layout })?;
+                let layout = self.layout_of(self.tcx.types.usize)?;
+                let n = self.access_local(self.frame(), local, Some(layout))?;
+                let n = self.read_scalar(n)?;
                 let n = n.to_bits(self.tcx.data_layout.pointer_size)?;
                 self.mplace_field(base, u64::try_from(n).unwrap())?
             }
@@ -991,22 +991,6 @@ pub fn write_discriminant_index(
         Ok(())
     }
 
-    /// Every place can be read from, so we can turm them into an operand
-    #[inline(always)]
-    pub fn place_to_op(
-        &self,
-        place: PlaceTy<'tcx, M::PointerTag>
-    ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
-        let op = match place.place {
-            Place::Ptr(mplace) => {
-                Operand::Indirect(mplace)
-            }
-            Place::Local { frame, local } =>
-                *self.stack[frame].locals[local].access()?
-        };
-        Ok(OpTy { op, layout: place.layout })
-    }
-
     pub fn raw_const_to_mplace(
         &self,
         raw: RawConst<'tcx>,
diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs
index 0c988eb6810837b2ef83f282537dbb7253354576..97ef2b5fa3485342b1b58689ed74e204fb3b1721 100644 (file)
@@ -176,7 +176,7 @@ fn eval_rvalue_into_place(
             UnaryOp(un_op, ref operand) => {
                 // The operand always has the same type as the result.
                 let val = self.read_immediate(self.eval_operand(operand, Some(dest.layout))?)?;
-                let val = self.unary_op(un_op, val.to_scalar()?, dest.layout)?;
+                let val = self.unary_op(un_op, val)?;
                 self.write_scalar(val, dest)?;
             }
 
diff --git a/src/librustc_mir/interpret/terminator.rs b/src/librustc_mir/interpret/terminator.rs
index be50daa17092f1698eb180cb5814a98334878b1c..c2ee3f5715bd370bafa580a2acbb1522affdaeb2 100644 (file)
@@ -7,7 +7,7 @@
 
 use rustc::mir::interpret::{EvalResult, PointerArithmetic, EvalErrorKind, Scalar};
 use super::{
-    EvalContext, Machine, Immediate, OpTy, PlaceTy, MPlaceTy, Operand, StackPopCleanup
+    EvalContext, Machine, Immediate, OpTy, ImmTy, PlaceTy, MPlaceTy, StackPopCleanup
 };
 
 impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
@@ -51,8 +51,8 @@ pub(super) fn eval_terminator(
                     // Compare using binary_op, to also support pointer values
                     let const_int = Scalar::from_uint(const_int, discr.layout.size);
                     let (res, _) = self.binary_op(mir::BinOp::Eq,
-                        discr.to_scalar()?, discr.layout,
-                        const_int, discr.layout,
+                        discr,
+                        ImmTy::from_scalar(const_int, discr.layout),
                     )?;
                     if res.to_bool()? {
                         target_block = targets[index];
@@ -418,8 +418,10 @@ fn eval_fn_call(
                 let mut args = args.to_vec();
                 let pointee = args[0].layout.ty.builtin_deref(true).unwrap().ty;
                 let fake_fat_ptr_ty = self.tcx.mk_mut_ptr(pointee);
-                args[0].layout = self.layout_of(fake_fat_ptr_ty)?.field(self, 0)?;
-                args[0].op = Operand::Immediate(Immediate::Scalar(ptr.ptr.into())); // strip vtable
+                args[0] = OpTy::from(ImmTy { // strip vtable
+                    layout: self.layout_of(fake_fat_ptr_ty)?.field(self, 0)?,
+                    imm: Immediate::Scalar(ptr.ptr.into())
+                });
                 trace!("Patched self operand to {:#?}", args[0]);
                 // recurse with concrete function
                 self.eval_fn_call(instance, span, caller_abi, &args, dest, ret)
@@ -448,8 +450,8 @@ fn drop_in_place(
             _ => (instance, place),
         };
 
-        let arg = OpTy {
-            op: Operand::Immediate(place.to_ref()),
+        let arg = ImmTy {
+            imm: place.to_ref(),
             layout: self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?,
         };
 
@@ -460,7 +462,7 @@ fn drop_in_place(
             instance,
             span,
             Abi::Rust,
-            &[arg],
+            &[arg.into()],
             Some(dest.into()),
             Some(target),
         )
diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs
index 65d7060b544d65a192b4ffd94424593048164ecc..1b0a9b17d3686cab83e21d001292b571fa1a859a 100644 (file)
@@ -22,6 +22,10 @@ pub fn get_vtable(
         let (ty, poly_trait_ref) = self.tcx.erase_regions(&(ty, poly_trait_ref));
 
         if let Some(&vtable) = self.vtables.get(&(ty, poly_trait_ref)) {
+            // This means we guarantee that there are no duplicate vtables; we will
+            // always use the same vtable for the same (Type, Trait) combination.
+            // That's not what happens in rustc, but emulating per-crate deduplication
+            // does not sound like it actually makes anything any better.
             return Ok(Pointer::from(vtable).with_default_tag());
         }
 
diff --git a/src/librustc_mir/transform/const_prop.rs b/src/librustc_mir/transform/const_prop.rs
index 018f71c39e513793ea9b9c3e18e875e7415e19ad..7da00c4ea0c36f0d7bb1a80ce9f9308e47cf12d0 100644 (file)
     HasTyCtxt, TargetDataLayout, HasDataLayout,
 };
 
-use crate::interpret::{self, EvalContext, ScalarMaybeUndef, Immediate, OpTy, MemoryKind};
+use crate::interpret::{EvalContext, ScalarMaybeUndef, Immediate, OpTy, ImmTy, MemoryKind};
 use crate::const_eval::{
     CompileTimeInterpreter, error_to_const_error, eval_promoted, mk_eval_cx,
-    lazy_const_to_op,
 };
 use crate::transform::{MirPass, MirSource};
 
@@ -254,7 +253,7 @@ fn eval_constant(
         source_info: SourceInfo,
     ) -> Option<Const<'tcx>> {
         self.ecx.tcx.span = source_info.span;
-        match lazy_const_to_op(&self.ecx, *c.literal, c.ty) {
+        match self.ecx.lazy_const_to_op(*c.literal, c.ty) {
             Ok(op) => {
                 Some((op, c.span))
             },
@@ -345,15 +344,15 @@ fn const_prop(
             Rvalue::Len(_) => None,
             Rvalue::NullaryOp(NullOp::SizeOf, ty) => {
                 type_size_of(self.tcx, self.param_env, ty).and_then(|n| Some((
-                    OpTy {
-                        op: interpret::Operand::Immediate(Immediate::Scalar(
+                    ImmTy {
+                        imm: Immediate::Scalar(
                             Scalar::Bits {
                                 bits: n as u128,
                                 size: self.tcx.data_layout.pointer_size.bytes() as u8,
                             }.into()
-                        )),
+                        ),
                         layout: self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).ok()?,
-                    },
+                    }.into(),
                     span,
                 )))
             }
@@ -371,13 +370,12 @@ fn const_prop(
 
                 let (arg, _) = self.eval_operand(arg, source_info)?;
                 let val = self.use_ecx(source_info, |this| {
-                    let prim = this.ecx.read_scalar(arg)?.not_undef()?;
+                    let prim = this.ecx.read_immediate(arg)?;
                     match op {
                         UnOp::Neg => {
                             // Need to do overflow check here: For actual CTFE, MIR
                             // generation emits code that does this before calling the op.
-                            let size = arg.layout.size;
-                            if prim.to_bits(size)? == (1 << (size.bits() - 1)) {
+                            if prim.to_bits()? == (1 << (prim.layout.size.bits() - 1)) {
                                 return err!(OverflowNeg);
                             }
                         }
@@ -386,13 +384,13 @@ fn const_prop(
                         }
                     }
                     // Now run the actual operation.
-                    this.ecx.unary_op(op, prim, arg.layout)
+                    this.ecx.unary_op(op, prim)
                 })?;
-                let res = OpTy {
-                    op: interpret::Operand::Immediate(Immediate::Scalar(val.into())),
+                let res = ImmTy {
+                    imm: Immediate::Scalar(val.into()),
                     layout: place_layout,
                 };
-                Some((res, span))
+                Some((res.into(), span))
             }
             Rvalue::CheckedBinaryOp(op, ref left, ref right) |
             Rvalue::BinaryOp(op, ref left, ref right) => {
@@ -447,7 +445,7 @@ fn const_prop(
                 })?;
                 trace!("const evaluating {:?} for {:?} and {:?}", op, left, right);
                 let (val, overflow) = self.use_ecx(source_info, |this| {
-                    this.ecx.binary_op_imm(op, l, r)
+                    this.ecx.binary_op(op, l, r)
                 })?;
                 let val = if let Rvalue::CheckedBinaryOp(..) = *rvalue {
                     Immediate::ScalarPair(
@@ -462,11 +460,11 @@ fn const_prop(
                     }
                     Immediate::Scalar(val.into())
                 };
-                let res = OpTy {
-                    op: interpret::Operand::Immediate(val),
+                let res = ImmTy {
+                    imm: val,
                     layout: place_layout,
                 };
-                Some((res, span))
+                Some((res.into(), span))
             },
         }
     }