rustup for big refactor; kill most of validation
author    Ralf Jung <post@ralfj.de>  Wed, 15 Aug 2018 19:01:40 +0000 (21:01 +0200)
committer Ralf Jung <post@ralfj.de>  Thu, 16 Aug 2018 08:30:43 +0000 (10:30 +0200)
src/fn_call.rs
src/helpers.rs
src/intrinsic.rs
src/lib.rs
src/locks.rs
src/operator.rs
src/range_map.rs
src/tls.rs
src/validation.rs [deleted file]
tests/run-pass/atomic-compare_exchange.rs
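
The diffs below all track one interpreter-wide API change from the rustc
refactor: the old untyped `Place`/`ValTy` pairs, which forced every caller to
thread a separate `Ty` or `TyLayout` through, are replaced by `PlaceTy` and
`OpTy`, which bundle the layout with the place or operand. A minimal sketch of
the shape of the new types (the real definitions live in
`rustc_mir::interpret`; the exact fields shown here are an assumption for
illustration):

    // Sketch only: layout-carrying wrappers around the untyped values.
    pub struct PlaceTy<'tcx> {
        place: Place,               // untyped destination
        pub layout: TyLayout<'tcx>, // type + size + align, used below as `dest.layout`
    }

    pub struct OpTy<'tcx> {
        op: Operand,                // untyped operand (immediate or in memory)
        pub layout: TyLayout<'tcx>,
    }

Both wrappers dereference to their untyped inner value, which is why the diff
writes `*dest` wherever only the raw `Place` is needed. The visible consequence
throughout: `write_scalar` loses its type argument and flips its argument
order, from `self.write_scalar(dest, val, dest_ty)?` to
`self.write_scalar(val, dest)?`.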

diff --git a/src/fn_call.rs b/src/fn_call.rs
index 509119beb35c43f894205f2d38355eb679050c4c..559f3adb90f13c47e93466e683e711d7a09698ae 100644 (file)
@@ -1,8 +1,7 @@
-use rustc::ty::{self, Ty};
-use rustc::ty::layout::{self, Align, LayoutOf, Size};
+use rustc::ty;
+use rustc::ty::layout::{Align, LayoutOf, Size};
 use rustc::hir::def_id::{DefId, CRATE_DEF_INDEX};
 use rustc::mir;
-use rustc_data_structures::indexed_vec::Idx;
 use syntax::attr;
 use syntax::codemap::Span;
 
 
 use super::memory::MemoryKind;
 
-fn write_discriminant_value<'a, 'mir, 'tcx: 'a + 'mir>(
-        ecx: &mut EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>>,
-        dest_ty: Ty<'tcx>,
-        dest: Place,
-        variant_index: usize,
-    ) -> EvalResult<'tcx> {
-        let layout = ecx.layout_of(dest_ty)?;
-
-        match layout.variants {
-            layout::Variants::Single { index } => {
-                if index != variant_index {
-                    // If the layout of an enum is `Single`, all
-                    // other variants are necessarily uninhabited.
-                    assert_eq!(layout.for_variant(&ecx, variant_index).abi,
-                               layout::Abi::Uninhabited);
-                }
-            }
-            layout::Variants::Tagged { .. } => {
-                let discr_val = dest_ty.ty_adt_def().unwrap()
-                    .discriminant_for_variant(*ecx.tcx, variant_index)
-                    .val;
-
-                let (discr_dest, discr) = ecx.place_field(dest, mir::Field::new(0), layout)?;
-                ecx.write_scalar(discr_dest, Scalar::from_uint(discr_val, discr.size), discr.ty)?;
-            }
-            layout::Variants::NicheFilling {
-                dataful_variant,
-                ref niche_variants,
-                niche_start,
-                ..
-            } => {
-                if variant_index != dataful_variant {
-                    let (niche_dest, niche) =
-                        ecx.place_field(dest, mir::Field::new(0), layout)?;
-                    let niche_value = ((variant_index - niche_variants.start()) as u128)
-                        .wrapping_add(niche_start);
-                    ecx.write_scalar(niche_dest, Scalar::from_uint(niche_value, niche.size), niche.ty)?;
-                }
-            }
-        }
-
-        Ok(())
-    }
-
 pub trait EvalContextExt<'tcx> {
     fn call_foreign_item(
         &mut self,
         def_id: DefId,
-        args: &[ValTy<'tcx>],
-        dest: Place,
-        dest_ty: Ty<'tcx>,
+        args: &[OpTy<'tcx>],
+        dest: PlaceTy<'tcx>,
         dest_block: mir::BasicBlock,
     ) -> EvalResult<'tcx>;
 
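The `write_discriminant_value` helper deleted above moves into `EvalContext`
itself; the next hunk calls it as a method. Its subtlest case is the niche
encoding, where every variant other than the dataful one is mapped into
otherwise-invalid values of the payload. A standalone sketch of that
arithmetic (the example numbers are made up):

    // Sketch of the niche encoding from the deleted helper: the dataful
    // variant stores its payload unchanged; every other variant index is
    // shifted into the niche's value range.
    fn niche_value(variant_index: usize, niche_variants_start: usize,
                   niche_start: u128) -> u128 {
        ((variant_index - niche_variants_start) as u128).wrapping_add(niche_start)
    }

    // E.g. for Option<&T> with niche_start = 0 (the null-pointer niche) and
    // the niche variants starting at index 0, None (index 0) encodes as 0.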
@@ -73,49 +27,46 @@ fn call_foreign_item(
     fn call_missing_fn(
         &mut self,
         instance: ty::Instance<'tcx>,
-        destination: Option<(Place, mir::BasicBlock)>,
-        args: &[ValTy<'tcx>],
-        sig: ty::FnSig<'tcx>,
+        destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
+        args: &[OpTy<'tcx>],
         path: String,
     ) -> EvalResult<'tcx>;
 
     fn eval_fn_call(
         &mut self,
         instance: ty::Instance<'tcx>,
-        destination: Option<(Place, mir::BasicBlock)>,
-        args: &[ValTy<'tcx>],
+        destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
+        args: &[OpTy<'tcx>],
         span: Span,
-        sig: ty::FnSig<'tcx>,
     ) -> EvalResult<'tcx, bool>;
 
-    fn write_null(&mut self, dest: Place, dest_layout: TyLayout<'tcx>) -> EvalResult<'tcx>;
+    fn write_null(&mut self, dest: PlaceTy<'tcx>) -> EvalResult<'tcx>;
 }
 
 impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>> {
     fn eval_fn_call(
         &mut self,
         instance: ty::Instance<'tcx>,
-        destination: Option<(Place, mir::BasicBlock)>,
-        args: &[ValTy<'tcx>],
+        destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
+        args: &[OpTy<'tcx>],
         span: Span,
-        sig: ty::FnSig<'tcx>,
     ) -> EvalResult<'tcx, bool> {
-        trace!("eval_fn_call: {:#?}, {:#?}", instance, destination);
+        trace!("eval_fn_call: {:#?}, {:?}", instance, destination.map(|(place, bb)| (*place, bb)));
 
         let def_id = instance.def_id();
         let item_path = self.tcx.absolute_item_path_str(def_id);
         match &*item_path {
             "std::sys::unix::thread::guard::init" | "std::sys::unix::thread::guard::current" => {
                 // Return None, as it doesn't make sense to return Some, because miri detects stack overflow itself.
-                let ret_ty = sig.output();
-                match ret_ty.sty {
+                let (return_place, return_to_block) = destination.unwrap();
+                match return_place.layout.ty.sty {
                     ty::TyAdt(ref adt_def, _) => {
                         assert!(adt_def.is_enum(), "Unexpected return type for {}", item_path);
                         let none_variant_index = adt_def.variants.iter().position(|def| {
                             def.name.as_str() == "None"
                         }).expect("No None variant");
-                        let (return_place, return_to_block) = destination.unwrap();
-                        write_discriminant_value(self, ret_ty, return_place, none_variant_index)?;
+
+                        self.write_discriminant_value(none_variant_index, return_place)?;
                         self.goto_block(return_to_block);
                         return Ok(true);
                     }
@@ -135,11 +86,9 @@ fn eval_fn_call(
             // FIXME: return a real value in case the target allocation has an
             // alignment bigger than the one requested
             let n = u128::max_value();
-            let amt = 128 - self.memory.pointer_size().bytes() * 8;
             let (dest, return_to_block) = destination.unwrap();
-            let ty = self.tcx.types.usize;
-            let ptr_size = self.memory.pointer_size();
-            self.write_scalar(dest, Scalar::from_uint((n << amt) >> amt, ptr_size), ty)?;
+            let n = self.truncate(n, dest.layout);
+            self.write_scalar(Scalar::from_uint(n, dest.layout.size), dest)?;
             self.goto_block(return_to_block);
             return Ok(true);
         }
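
The removed shift dance and the new `self.truncate(n, dest.layout)` compute
the same thing: the value reduced modulo 2^(bit width of the destination). A
self-contained sketch, with 64-bit example numbers:

    // Sketch: keep only the low `size_bytes * 8` bits of a u128, exactly as
    // the removed `(n << amt) >> amt` did for the pointer-sized destination.
    fn truncate(n: u128, size_bytes: u64) -> u128 {
        let amt = 128 - size_bytes * 8;
        (n << amt) >> amt
    }

    // On a 64-bit target: truncate(u128::max_value(), 8) yields
    // u64::max_value() as u128, i.e. usize::MAX for the interpreted program.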
@@ -151,7 +100,6 @@ fn eval_fn_call(
                     instance,
                     destination,
                     args,
-                    sig,
                     path,
                 )?;
                 return Ok(true);
@@ -160,8 +108,8 @@ fn eval_fn_call(
         };
 
         let (return_place, return_to_block) = match destination {
-            Some((place, block)) => (place, StackPopCleanup::Goto(block)),
-            None => (Place::undef(), StackPopCleanup::None),
+            Some((place, block)) => (*place, StackPopCleanup::Goto(block)),
+            None => (Place::null(&self), StackPopCleanup::None),
         };
 
         self.push_stack_frame(
@@ -178,9 +126,8 @@ fn eval_fn_call(
     fn call_foreign_item(
         &mut self,
         def_id: DefId,
-        args: &[ValTy<'tcx>],
-        dest: Place,
-        dest_ty: Ty<'tcx>,
+        args: &[OpTy<'tcx>],
+        dest: PlaceTy<'tcx>,
         dest_block: mir::BasicBlock,
     ) -> EvalResult<'tcx> {
         let attrs = self.tcx.get_attrs(def_id);
@@ -188,22 +135,21 @@ fn call_foreign_item(
             Some(name) => name.as_str(),
             None => self.tcx.item_name(def_id).as_str(),
         };
-        let dest_layout = self.layout_of(dest_ty)?;
 
         match &link_name[..] {
             "malloc" => {
-                let size = self.value_to_scalar(args[0])?.to_usize(self)?;
+                let size = self.read_scalar(args[0])?.to_usize(&self)?;
                 if size == 0 {
-                    self.write_null(dest, dest_layout)?;
+                    self.write_null(dest)?;
                 } else {
                     let align = self.tcx.data_layout.pointer_align;
                     let ptr = self.memory.allocate(Size::from_bytes(size), align, MemoryKind::C.into())?;
-                    self.write_scalar(dest, Scalar::Ptr(ptr), dest_ty)?;
+                    self.write_scalar(Scalar::Ptr(ptr), dest)?;
                 }
             }
 
             "free" => {
-                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
+                let ptr = self.read_scalar(args[0])?.not_undef()?;
                 if !ptr.is_null() {
                     self.memory.deallocate(
                         ptr.to_ptr()?,
@@ -214,8 +160,8 @@ fn call_foreign_item(
             }
 
             "__rust_alloc" => {
-                let size = self.value_to_scalar(args[0])?.to_usize(self)?;
-                let align = self.value_to_scalar(args[1])?.to_usize(self)?;
+                let size = self.read_scalar(args[0])?.to_usize(&self)?;
+                let align = self.read_scalar(args[1])?.to_usize(&self)?;
                 if size == 0 {
                     return err!(HeapAllocZeroBytes);
                 }
@@ -225,11 +171,11 @@ fn call_foreign_item(
                 let ptr = self.memory.allocate(Size::from_bytes(size),
                                                Align::from_bytes(align, align).unwrap(),
                                                MemoryKind::Rust.into())?;
-                self.write_scalar(dest, Scalar::Ptr(ptr), dest_ty)?;
+                self.write_scalar(Scalar::Ptr(ptr), dest)?;
             }
             "__rust_alloc_zeroed" => {
-                let size = self.value_to_scalar(args[0])?.to_usize(self)?;
-                let align = self.value_to_scalar(args[1])?.to_usize(self)?;
+                let size = self.read_scalar(args[0])?.to_usize(&self)?;
+                let align = self.read_scalar(args[1])?.to_usize(&self)?;
                 if size == 0 {
                     return err!(HeapAllocZeroBytes);
                 }
@@ -240,12 +186,12 @@ fn call_foreign_item(
                                                Align::from_bytes(align, align).unwrap(),
                                                MemoryKind::Rust.into())?;
                 self.memory.write_repeat(ptr.into(), 0, Size::from_bytes(size))?;
-                self.write_scalar(dest, Scalar::Ptr(ptr), dest_ty)?;
+                self.write_scalar(Scalar::Ptr(ptr), dest)?;
             }
             "__rust_dealloc" => {
-                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?.to_ptr()?;
-                let old_size = self.value_to_scalar(args[1])?.to_usize(self)?;
-                let align = self.value_to_scalar(args[2])?.to_usize(self)?;
+                let ptr = self.read_scalar(args[0])?.to_ptr()?;
+                let old_size = self.read_scalar(args[1])?.to_usize(&self)?;
+                let align = self.read_scalar(args[2])?.to_usize(&self)?;
                 if old_size == 0 {
                     return err!(HeapAllocZeroBytes);
                 }
@@ -259,10 +205,10 @@ fn call_foreign_item(
                 )?;
             }
             "__rust_realloc" => {
-                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?.to_ptr()?;
-                let old_size = self.value_to_scalar(args[1])?.to_usize(self)?;
-                let align = self.value_to_scalar(args[2])?.to_usize(self)?;
-                let new_size = self.value_to_scalar(args[3])?.to_usize(self)?;
+                let ptr = self.read_scalar(args[0])?.to_ptr()?;
+                let old_size = self.read_scalar(args[1])?.to_usize(&self)?;
+                let align = self.read_scalar(args[2])?.to_usize(&self)?;
+                let new_size = self.read_scalar(args[3])?.to_usize(&self)?;
                 if old_size == 0 || new_size == 0 {
                     return err!(HeapAllocZeroBytes);
                 }
@@ -277,7 +223,7 @@ fn call_foreign_item(
                     Align::from_bytes(align, align).unwrap(),
                     MemoryKind::Rust.into(),
                 )?;
-                self.write_scalar(dest, Scalar::Ptr(new_ptr), dest_ty)?;
+                self.write_scalar(Scalar::Ptr(new_ptr), dest)?;
             }
 
             "syscall" => {
@@ -286,7 +232,7 @@ fn call_foreign_item(
                 //
                 // libc::syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), GRND_NONBLOCK)
                 // is called if a `HashMap` is created the regular way.
-                match self.value_to_scalar(args[0])?.to_usize(self)? {
+                match self.read_scalar(args[0])?.to_usize(&self)? {
                     318 | 511 => {
                         return err!(Unimplemented(
                             "miri does not support random number generators".to_owned(),
@@ -301,8 +247,8 @@ fn call_foreign_item(
             }
 
             "dlsym" => {
-                let _handle = self.into_ptr(args[0].value)?;
-                let symbol = self.into_ptr(args[1].value)?.unwrap_or_err()?.to_ptr()?;
+                let _handle = self.read_scalar(args[0])?;
+                let symbol = self.read_scalar(args[1])?.to_ptr()?;
                 let symbol_name = self.memory.read_c_str(symbol)?;
                 let err = format!("bad c unicode symbol: {:?}", symbol_name);
                 let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err);
@@ -315,20 +261,20 @@ fn call_foreign_item(
             "__rust_maybe_catch_panic" => {
                 // fn __rust_maybe_catch_panic(f: fn(*mut u8), data: *mut u8, data_ptr: *mut usize, vtable_ptr: *mut usize) -> u32
                 // We abort on panic, so not much is going on here, but we still have to call the closure
-                let u8_ptr_ty = self.tcx.mk_mut_ptr(self.tcx.types.u8);
-                let f = self.into_ptr(args[0].value)?.unwrap_or_err()?.to_ptr()?;
-                let data = self.into_ptr(args[1].value)?.unwrap_or_err()?;
+                let f = self.read_scalar(args[0])?.to_ptr()?;
+                let data = self.read_scalar(args[1])?.not_undef()?;
                 let f_instance = self.memory.get_fn(f)?;
-                self.write_null(dest, dest_layout)?;
+                self.write_null(dest)?;
 
                 // Now we make a function call.  TODO: Consider making this re-usable?  EvalContext::step does sth. similar for the TLS dtors,
                 // and of course eval_main.
                 let mir = self.load_mir(f_instance.def)?;
+                let ret = Place::null(&self);
                 self.push_stack_frame(
                     f_instance,
                     mir.span,
                     mir,
-                    Place::undef(),
+                    ret,
                     StackPopCleanup::Goto(dest_block),
                 )?;
                 let mut args = self.frame().mir.args_iter();
@@ -340,12 +286,12 @@ fn call_foreign_item(
                     ),
                 )?;
                 let arg_dest = self.eval_place(&mir::Place::Local(arg_local))?;
-                self.write_ptr(arg_dest, data, u8_ptr_ty)?;
+                self.write_scalar(data, arg_dest)?;
 
                 assert!(args.next().is_none(), "__rust_maybe_catch_panic argument has more arguments than expected");
 
                 // We ourselves return 0
-                self.write_null(dest, dest_layout)?;
+                self.write_null(dest)?;
 
                 // Don't fall through
                 return Ok(());
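
Since miri aborts on panic instead of unwinding, the shim above degenerates to
"call f(data), then return 0". As a plain-Rust sketch of the contract being
implemented (not the interpreter code itself):

    // Sketch: under miri's abort-on-panic model the catch never fires.
    fn maybe_catch_panic(f: fn(*mut u8), data: *mut u8) -> u32 {
        f(data); // never unwinds here; a real panic aborts the whole run
        0        // 0 tells the caller "no panic was caught"
    }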
@@ -356,9 +302,9 @@ fn call_foreign_item(
             }
 
             "memcmp" => {
-                let left = self.into_ptr(args[0].value)?.unwrap_or_err()?;
-                let right = self.into_ptr(args[1].value)?.unwrap_or_err()?;
-                let n = Size::from_bytes(self.value_to_scalar(args[2])?.to_usize(self)?);
+                let left = self.read_scalar(args[0])?.not_undef()?;
+                let right = self.read_scalar(args[1])?.not_undef()?;
+                let n = Size::from_bytes(self.read_scalar(args[2])?.to_usize(&self)?);
 
                 let result = {
                     let left_bytes = self.memory.read_bytes(left, n)?;
@@ -373,58 +319,57 @@ fn call_foreign_item(
                 };
 
                 self.write_scalar(
-                    dest,
                     Scalar::from_i32(result),
-                    dest_ty,
+                    dest,
                 )?;
             }
 
             "memrchr" => {
-                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
-                let val = self.value_to_scalar(args[1])?.to_bytes()? as u8;
-                let num = self.value_to_scalar(args[2])?.to_usize(self)?;
+                let ptr = self.read_scalar(args[0])?.not_undef()?;
+                let val = self.read_scalar(args[1])?.to_bytes()? as u8;
+                let num = self.read_scalar(args[2])?.to_usize(&self)?;
                 if let Some(idx) = self.memory.read_bytes(ptr, Size::from_bytes(num))?.iter().rev().position(
                     |&c| c == val,
                 )
                 {
                     let new_ptr = ptr.ptr_offset(Size::from_bytes(num - idx as u64 - 1), &self)?;
-                    self.write_ptr(dest, new_ptr, dest_ty)?;
+                    self.write_scalar(new_ptr, dest)?;
                 } else {
-                    self.write_null(dest, dest_layout)?;
+                    self.write_null(dest)?;
                 }
             }
 
             "memchr" => {
-                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
-                let val = self.value_to_scalar(args[1])?.to_bytes()? as u8;
-                let num = self.value_to_scalar(args[2])?.to_usize(self)?;
+                let ptr = self.read_scalar(args[0])?.not_undef()?;
+                let val = self.read_scalar(args[1])?.to_bytes()? as u8;
+                let num = self.read_scalar(args[2])?.to_usize(&self)?;
                 if let Some(idx) = self.memory.read_bytes(ptr, Size::from_bytes(num))?.iter().position(
                     |&c| c == val,
                 )
                 {
                     let new_ptr = ptr.ptr_offset(Size::from_bytes(idx as u64), &self)?;
-                    self.write_ptr(dest, new_ptr, dest_ty)?;
+                    self.write_scalar(new_ptr, dest)?;
                 } else {
-                    self.write_null(dest, dest_layout)?;
+                    self.write_null(dest)?;
                 }
             }
 
             "getenv" => {
                 let result = {
-                    let name_ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?.to_ptr()?;
+                    let name_ptr = self.read_scalar(args[0])?.to_ptr()?;
                     let name = self.memory.read_c_str(name_ptr)?;
                     match self.machine.env_vars.get(name) {
                         Some(&var) => Scalar::Ptr(var),
                         None => Scalar::null(self.memory.pointer_size()),
                     }
                 };
-                self.write_scalar(dest, result, dest_ty)?;
+                self.write_scalar(result, dest)?;
             }
 
             "unsetenv" => {
                 let mut success = None;
                 {
-                    let name_ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
+                    let name_ptr = self.read_scalar(args[0])?.not_undef()?;
                     if !name_ptr.is_null() {
                         let name = self.memory.read_c_str(name_ptr.to_ptr()?)?;
                         if !name.is_empty() && !name.contains(&b'=') {
@@ -436,17 +381,17 @@ fn call_foreign_item(
                     if let Some(var) = old {
                         self.memory.deallocate(var, None, MemoryKind::Env.into())?;
                     }
-                    self.write_null(dest, dest_layout)?;
+                    self.write_null(dest)?;
                 } else {
-                    self.write_scalar(dest, Scalar::from_int(-1, dest_layout.size), dest_ty)?;
+                    self.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
                 }
             }
 
             "setenv" => {
                 let mut new = None;
                 {
-                    let name_ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
-                    let value_ptr = self.into_ptr(args[1].value)?.unwrap_or_err()?.to_ptr()?;
+                    let name_ptr = self.read_scalar(args[0])?.not_undef()?;
+                    let value_ptr = self.read_scalar(args[1])?.to_ptr()?;
                     let value = self.memory.read_c_str(value_ptr)?;
                     if !name_ptr.is_null() {
                         let name = self.memory.read_c_str(name_ptr.to_ptr()?)?;
@@ -472,16 +417,16 @@ fn call_foreign_item(
                     {
                         self.memory.deallocate(var, None, MemoryKind::Env.into())?;
                     }
-                    self.write_null(dest, dest_layout)?;
+                    self.write_null(dest)?;
                 } else {
-                    self.write_scalar(dest, Scalar::from_int(-1, dest_layout.size), dest_ty)?;
+                    self.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
                 }
             }
 
             "write" => {
-                let fd = self.value_to_scalar(args[0])?.to_bytes()?;
-                let buf = self.into_ptr(args[1].value)?.unwrap_or_err()?;
-                let n = self.value_to_scalar(args[2])?.to_bytes()? as u64;
+                let fd = self.read_scalar(args[0])?.to_bytes()?;
+                let buf = self.read_scalar(args[1])?.not_undef()?;
+                let n = self.read_scalar(args[2])?.to_bytes()? as u64;
                 trace!("Called write({:?}, {:?}, {:?})", fd, buf, n);
                 let result = if fd == 1 || fd == 2 {
                     // stdout/stderr
@@ -501,36 +446,31 @@ fn call_foreign_item(
                     warn!("Ignored output to FD {}", fd);
                     n as i64 // pretend it all went well
                 }; // now result is the value we return back to the program
-                let ptr_size = self.memory.pointer_size();
                 self.write_scalar(
+                    Scalar::from_int(result, dest.layout.size),
                     dest,
-                    Scalar::from_int(result, ptr_size),
-                    dest_ty,
                 )?;
             }
 
             "strlen" => {
-                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?.to_ptr()?;
+                let ptr = self.read_scalar(args[0])?.to_ptr()?;
                 let n = self.memory.read_c_str(ptr)?.len();
-                let ptr_size = self.memory.pointer_size();
-                self.write_scalar(dest, Scalar::from_uint(n as u64, ptr_size), dest_ty)?;
+                self.write_scalar(Scalar::from_uint(n as u64, dest.layout.size), dest)?;
             }
 
             // Some things needed for sys::thread initialization to go through
             "signal" | "sigaction" | "sigaltstack" => {
-                let ptr_size = self.memory.pointer_size();
-                self.write_scalar(dest, Scalar::null(ptr_size), dest_ty)?;
+                self.write_scalar(Scalar::null(dest.layout.size), dest)?;
             }
 
             "sysconf" => {
-                let name = self.value_to_scalar(args[0])?.to_usize(self)?;
-                let ptr_size = self.memory.pointer_size();
+                let name = self.read_scalar(args[0])?.to_usize(&self)?;
 
                 trace!("sysconf() called with name {}", name);
                 // cache the sysconf integers via miri's global cache
                 let paths = &[
-                    (&["libc", "_SC_PAGESIZE"], Scalar::from_int(4096, ptr_size)),
-                    (&["libc", "_SC_GETPW_R_SIZE_MAX"], Scalar::from_int(-1, ptr_size)),
+                    (&["libc", "_SC_PAGESIZE"], Scalar::from_int(4096, dest.layout.size)),
+                    (&["libc", "_SC_GETPW_R_SIZE_MAX"], Scalar::from_int(-1, dest.layout.size)),
                 ];
                 let mut result = None;
                 for &(path, path_value) in paths {
@@ -548,7 +488,7 @@ fn call_foreign_item(
                     }
                 }
                 if let Some(result) = result {
-                    self.write_scalar(dest, result, dest_ty)?;
+                    self.write_scalar(result, dest)?;
                 } else {
                     return err!(Unimplemented(
                         format!("Unimplemented sysconf name: {}", name),
@@ -558,10 +498,10 @@ fn call_foreign_item(
 
             // Hook pthread calls that go to the thread-local storage memory subsystem
             "pthread_key_create" => {
-                let key_ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
+                let key_ptr = self.read_scalar(args[0])?.not_undef()?;
 
                 // Extract the function type out of the signature (that seems easier than constructing it ourselves...)
-                let dtor = match self.into_ptr(args[1].value)?.unwrap_or_err()? {
+                let dtor = match self.read_scalar(args[1])?.not_undef()? {
                     Scalar::Ptr(dtor_ptr) => Some(self.memory.get_fn(dtor_ptr)?),
                     Scalar::Bits { bits: 0, size } => {
                         assert_eq!(size as u64, self.memory.pointer_size().bytes());
@@ -571,7 +511,7 @@ fn call_foreign_item(
                 };
 
                 // Figure out how large a pthread TLS key actually is. This is libc::pthread_key_t.
-                let key_type = args[0].ty.builtin_deref(true)
+                let key_type = args[0].layout.ty.builtin_deref(true)
                                    .ok_or_else(|| EvalErrorKind::AbiViolation("Wrong signature used for pthread_key_create: First argument must be a raw pointer.".to_owned()))?.ty;
                 let key_layout = self.layout_of(key_type)?;
 
@@ -590,26 +530,26 @@ fn call_foreign_item(
                 )?;
 
                 // Return success (0)
-                self.write_null(dest, dest_layout)?;
+                self.write_null(dest)?;
             }
             "pthread_key_delete" => {
-                let key = self.value_to_scalar(args[0])?.to_bytes()?;
+                let key = self.read_scalar(args[0])?.to_bytes()?;
                 self.memory.delete_tls_key(key)?;
                 // Return success (0)
-                self.write_null(dest, dest_layout)?;
+                self.write_null(dest)?;
             }
             "pthread_getspecific" => {
-                let key = self.value_to_scalar(args[0])?.to_bytes()?;
+                let key = self.read_scalar(args[0])?.to_bytes()?;
                 let ptr = self.memory.load_tls(key)?;
-                self.write_ptr(dest, ptr, dest_ty)?;
+                self.write_scalar(ptr, dest)?;
             }
             "pthread_setspecific" => {
-                let key = self.value_to_scalar(args[0])?.to_bytes()?;
-                let new_ptr = self.into_ptr(args[1].value)?.unwrap_or_err()?;
+                let key = self.read_scalar(args[0])?.to_bytes()?;
+                let new_ptr = self.read_scalar(args[1])?.not_undef()?;
                 self.memory.store_tls(key, new_ptr)?;
 
                 // Return success (0)
-                self.write_null(dest, dest_layout)?;
+                self.write_null(dest)?;
             }
 
             "_tlv_atexit" => {
@@ -619,20 +559,19 @@ fn call_foreign_item(
             // Stub out all the other pthread calls to just return 0
             link_name if link_name.starts_with("pthread_") => {
                 debug!("ignoring C ABI call: {}", link_name);
-                self.write_null(dest, dest_layout)?;
+                self.write_null(dest)?;
             }
 
             "mmap" => {
                 // This is a horrible hack, but well... the guard page mechanism calls mmap and expects a particular return value, so we give it that value
-                let addr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
-                self.write_ptr(dest, addr, dest_ty)?;
+                let addr = self.read_scalar(args[0])?.not_undef()?;
+                self.write_scalar(addr, dest)?;
             }
 
             // Windows API stubs
             "AddVectoredExceptionHandler" => {
                 // any non zero value works for the stdlib. This is just used for stackoverflows anyway
-                let ptr_size = self.memory.pointer_size();
-                self.write_scalar(dest, Scalar::from_int(1, ptr_size), dest_ty)?;
+                self.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
             },
             "InitializeCriticalSection" |
             "EnterCriticalSection" |
@@ -645,11 +584,11 @@ fn call_foreign_item(
             "GetProcAddress" |
             "TryEnterCriticalSection" => {
                 // pretend these do not exist/nothing happened, by returning zero
-                self.write_scalar(dest, Scalar::from_int(0, dest_layout.size), dest_ty)?;
+                self.write_null(dest)?;
             },
             "GetLastError" => {
                 // this is c::ERROR_CALL_NOT_IMPLEMENTED
-                self.write_scalar(dest, Scalar::from_int(120, dest_layout.size), dest_ty)?;
+                self.write_scalar(Scalar::from_int(120, dest.layout.size), dest)?;
             },
 
             // Windows TLS
@@ -660,23 +599,23 @@ fn call_foreign_item(
                 let key = self.memory.create_tls_key(None) as u128;
 
                 // Figure out how large a TLS key actually is. This is c::DWORD.
-                if dest_layout.size.bits() < 128 && key >= (1u128 << dest_layout.size.bits() as u128) {
+                if dest.layout.size.bits() < 128 && key >= (1u128 << dest.layout.size.bits() as u128) {
                     return err!(OutOfTls);
                 }
-                self.write_scalar(dest, Scalar::from_uint(key, dest_layout.size), dest_layout.ty)?;
+                self.write_scalar(Scalar::from_uint(key, dest.layout.size), dest)?;
             }
             "TlsGetValue" => {
-                let key = self.value_to_scalar(args[0])?.to_bytes()?;
+                let key = self.read_scalar(args[0])?.to_bytes()?;
                 let ptr = self.memory.load_tls(key)?;
-                self.write_ptr(dest, ptr, dest_ty)?;
+                self.write_scalar(ptr, dest)?;
             }
             "TlsSetValue" => {
-                let key = self.value_to_scalar(args[0])?.to_bytes()?;
-                let new_ptr = self.into_ptr(args[1].value)?.unwrap_or_err()?;
+                let key = self.read_scalar(args[0])?.to_bytes()?;
+                let new_ptr = self.read_scalar(args[1])?.not_undef()?;
                 self.memory.store_tls(key, new_ptr)?;
 
                 // Return success (1)
-                self.write_scalar(dest, Scalar::from_int(1, dest_layout.size), dest_ty)?;
+                self.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
             }
 
             // We can't execute anything else
@@ -690,7 +629,7 @@ fn call_foreign_item(
         // Since we pushed no stack frame, the main loop will act
         // as if the call just completed and it's returning to the
         // current frame.
-        self.dump_local(dest);
+        self.dump_place(*dest);
         self.goto_block(dest_block);
         Ok(())
     }
@@ -732,9 +671,8 @@ fn resolve_path(&self, path: &[&str]) -> EvalResult<'tcx, ty::Instance<'tcx>> {
     fn call_missing_fn(
         &mut self,
         instance: ty::Instance<'tcx>,
-        destination: Option<(Place, mir::BasicBlock)>,
-        args: &[ValTy<'tcx>],
-        sig: ty::FnSig<'tcx>,
+        destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
+        args: &[OpTy<'tcx>],
         path: String,
     ) -> EvalResult<'tcx> {
         // In some cases in non-MIR libstd-mode, not having a destination is legit.  Handle these early.
@@ -745,7 +683,6 @@ fn call_missing_fn(
             _ => {}
         }
 
-        let dest_ty = sig.output();
         let (dest, dest_block) = destination.ok_or_else(
             || EvalErrorKind::NoMirFor(path.clone()),
         )?;
@@ -758,7 +695,6 @@ fn call_missing_fn(
                 instance.def_id(),
                 args,
                 dest,
-                dest_ty,
                 dest_block,
             )?;
             return Ok(());
@@ -784,8 +720,7 @@ fn call_missing_fn(
             "std::panicking::panicking" |
             "std::rt::panicking" => {
                 // we abort on panic -> `std::rt::panicking` always returns false
-                let bool = self.tcx.types.bool;
-                self.write_scalar(dest, Scalar::from_bool(false), bool)?;
+                self.write_scalar(Scalar::from_bool(false), dest)?;
             }
 
             _ => return err!(NoMirFor(path)),
@@ -794,12 +729,12 @@ fn call_missing_fn(
         // Since we pushed no stack frame, the main loop will act
         // as if the call just completed and it's returning to the
         // current frame.
-        self.dump_local(dest);
+        self.dump_place(*dest);
         self.goto_block(dest_block);
         Ok(())
     }
 
-    fn write_null(&mut self, dest: Place, dest_layout: TyLayout<'tcx>) -> EvalResult<'tcx> {
-        self.write_scalar(dest, Scalar::null(dest_layout.size), dest_layout.ty)
+    fn write_null(&mut self, dest: PlaceTy<'tcx>) -> EvalResult<'tcx> {
+        self.write_scalar(Scalar::null(dest.layout.size), dest)
     }
 }
diff --git a/src/helpers.rs b/src/helpers.rs
index 8482c484608b741ad13dcc6fb538c99990b75940..606f1bb4ecb447e9cbcf0332b46bcfe30b1cbd23 100644 (file)
-use mir;
-use rustc::ty::Ty;
-use rustc::ty::layout::{LayoutOf, Size};
+use rustc::ty::layout::{Size, HasDataLayout};
 
-use super::{Scalar, ScalarExt, EvalResult, EvalContext, ValTy};
+use super::{Scalar, ScalarMaybeUndef, EvalResult};
 use rustc_mir::interpret::sign_extend;
 
-pub trait EvalContextExt<'tcx> {
-    fn wrapping_pointer_offset(
-        &self,
-        ptr: Scalar,
-        pointee_ty: Ty<'tcx>,
-        offset: i64,
-    ) -> EvalResult<'tcx, Scalar>;
-
-    fn pointer_offset(
-        &self,
-        ptr: Scalar,
-        pointee_ty: Ty<'tcx>,
-        offset: i64,
-    ) -> EvalResult<'tcx, Scalar>;
-
-    fn value_to_isize(
-        &self,
-        value: ValTy<'tcx>,
-    ) -> EvalResult<'tcx, i64>;
-
-    fn value_to_usize(
-        &self,
-        value: ValTy<'tcx>,
-    ) -> EvalResult<'tcx, u64>;
-
-    fn value_to_i32(
-        &self,
-        value: ValTy<'tcx>,
-    ) -> EvalResult<'tcx, i32>;
-
-    fn value_to_u8(
-        &self,
-        value: ValTy<'tcx>,
-    ) -> EvalResult<'tcx, u8>;
+pub trait ScalarExt {
+    fn null(size: Size) -> Self;
+    fn from_i32(i: i32) -> Self;
+    fn from_uint(i: impl Into<u128>, ptr_size: Size) -> Self;
+    fn from_int(i: impl Into<i128>, ptr_size: Size) -> Self;
+    fn from_f32(f: f32) -> Self;
+    fn from_f64(f: f64) -> Self;
+    fn is_null(self) -> bool;
 }
 
-impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>> {
-    fn wrapping_pointer_offset(
-        &self,
-        ptr: Scalar,
-        pointee_ty: Ty<'tcx>,
-        offset: i64,
-    ) -> EvalResult<'tcx, Scalar> {
-        // FIXME: assuming here that type size is < i64::max_value()
-        let pointee_size = self.layout_of(pointee_ty)?.size.bytes() as i64;
-        let offset = offset.overflowing_mul(pointee_size).0;
-        Ok(ptr.ptr_wrapping_signed_offset(offset, self))
-    }
-
-    fn pointer_offset(
-        &self,
-        ptr: Scalar,
-        pointee_ty: Ty<'tcx>,
-        offset: i64,
-    ) -> EvalResult<'tcx, Scalar> {
-        // This function raises an error if the offset moves the pointer outside of its allocation.  We consider
-        // ZSTs their own huge allocation that doesn't overlap with anything (and nothing moves in there because the size is 0).
-        // We also consider the NULL pointer its own separate allocation, and all the remaining integers pointers their own
-        // allocation.
-
-        if ptr.is_null() {
-            // NULL pointers must only be offset by 0
-            return if offset == 0 {
-                Ok(ptr)
-            } else {
-                err!(InvalidNullPointerUsage)
-            };
+pub trait FalibleScalarExt {
+    fn to_usize(self, cx: impl HasDataLayout) -> EvalResult<'static, u64>;
+    fn to_isize(self, cx: impl HasDataLayout) -> EvalResult<'static, i64>;
+    fn to_i32(self) -> EvalResult<'static, i32>;
+    fn to_u8(self) -> EvalResult<'static, u8>;
+
+    /// HACK: this function just extracts all bits if `defined != 0`.
+    /// It is mainly used for arguments of C functions; we should really
+    /// fetch the correct size of their arguments instead.
+    fn to_bytes(self) -> EvalResult<'static, u128>;
+}
+
+impl ScalarExt for Scalar {
+    fn null(size: Size) -> Self {
+        Scalar::Bits { bits: 0, size: size.bytes() as u8 }
+    }
+
+    fn from_i32(i: i32) -> Self {
+        Scalar::Bits { bits: i as u32 as u128, size: 4 }
+    }
+
+    fn from_uint(i: impl Into<u128>, size: Size) -> Self {
+        Scalar::Bits { bits: i.into(), size: size.bytes() as u8 }
+    }
+
+    fn from_int(i: impl Into<i128>, size: Size) -> Self {
+        Scalar::Bits { bits: i.into() as u128, size: size.bytes() as u8 }
+    }
+
+    fn from_f32(f: f32) -> Self {
+        Scalar::Bits { bits: f.to_bits() as u128, size: 4 }
+    }
+
+    fn from_f64(f: f64) -> Self {
+        Scalar::Bits { bits: f.to_bits() as u128, size: 8 }
+    }
+
+    fn is_null(self) -> bool {
+        match self {
+            Scalar::Bits { bits, .. } => bits == 0,
+            Scalar::Ptr(_) => false
         }
-        // FIXME: assuming here that type size is < i64::max_value()
-        let pointee_size = self.layout_of(pointee_ty)?.size.bytes() as i64;
-         if let Some(offset) = offset.checked_mul(pointee_size) {
-            let ptr = ptr.ptr_signed_offset(offset, self)?;
-            // Do not do bounds-checking for integers; they can never alias a normal pointer anyway.
-            if let Scalar::Ptr(ptr) = ptr {
-                self.memory.check_bounds(ptr, false)?;
-            } else if ptr.is_null() {
-                // We moved *to* a NULL pointer.  That seems wrong, LLVM considers the NULL pointer its own small allocation.  Reject this, for now.
-                return err!(InvalidNullPointerUsage);
-            }
-            Ok(ptr)
-        } else {
-            err!(Overflow(mir::BinOp::Mul))
+    }
+}
+
+impl FalibleScalarExt for Scalar {
+    fn to_usize(self, cx: impl HasDataLayout) -> EvalResult<'static, u64> {
+        let b = self.to_bits(cx.data_layout().pointer_size)?;
+        assert_eq!(b as u64 as u128, b);
+        Ok(b as u64)
+    }
+
+    fn to_u8(self) -> EvalResult<'static, u8> {
+        let sz = Size::from_bits(8);
+        let b = self.to_bits(sz)?;
+        assert_eq!(b as u8 as u128, b);
+        Ok(b as u8)
+    }
+
+    fn to_isize(self, cx: impl HasDataLayout) -> EvalResult<'static, i64> {
+        let b = self.to_bits(cx.data_layout().pointer_size)?;
+        let b = sign_extend(b, cx.data_layout().pointer_size) as i128;
+        assert_eq!(b as i64 as i128, b);
+        Ok(b as i64)
+    }
+
+    fn to_i32(self) -> EvalResult<'static, i32> {
+        let sz = Size::from_bits(32);
+        let b = self.to_bits(sz)?;
+        let b = sign_extend(b, sz) as i128;
+        assert_eq!(b as i32 as i128, b);
+        Ok(b as i32)
+    }
+
+    fn to_bytes(self) -> EvalResult<'static, u128> {
+        match self {
+            Scalar::Bits { bits, size } => {
+                assert_ne!(size, 0);
+                Ok(bits)
+            },
+            Scalar::Ptr(_) => err!(ReadPointerAsBytes),
         }
     }
+}
+
+impl FalibleScalarExt for ScalarMaybeUndef {
+    fn to_usize(self, cx: impl HasDataLayout) -> EvalResult<'static, u64> {
+        self.not_undef()?.to_usize(cx)
+    }
+
+    fn to_u8(self) -> EvalResult<'static, u8> {
+        self.not_undef()?.to_u8()
+    }
+
+    fn to_isize(self, cx: impl HasDataLayout) -> EvalResult<'static, i64> {
+        self.not_undef()?.to_isize(cx)
+    }
+
+    fn to_i32(self) -> EvalResult<'static, i32> {
+        self.not_undef()?.to_i32()
+    }
 
-    fn value_to_isize(
-        &self,
-        value: ValTy<'tcx>,
-    ) -> EvalResult<'tcx, i64> {
-        assert_eq!(value.ty, self.tcx.types.isize);
-        let raw = self.value_to_scalar(value)?.to_bits(self.memory.pointer_size())?;
-        let raw = sign_extend(raw, self.layout_of(self.tcx.types.isize).unwrap());
-        Ok(raw as i64)
-    }
-
-    fn value_to_usize(
-        &self,
-        value: ValTy<'tcx>,
-    ) -> EvalResult<'tcx, u64> {
-        assert_eq!(value.ty, self.tcx.types.usize);
-        self.value_to_scalar(value)?.to_bits(self.memory.pointer_size()).map(|v| v as u64)
-    }
-
-    fn value_to_i32(
-        &self,
-        value: ValTy<'tcx>,
-    ) -> EvalResult<'tcx, i32> {
-        assert_eq!(value.ty, self.tcx.types.i32);
-        let raw = self.value_to_scalar(value)?.to_bits(Size::from_bits(32))?;
-        let raw = sign_extend(raw, self.layout_of(self.tcx.types.i32).unwrap());
-        Ok(raw as i32)
-    }
-
-    fn value_to_u8(
-        &self,
-        value: ValTy<'tcx>,
-    ) -> EvalResult<'tcx, u8> {
-        assert_eq!(value.ty, self.tcx.types.u8);
-        self.value_to_scalar(value)?.to_bits(Size::from_bits(8)).map(|v| v as u8)
+    fn to_bytes(self) -> EvalResult<'static, u128> {
+        self.not_undef()?.to_bytes()
     }
 }
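
Taken together, the two new traits split the old per-type helpers into
infallible constructors (`ScalarExt`) and fallible readers
(`FalibleScalarExt`), the latter implemented for both `Scalar` and
`ScalarMaybeUndef` so call sites need not care about definedness. A
hypothetical usage sketch (assumes the miri types are in scope; illustration
only):

    // Round-tripping an i32 through the new helpers.
    fn demo() -> EvalResult<'static, ()> {
        let s = Scalar::from_i32(-1);            // Bits { bits: 0xffff_ffff, size: 4 }
        assert_eq!(s.to_i32()?, -1);             // to_i32 sign-extends the low 32 bits
        assert_eq!(s.to_bytes()?, 0xffff_ffff);  // raw bits, no sign extension
        assert!(!s.is_null());
        Ok(())
    }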
diff --git a/src/intrinsic.rs b/src/intrinsic.rs
index cd953ba7c569351e5ba2ea39f975118a622895a5..631653b97b6b601c8263598a95342ffbbd7f01f2 100644 (file)
@@ -1,21 +1,20 @@
 use rustc::mir;
-use rustc::ty::layout::{TyLayout, LayoutOf, Size, Primitive, Integer::*};
+use rustc::ty::layout::{self, LayoutOf, Size, Primitive, Integer::*};
 use rustc::ty;
 
-use rustc::mir::interpret::{EvalResult, Scalar, Value, ScalarMaybeUndef};
-use rustc_mir::interpret::{Place, PlaceExtra, HasMemory, EvalContext, ValTy};
+use rustc::mir::interpret::{EvalResult, Scalar, ScalarMaybeUndef};
+use rustc_mir::interpret::{
+    PlaceExtra, PlaceTy, EvalContext, OpTy, Value
+};
 
-use helpers::EvalContextExt as HelperEvalContextExt;
-
-use super::ScalarExt;
+use super::{ScalarExt, FalibleScalarExt, OperatorEvalContextExt};
 
 pub trait EvalContextExt<'tcx> {
     fn call_intrinsic(
         &mut self,
         instance: ty::Instance<'tcx>,
-        args: &[ValTy<'tcx>],
-        dest: Place,
-        dest_layout: TyLayout<'tcx>,
+        args: &[OpTy<'tcx>],
+        dest: PlaceTy<'tcx>,
         target: mir::BasicBlock,
     ) -> EvalResult<'tcx>;
 }
@@ -24,9 +23,8 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
     fn call_intrinsic(
         &mut self,
         instance: ty::Instance<'tcx>,
-        args: &[ValTy<'tcx>],
-        dest: Place,
-        dest_layout: TyLayout<'tcx>,
+        args: &[OpTy<'tcx>],
+        dest: PlaceTy<'tcx>,
         target: mir::BasicBlock,
     ) -> EvalResult<'tcx> {
         let substs = instance.substs;
@@ -34,44 +32,51 @@ fn call_intrinsic(
         let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..];
         match intrinsic_name {
             "add_with_overflow" => {
-                self.intrinsic_with_overflow(
+                let l = self.read_value(args[0])?;
+                let r = self.read_value(args[1])?;
+                self.binop_with_overflow(
                     mir::BinOp::Add,
-                    args[0],
-                    args[1],
+                    l,
+                    r,
                     dest,
-                    dest_layout.ty,
                 )?
             }
 
             "sub_with_overflow" => {
-                self.intrinsic_with_overflow(
+                let l = self.read_value(args[0])?;
+                let r = self.read_value(args[1])?;
+                self.binop_with_overflow(
                     mir::BinOp::Sub,
-                    args[0],
-                    args[1],
+                    l,
+                    r,
                     dest,
-                    dest_layout.ty,
                 )?
             }
 
             "mul_with_overflow" => {
-                self.intrinsic_with_overflow(
+                let l = self.read_value(args[0])?;
+                let r = self.read_value(args[1])?;
+                self.binop_with_overflow(
                     mir::BinOp::Mul,
-                    args[0],
-                    args[1],
+                    l,
+                    r,
                     dest,
-                    dest_layout.ty,
                 )?
             }
 
             "arith_offset" => {
-                let offset = self.value_to_isize(args[1])?;
-                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
-                let result_ptr = self.wrapping_pointer_offset(ptr, substs.type_at(0), offset)?;
-                self.write_ptr(dest, result_ptr, dest_layout.ty)?;
+                let offset = self.read_scalar(args[1])?.to_isize(&self)?;
+                let ptr = self.read_scalar(args[0])?.not_undef()?;
+
+                let pointee_ty = substs.type_at(0);
+                let pointee_size = self.layout_of(pointee_ty)?.size.bytes() as i64;
+                let offset = offset.overflowing_mul(pointee_size).0;
+                let result_ptr = ptr.ptr_wrapping_signed_offset(offset, &self);
+                self.write_scalar(result_ptr, dest)?;
             }
 
             "assume" => {
-                let cond = self.value_to_scalar(args[0])?.to_bool()?;
+                let cond = self.read_scalar(args[0])?.to_bool()?;
                 if !cond {
                     return err!(AssumptionNotHeld);
                 }
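
The atomic load/store arms in the next hunk share one new pattern:
`ref_to_mplace(read_value(...))` turns a `&T` operand into the memory place it
points to, and funnelling the access through `read_scalar`/`write_scalar`
doubles as the "must fit in a scalar" check that atomics need. A pseudo-typed
sketch of the data flow (the simplified signatures are an assumption):

    // Sketch of `atomic_load(ptr: &T) -> T` in the new API:
    //   let ref_val = self.read_value(args[0])?;       // the &T itself, as OpTy
    //   let place   = self.ref_to_mplace(ref_val)?;    // the memory it points to
    //   let scalar  = self.read_scalar(place.into())?; // errors unless T is scalar-sized
    //   self.write_scalar(scalar, dest)?;              // loaded value is the result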
@@ -81,24 +86,18 @@ fn call_intrinsic(
             "atomic_load_relaxed" |
             "atomic_load_acq" |
             "volatile_load" => {
-                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
-                let align = self.layout_of(args[0].ty)?.align;
-
-                let valty = ValTy {
-                    value: Value::ByRef(ptr, align),
-                    ty: substs.type_at(0),
-                };
-                self.write_value(valty, dest)?;
+                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
+                let val = self.read_scalar(ptr.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
+                self.write_scalar(val, dest)?;
             }
 
             "atomic_store" |
             "atomic_store_relaxed" |
             "atomic_store_rel" |
             "volatile_store" => {
-                let ty = substs.type_at(0);
-                let align = self.layout_of(ty)?.align;
-                let dest = self.into_ptr(args[0].value)?.unwrap_or_err()?;
-                self.write_value_to_ptr(args[1].value, dest, align, ty)?;
+                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
+                let val = self.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
+                self.write_scalar(val, ptr.into())?;
             }
 
             "atomic_fence_acq" => {
@@ -106,47 +105,26 @@ fn call_intrinsic(
             }
 
             _ if intrinsic_name.starts_with("atomic_xchg") => {
-                let ty = substs.type_at(0);
-                let align = self.layout_of(ty)?.align;
-                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
-                let change = self.value_to_scalar(args[1])?;
-                let old = self.read_value(ptr, align, ty)?;
-                let old = match old {
-                    Value::Scalar(val) => val,
-                    Value::ByRef { .. } => bug!("just read the value, can't be byref"),
-                    Value::ScalarPair(..) => bug!("atomic_xchg doesn't work with nonprimitives"),
-                };
-                self.write_scalar(dest, old, ty)?;
-                self.write_scalar(
-                    Place::from_scalar_ptr(ptr.into(), align),
-                    change,
-                    ty,
-                )?;
+                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
+                let new = self.read_scalar(args[1])?;
+                let old = self.read_scalar(ptr.into())?;
+                self.write_scalar(old, dest)?; // old value is returned
+                self.write_scalar(new, ptr.into())?;
             }
 
             _ if intrinsic_name.starts_with("atomic_cxchg") => {
-                let ty = substs.type_at(0);
-                let align = self.layout_of(ty)?.align;
-                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
-                let expect_old = self.value_to_scalar(args[1])?;
-                let change = self.value_to_scalar(args[2])?;
-                let old = self.read_value(ptr, align, ty)?;
-                let old = match old {
-                    Value::Scalar(val) => val.unwrap_or_err()?,
-                    Value::ByRef { .. } => bug!("just read the value, can't be byref"),
-                    Value::ScalarPair(..) => bug!("atomic_cxchg doesn't work with nonprimitives"),
-                };
-                let (val, _) = self.binary_op(mir::BinOp::Eq, old, ty, expect_old, ty)?;
-                let valty = ValTy {
-                    value: Value::ScalarPair(old.into(), val.into()),
-                    ty: dest_layout.ty,
-                };
-                self.write_value(valty, dest)?;
-                self.write_scalar(
-                    Place::from_scalar_ptr(ptr.into(), dest_layout.align),
-                    change,
-                    ty,
-                )?;
+                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
+                let expect_old = self.read_value(args[1])?; // read as value for the sake of `binary_op()`
+                let new = self.read_scalar(args[2])?;
+                let old = self.read_value(ptr.into())?; // read as value for the sake of `binary_op()`
+                // binary_op will bail if either of them is not a scalar
+                let (eq, _) = self.binary_op(mir::BinOp::Eq, old, expect_old)?;
+                let res = Value::ScalarPair(old.to_scalar_or_undef(), eq.into());
+                self.write_value(res, dest)?; // old value is returned
+                // update ptr depending on comparison
+                if eq.to_bool()? {
+                    self.write_scalar(new, ptr.into())?;
+                }
             }
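
The rewritten `atomic_cxchg` arm above returns the (old value, success) pair
and stores only when the comparison succeeds; the old code stored the new
value unconditionally. A plain sketch of the semantics it now implements, on
an ordinary u64 cell:

    // Compare-exchange as the hunk above implements it.
    fn cxchg(cell: &mut u64, expect: u64, new: u64) -> (u64, bool) {
        let old = *cell;
        let success = old == expect;
        if success {
            *cell = new; // store only on a successful comparison
        }
        (old, success)   // returned above as Value::ScalarPair(old, eq)
    }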
 
             "atomic_or" |
@@ -174,19 +152,10 @@ fn call_intrinsic(
             "atomic_xsub_rel" |
             "atomic_xsub_acqrel" |
             "atomic_xsub_relaxed" => {
-                let ty = substs.type_at(0);
-                let align = self.layout_of(ty)?.align;
-                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
-                let change = self.value_to_scalar(args[1])?;
-                let old = self.read_value(ptr, align, ty)?;
-                let old = match old {
-                    Value::Scalar(val) => val,
-                    Value::ByRef { .. } => bug!("just read the value, can't be byref"),
-                    Value::ScalarPair(..) => {
-                        bug!("atomic_xadd_relaxed doesn't work with nonprimitives")
-                    }
-                };
-                self.write_scalar(dest, old, ty)?;
+                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
+                let rhs = self.read_value(args[1])?;
+                let old = self.read_value(ptr.into())?;
+                self.write_value(*old, dest)?; // old value is returned
                 let op = match intrinsic_name.split('_').nth(1).unwrap() {
                     "or" => mir::BinOp::BitOr,
                     "xor" => mir::BinOp::BitXor,
@@ -196,8 +165,8 @@ fn call_intrinsic(
                     _ => bug!(),
                 };
                 // FIXME: what do atomics do on overflow?
-                let (val, _) = self.binary_op(op, old.unwrap_or_err()?, ty, change, ty)?;
-                self.write_scalar(Place::from_scalar_ptr(ptr.into(), dest_layout.align), val, ty)?;
+                let (val, _) = self.binary_op(op, old, rhs)?;
+                self.write_scalar(val, ptr.into())?;
             }
 
             "breakpoint" => unimplemented!(), // halt miri
@@ -207,13 +176,13 @@ fn call_intrinsic(
                 let elem_ty = substs.type_at(0);
                 let elem_layout = self.layout_of(elem_ty)?;
                 let elem_size = elem_layout.size.bytes();
-                let count = self.value_to_usize(args[2])?;
+                let count = self.read_scalar(args[2])?.to_usize(&self)?;
                 if count * elem_size != 0 {
                     // TODO: We do not even validate alignment for the 0-bytes case.  libstd relies on this in vec::IntoIter::next.
                     // Also see the write_bytes intrinsic.
                     let elem_align = elem_layout.align;
-                    let src = self.into_ptr(args[0].value)?.unwrap_or_err()?;
-                    let dest = self.into_ptr(args[1].value)?.unwrap_or_err()?;
+                    let src = self.read_scalar(args[0])?.not_undef()?;
+                    let dest = self.read_scalar(args[1])?.not_undef()?;
                     self.memory.copy(
                         src,
                         elem_align,
@@ -227,7 +196,7 @@ fn call_intrinsic(
 
             "ctpop" | "cttz" | "cttz_nonzero" | "ctlz" | "ctlz_nonzero" | "bswap" => {
                 let ty = substs.type_at(0);
-                let num = self.value_to_scalar(args[0])?.to_bytes()?;
+                let num = self.read_scalar(args[0])?.to_bytes()?;
                 let kind = match self.layout_of(ty)?.abi {
                     ty::layout::Abi::Scalar(ref scalar) => scalar.value,
                     _ => Err(::rustc::mir::interpret::EvalErrorKind::TypeNotPrimitive(ty))?,
@@ -240,22 +209,18 @@ fn call_intrinsic(
                 } else {
                     numeric_intrinsic(intrinsic_name, num, kind)?
                 };
-                self.write_scalar(dest, num, ty)?;
+                self.write_scalar(num, dest)?;
             }
 
             "discriminant_value" => {
-                let ty = substs.type_at(0);
-                let layout = self.layout_of(ty)?;
-                let adt_ptr = self.into_ptr(args[0].value)?;
-                let adt_align = self.layout_of(args[0].ty)?.align;
-                let place = Place::from_scalar_ptr(adt_ptr, adt_align);
-                let discr_val = self.read_discriminant_value(place, layout)?;
-                self.write_scalar(dest, Scalar::from_uint(discr_val, dest_layout.size), dest_layout.ty)?;
+                let place = self.ref_to_mplace(self.read_value(args[0])?)?;
+                let discr_val = self.read_discriminant_value(place.into())?;
+                self.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
             }
 
             "sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
             "log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" => {
-                let f = self.value_to_scalar(args[0])?.to_bytes()?;
+                let f = self.read_scalar(args[0])?.to_bytes()?;
                 let f = f32::from_bits(f as u32);
                 let f = match intrinsic_name {
                     "sinf32" => f.sin(),
@@ -272,12 +237,12 @@ fn call_intrinsic(
                     "truncf32" => f.trunc(),
                     _ => bug!(),
                 };
-                self.write_scalar(dest, Scalar::from_f32(f), dest_layout.ty)?;
+                self.write_scalar(Scalar::from_f32(f), dest)?;
             }
 
             "sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
             "log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" => {
-                let f = self.value_to_scalar(args[0])?.to_bytes()?;
+                let f = self.read_scalar(args[0])?.to_bytes()?;
                 let f = f64::from_bits(f as u64);
                 let f = match intrinsic_name {
                     "sinf64" => f.sin(),
@@ -294,13 +259,12 @@ fn call_intrinsic(
                     "truncf64" => f.trunc(),
                     _ => bug!(),
                 };
-                self.write_scalar(dest, Scalar::from_f64(f), dest_layout.ty)?;
+                self.write_scalar(Scalar::from_f64(f), dest)?;
             }
 
             "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
-                let ty = substs.type_at(0);
-                let a = self.value_to_scalar(args[0])?;
-                let b = self.value_to_scalar(args[1])?;
+                let a = self.read_value(args[0])?;
+                let b = self.read_value(args[1])?;
                 let op = match intrinsic_name {
                     "fadd_fast" => mir::BinOp::Add,
                     "fsub_fast" => mir::BinOp::Sub,
@@ -309,48 +273,43 @@ fn call_intrinsic(
                     "frem_fast" => mir::BinOp::Rem,
                     _ => bug!(),
                 };
-                let result = self.binary_op(op, a, ty, b, ty)?;
-                self.write_scalar(dest, result.0, dest_layout.ty)?;
+                let result = self.binary_op(op, a, b)?;
+                self.write_scalar(result.0, dest)?;
             }
 
             "exact_div" => {
                 // Performs an exact division, resulting in undefined behavior where
                 // `x % y != 0` or `y == 0` or `x == T::min_value() && y == -1`
-                let ty = substs.type_at(0);
-                let a = self.value_to_scalar(args[0])?;
-                let b = self.value_to_scalar(args[1])?;
+                let a = self.read_value(args[0])?;
+                let b = self.read_value(args[1])?;
                 // check x % y != 0
-                if !self.binary_op(mir::BinOp::Rem, a, ty, b, ty)?.0.is_null() {
+                if !self.binary_op(mir::BinOp::Rem, a, b)?.0.is_null() {
                     return err!(ValidationFailure(format!("exact_div: {:?} cannot be divided by {:?}", a, b)));
                 }
-                let result = self.binary_op(mir::BinOp::Div, a, ty, b, ty)?;
-                self.write_scalar(dest, result.0, dest_layout.ty)?;
+                let result = self.binary_op(mir::BinOp::Div, a, b)?;
+                self.write_scalar(result.0, dest)?;
             },
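
A minimal sketch of the guard enforced above, written as a safe helper (the helper name is illustrative, not part of the patch):

    fn exact_div_checked(x: i32, y: i32) -> Option<i32> {
        // Mirror the three UB conditions listed in the comment above.
        if y == 0 || (x == i32::MIN && y == -1) || x % y != 0 {
            return None; // UB for the intrinsic; a hard error under Miri
        }
        Some(x / y)
    }
    assert_eq!(exact_div_checked(12, 4), Some(3));
    assert_eq!(exact_div_checked(12, 5), None);
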
 
             "likely" | "unlikely" | "forget" => {}
 
             "init" => {
-                // we don't want to force an allocation in case the destination is a simple value
-                match dest {
-                    Place::Local { frame, local } => {
-                        match self.stack()[frame].locals[local].access()? {
-                            Value::ByRef(ptr, _) => {
-                                // These writes have no alignment restriction anyway.
-                                self.memory.write_repeat(ptr, 0, dest_layout.size)?;
-                            }
-                            Value::Scalar(_) => self.write_value(ValTy { value: Value::Scalar(Scalar::null(dest_layout.size).into()), ty: dest_layout.ty }, dest)?,
-                            Value::ScalarPair(..) => {
-                                self.write_value(ValTy { value: Value::ScalarPair(Scalar::null(dest_layout.size).into(), Scalar::null(dest_layout.size).into()), ty: dest_layout.ty }, dest)?;
-                            }
-                        }
-                    },
-                    Place::Ptr {
-                        ptr,
-                        align: _align,
-                        extra: PlaceExtra::None,
-                    } => self.memory.write_repeat(ptr.unwrap_or_err()?, 0, dest_layout.size)?,
-                    Place::Ptr { .. } => {
-                        bug!("init intrinsic tried to write to fat or unaligned ptr target")
+                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
+                // but we also do not want to create a new allocation with 0s and then copy that over.
+                match dest.layout.abi {
+                    layout::Abi::Scalar(ref s) => {
+                        let x = Scalar::null(s.value.size(&self));
+                        self.write_value(Value::Scalar(x.into()), dest)?;
+                    }
+                    layout::Abi::ScalarPair(ref s1, ref s2) => {
+                        let x = Scalar::null(s1.value.size(&self));
+                        let y = Scalar::null(s2.value.size(&self));
+                        self.write_value(Value::ScalarPair(x.into(), y.into()), dest)?;
+                    }
+                    _ => {
+                        // Do it in memory
+                        let mplace = self.force_allocation(dest)?;
+                        assert_eq!(mplace.extra, PlaceExtra::None);
+                        self.memory.write_repeat(mplace.ptr, 0, dest.layout.size)?;
                     }
                 }
             }
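
`init` is the intrinsic behind `mem::zeroed`, so the fast path writes all-zero scalars of the right size instead of round-tripping through a fresh allocation. At the surface:

    // Every byte of the result is zero, whatever the type's layout:
    let x: u64 = unsafe { std::mem::zeroed() };
    assert_eq!(x, 0);
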
@@ -360,7 +319,7 @@ fn call_intrinsic(
                 let elem_align = self.layout_of(elem_ty)?.align.abi();
                 let ptr_size = self.memory.pointer_size();
                 let align_val = Scalar::from_uint(elem_align as u128, ptr_size);
-                self.write_scalar(dest, align_val, dest_layout.ty)?;
+                self.write_scalar(align_val, dest)?;
             }
 
             "pref_align_of" => {
@@ -369,14 +328,12 @@ fn call_intrinsic(
                 let align = layout.align.pref();
                 let ptr_size = self.memory.pointer_size();
                 let align_val = Scalar::from_uint(align as u128, ptr_size);
-                self.write_scalar(dest, align_val, dest_layout.ty)?;
+                self.write_scalar(align_val, dest)?;
             }
 
             "move_val_init" => {
-                let ty = substs.type_at(0);
-                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
-                let align = self.layout_of(args[0].ty)?.align;
-                self.write_value_to_ptr(args[1].value, ptr, align, ty)?;
+                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
+                self.copy_op(args[1], ptr.into())?;
             }
 
             "needs_drop" => {
@@ -384,120 +341,116 @@ fn call_intrinsic(
                 let env = ty::ParamEnv::reveal_all();
                 let needs_drop = ty.needs_drop(self.tcx.tcx, env);
                 self.write_scalar(
-                    dest,
                     Scalar::from_bool(needs_drop),
-                    dest_layout.ty,
+                    dest,
                 )?;
             }
 
             "offset" => {
-                let offset = self.value_to_isize(args[1])?;
-                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
-                let result_ptr = self.pointer_offset(ptr, substs.type_at(0), offset)?;
-                self.write_ptr(dest, result_ptr, dest_layout.ty)?;
+                let offset = self.read_scalar(args[1])?.to_isize(&self)?;
+                let ptr = self.read_scalar(args[0])?.not_undef()?;
+                let result_ptr = self.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
+                self.write_scalar(result_ptr, dest)?;
             }
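
The inbounds rule enforced by `pointer_offset_inbounds` (defined in operator.rs below) is the usual contract of `offset`:

    let a = [1u8, 2, 3];
    let p = a.as_ptr();
    let _one_past_end = unsafe { p.offset(3) }; // ok: one past the end is still in bounds
    // unsafe { p.offset(4) };                  // out of bounds: Miri rejects this
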
 
             "overflowing_sub" => {
-                self.intrinsic_overflowing(
+                let l = self.read_value(args[0])?;
+                let r = self.read_value(args[1])?;
+                self.binop_ignore_overflow(
                     mir::BinOp::Sub,
-                    args[0],
-                    args[1],
+                    l,
+                    r,
                     dest,
-                    dest_layout.ty,
                 )?;
             }
 
             "overflowing_mul" => {
-                self.intrinsic_overflowing(
+                let l = self.read_value(args[0])?;
+                let r = self.read_value(args[1])?;
+                self.binop_ignore_overflow(
                     mir::BinOp::Mul,
-                    args[0],
-                    args[1],
+                    l,
+                    r,
                     dest,
-                    dest_layout.ty,
                 )?;
             }
 
             "overflowing_add" => {
-                self.intrinsic_overflowing(
+                let l = self.read_value(args[0])?;
+                let r = self.read_value(args[1])?;
+                self.binop_ignore_overflow(
                     mir::BinOp::Add,
-                    args[0],
-                    args[1],
+                    l,
+                    r,
                     dest,
-                    dest_layout.ty,
                 )?;
             }
 
             "powf32" => {
-                let f = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(32))?;
+                let f = self.read_scalar(args[0])?.to_bits(Size::from_bits(32))?;
                 let f = f32::from_bits(f as u32);
-                let f2 = self.value_to_scalar(args[1])?.to_bits(Size::from_bits(32))?;
+                let f2 = self.read_scalar(args[1])?.to_bits(Size::from_bits(32))?;
                 let f2 = f32::from_bits(f2 as u32);
                 self.write_scalar(
-                    dest,
                     Scalar::from_f32(f.powf(f2)),
-                    dest_layout.ty,
+                    dest,
                 )?;
             }
 
             "powf64" => {
-                let f = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(64))?;
+                let f = self.read_scalar(args[0])?.to_bits(Size::from_bits(64))?;
                 let f = f64::from_bits(f as u64);
-                let f2 = self.value_to_scalar(args[1])?.to_bits(Size::from_bits(64))?;
+                let f2 = self.read_scalar(args[1])?.to_bits(Size::from_bits(64))?;
                 let f2 = f64::from_bits(f2 as u64);
                 self.write_scalar(
-                    dest,
                     Scalar::from_f64(f.powf(f2)),
-                    dest_layout.ty,
+                    dest,
                 )?;
             }
 
             "fmaf32" => {
-                let a = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(32))?;
+                let a = self.read_scalar(args[0])?.to_bits(Size::from_bits(32))?;
                 let a = f32::from_bits(a as u32);
-                let b = self.value_to_scalar(args[1])?.to_bits(Size::from_bits(32))?;
+                let b = self.read_scalar(args[1])?.to_bits(Size::from_bits(32))?;
                 let b = f32::from_bits(b as u32);
-                let c = self.value_to_scalar(args[2])?.to_bits(Size::from_bits(32))?;
+                let c = self.read_scalar(args[2])?.to_bits(Size::from_bits(32))?;
                 let c = f32::from_bits(c as u32);
                 self.write_scalar(
-                    dest,
                     Scalar::from_f32(a * b + c),
-                    dest_layout.ty,
+                    dest,
                 )?;
             }
 
             "fmaf64" => {
-                let a = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(64))?;
+                let a = self.read_scalar(args[0])?.to_bits(Size::from_bits(64))?;
                 let a = f64::from_bits(a as u64);
-                let b = self.value_to_scalar(args[1])?.to_bits(Size::from_bits(64))?;
+                let b = self.read_scalar(args[1])?.to_bits(Size::from_bits(64))?;
                 let b = f64::from_bits(b as u64);
-                let c = self.value_to_scalar(args[2])?.to_bits(Size::from_bits(64))?;
+                let c = self.read_scalar(args[2])?.to_bits(Size::from_bits(64))?;
                 let c = f64::from_bits(c as u64);
                 self.write_scalar(
-                    dest,
                     Scalar::from_f64(a * b + c),
-                    dest_layout.ty,
+                    dest,
                 )?;
             }
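
Note that `a * b + c` as computed here rounds twice, while a true fused multiply-add rounds once; the difference is observable via `mul_add` (plain Rust, semantics only):

    let (a, b, c) = (0.1_f64, 10.0_f64, -1.0_f64);
    let fused = a.mul_add(b, c); // single rounding: a tiny nonzero residue
    let separate = a * b + c;    // two roundings: exactly 0.0 here
    assert_ne!(fused, separate);
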
 
             "powif32" => {
-                let f = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(32))?;
+                let f = self.read_scalar(args[0])?.to_bits(Size::from_bits(32))?;
                 let f = f32::from_bits(f as u32);
-                let i = self.value_to_i32(args[1])?;
+                let i = self.read_scalar(args[1])?.to_i32()?;
                 self.write_scalar(
-                    dest,
                     Scalar::from_f32(f.powi(i)),
-                    dest_layout.ty,
+                    dest,
                 )?;
             }
 
             "powif64" => {
-                let f = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(64))?;
+                let f = self.read_scalar(args[0])?.to_bits(Size::from_bits(64))?;
                 let f = f64::from_bits(f as u64);
-                let i = self.value_to_i32(args[1])?;
+                let i = self.read_scalar(args[1])?.to_i32()?;
                 self.write_scalar(
-                    dest,
                     Scalar::from_f64(f.powi(i)),
-                    dest_layout.ty,
+                    dest,
                 )?;
             }
 
@@ -505,29 +458,27 @@ fn call_intrinsic(
                 let ty = substs.type_at(0);
                 let size = self.layout_of(ty)?.size.bytes();
                 let ptr_size = self.memory.pointer_size();
-                self.write_scalar(dest, Scalar::from_uint(size, ptr_size), dest_layout.ty)?;
+                self.write_scalar(Scalar::from_uint(size, ptr_size), dest)?;
             }
 
             "size_of_val" => {
-                let ty = substs.type_at(0);
-                let (size, _) = self.size_and_align_of_dst(ty, args[0].value)?;
+                let mplace = self.ref_to_mplace(self.read_value(args[0])?)?;
+                let (size, _) = self.size_and_align_of_mplace(mplace)?;
                 let ptr_size = self.memory.pointer_size();
                 self.write_scalar(
-                    dest,
                     Scalar::from_uint(size.bytes() as u128, ptr_size),
-                    dest_layout.ty,
+                    dest,
                 )?;
             }
 
             "min_align_of_val" |
             "align_of_val" => {
-                let ty = substs.type_at(0);
-                let (_, align) = self.size_and_align_of_dst(ty, args[0].value)?;
+                let mplace = self.ref_to_mplace(self.read_value(args[0])?)?;
+                let (_, align) = self.size_and_align_of_mplace(mplace)?;
                 let ptr_size = self.memory.pointer_size();
                 self.write_scalar(
-                    dest,
                     Scalar::from_uint(align.abi(), ptr_size),
-                    dest_layout.ty,
+                    dest,
                 )?;
             }
 
@@ -535,110 +486,105 @@ fn call_intrinsic(
                 let ty = substs.type_at(0);
                 let ty_name = ty.to_string();
                 let value = self.str_to_value(&ty_name)?;
-                self.write_value(ValTy { value, ty: dest_layout.ty }, dest)?;
+                self.write_value(value, dest)?;
             }
             "type_id" => {
                 let ty = substs.type_at(0);
                 let n = self.tcx.type_id_hash(ty);
-                self.write_scalar(dest, Scalar::Bits { bits: n as u128, size: 8 }, dest_layout.ty)?;
+                self.write_scalar(Scalar::Bits { bits: n as u128, size: 8 }, dest)?;
             }
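
`type_id_hash` is what surfaces as `std::any::TypeId`, whose only guarantee is type identity:

    use std::any::TypeId;
    assert_eq!(TypeId::of::<u32>(), TypeId::of::<u32>());
    assert_ne!(TypeId::of::<u32>(), TypeId::of::<i32>());
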
 
             "transmute" => {
-                let src_ty = substs.type_at(0);
-                let _src_align = self.layout_of(src_ty)?.align;
-                let ptr = self.force_allocation(dest)?.to_ptr()?;
-                let dest_align = self.layout_of(substs.type_at(1))?.align;
-                self.write_value_to_ptr(args[0].value, ptr.into(), dest_align, src_ty).unwrap();
+                // Go through an allocation, to make sure the completely different layouts
+                // do not pose a problem.  (When the user transmutes through a union,
+                // there will not be a layout mismatch.)
+                let dest = self.force_allocation(dest)?;
+                self.copy_op(args[0], dest.into())?;
             }
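
Forcing an allocation makes the copy purely byte-based, which is exactly the surface meaning of `transmute`, e.g. reinterpreting a float's bits:

    let bits: u32 = unsafe { std::mem::transmute(1.0_f32) };
    assert_eq!(bits, 0x3f80_0000); // IEEE-754 encoding of 1.0
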
 
             "unchecked_shl" => {
-                let bits = dest_layout.size.bytes() as u128 * 8;
-                let rhs = self.value_to_scalar(args[1])?
-                    .to_bytes()?;
-                if rhs >= bits {
+                let bits = dest.layout.size.bytes() as u128 * 8;
+                let l = self.read_value(args[0])?;
+                let r = self.read_value(args[1])?;
+                let rval = r.to_scalar()?.to_bytes()?;
+                if rval >= bits {
                     return err!(Intrinsic(
-                        format!("Overflowing shift by {} in unchecked_shl", rhs),
+                        format!("Overflowing shift by {} in unchecked_shl", rval),
                     ));
                 }
-                self.intrinsic_overflowing(
+                self.binop_ignore_overflow(
                     mir::BinOp::Shl,
-                    args[0],
-                    args[1],
+                    l,
+                    r,
                     dest,
-                    dest_layout.ty,
                 )?;
             }
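
The guard mirrors the usual rule that shifting by at least the bit width is undefined; a safe sketch of the same check (helper name illustrative):

    fn shl_guarded(x: u32, s: u32) -> Option<u32> {
        // s >= bit width is UB for unchecked_shl, so refuse it here.
        if s >= 32 { None } else { Some(x << s) }
    }
    assert_eq!(shl_guarded(1, 31), Some(1 << 31));
    assert_eq!(shl_guarded(1, 32), None);
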
 
             "unchecked_shr" => {
-                let bits = dest_layout.size.bytes() as u128 * 8;
-                let rhs = self.value_to_scalar(args[1])?
-                    .to_bytes()?;
-                if rhs >= bits {
+                let bits = dest.layout.size.bytes() as u128 * 8;
+                let l = self.read_value(args[0])?;
+                let r = self.read_value(args[1])?;
+                let rval = r.to_scalar()?.to_bytes()?;
+                if rval >= bits {
                     return err!(Intrinsic(
-                        format!("Overflowing shift by {} in unchecked_shr", rhs),
+                        format!("Overflowing shift by {} in unchecked_shr", rval),
                     ));
                 }
-                self.intrinsic_overflowing(
+                self.binop_ignore_overflow(
                     mir::BinOp::Shr,
-                    args[0],
-                    args[1],
+                    l,
+                    r,
                     dest,
-                    dest_layout.ty,
                 )?;
             }
 
             "unchecked_div" => {
-                let rhs = self.value_to_scalar(args[1])?
-                    .to_bytes()?;
-                if rhs == 0 {
+                let l = self.read_value(args[0])?;
+                let r = self.read_value(args[1])?;
+                let rval = r.to_scalar()?.to_bytes()?;
+                if rval == 0 {
                     return err!(Intrinsic(format!("Division by 0 in unchecked_div")));
                 }
-                self.intrinsic_overflowing(
+                self.binop_ignore_overflow(
                     mir::BinOp::Div,
-                    args[0],
-                    args[1],
+                    l,
+                    r,
                     dest,
-                    dest_layout.ty,
                 )?;
             }
 
             "unchecked_rem" => {
-                let rhs = self.value_to_scalar(args[1])?
-                    .to_bytes()?;
-                if rhs == 0 {
+                let l = self.read_value(args[0])?;
+                let r = self.read_value(args[1])?;
+                let rval = r.to_scalar()?.to_bytes()?;
+                if rval == 0 {
                     return err!(Intrinsic(format!("Division by 0 in unchecked_rem")));
                 }
-                self.intrinsic_overflowing(
+                self.binop_ignore_overflow(
                     mir::BinOp::Rem,
-                    args[0],
-                    args[1],
+                    l,
+                    r,
                     dest,
-                    dest_layout.ty,
                 )?;
             }
 
             "uninit" => {
-                // we don't want to force an allocation in case the destination is a simple value
-                match dest {
-                    Place::Local { frame, local } => {
-                        match self.stack()[frame].locals[local].access()? {
-                            Value::ByRef(ptr, _) => {
-                                // These writes have no alignment restriction anyway.
-                                self.memory.mark_definedness(ptr, dest_layout.size, false)?;
-                            }
-                            Value::Scalar(_) => self.write_value(ValTy { value: Value::Scalar(ScalarMaybeUndef::Undef), ty: dest_layout.ty }, dest)?,
-                            Value::ScalarPair(..) => {
-                                self.write_value(ValTy { value: Value::ScalarPair(ScalarMaybeUndef::Undef, ScalarMaybeUndef::Undef), ty: dest_layout.ty }, dest)?;
-                            }
-                        }
-                    },
-                    Place::Ptr {
-                        ptr,
-                        align: _align,
-                        extra: PlaceExtra::None,
-                    } => self.memory.mark_definedness(ptr.unwrap_or_err()?, dest_layout.size, false)?,
-                    Place::Ptr { .. } => {
-                        bug!("uninit intrinsic tried to write to fat or unaligned ptr target")
+                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
+                // but we also do not want to create a new undef allocation and then copy that over.
+                match dest.layout.abi {
+                    layout::Abi::Scalar(..) => {
+                        let x = ScalarMaybeUndef::Undef;
+                        self.write_value(Value::Scalar(x), dest)?;
+                    }
+                    layout::Abi::ScalarPair(..) => {
+                        let x = ScalarMaybeUndef::Undef;
+                        self.write_value(Value::ScalarPair(x, x), dest)?;
+                    }
+                    _ => {
+                        // Do it in memory
+                        let mplace = self.force_allocation(dest)?;
+                        assert_eq!(mplace.extra, PlaceExtra::None);
+                        self.memory.mark_definedness(mplace.ptr, dest.layout.size, false)?;
                     }
                 }
             }
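
`uninit` is the intrinsic behind the old `mem::uninitialized`; marking the destination bytes undefined, rather than writing anything, is what lets Miri catch reads of uninitialized data. The modern surface API for this pattern is `MaybeUninit`:

    use std::mem::MaybeUninit;

    let mut slot = MaybeUninit::<u64>::uninit(); // bytes are undef, as above
    slot.write(42);                              // must initialize before reading
    let v = unsafe { slot.assume_init() };
    assert_eq!(v, 42);
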
@@ -646,9 +592,9 @@ fn call_intrinsic(
             "write_bytes" => {
                 let ty = substs.type_at(0);
                 let ty_layout = self.layout_of(ty)?;
-                let val_byte = self.value_to_u8(args[1])?;
-                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
-                let count = self.value_to_usize(args[2])?;
+                let val_byte = self.read_scalar(args[1])?.to_u8()?;
+                let ptr = self.read_scalar(args[0])?.not_undef()?;
+                let count = self.read_scalar(args[2])?.to_usize(&self)?;
                 if count > 0 {
                     // HashMap relies on write_bytes on a NULL ptr with count == 0 to work
                     // TODO: Should we, at least, validate the alignment? (Also see the copy intrinsic)
index 705f56d38f39d4f62fb386b3112cf806419830dd..3680d49836fb52ca3499b99536837f7c179f946c 100644 (file)
@@ -27,6 +27,7 @@
 use syntax::ast::Mutability;
 use syntax::codemap::Span;
 
+use std::marker::PhantomData;
 use std::collections::{HashMap, BTreeMap};
 use std::hash::{Hash, Hasher};
 
 mod tls;
 mod locks;
 mod range_map;
-mod validation;
 
 use fn_call::EvalContextExt as MissingFnsEvalContextExt;
 use operator::EvalContextExt as OperatorEvalContextExt;
 use intrinsic::EvalContextExt as IntrinsicEvalContextExt;
 use tls::EvalContextExt as TlsEvalContextExt;
 use locks::LockInfo;
-use locks::MemoryExt as LockMemoryExt;
-use validation::EvalContextExt as ValidationEvalContextExt;
 use range_map::RangeMap;
-use validation::{ValidationQuery, AbsPlace};
-
-pub trait ScalarExt {
-    fn null(size: Size) -> Self;
-    fn from_i32(i: i32) -> Self;
-    fn from_uint(i: impl Into<u128>, ptr_size: Size) -> Self;
-    fn from_int(i: impl Into<i128>, ptr_size: Size) -> Self;
-    fn from_f32(f: f32) -> Self;
-    fn from_f64(f: f64) -> Self;
-    fn to_usize<'a, 'mir, 'tcx>(self, ecx: &rustc_mir::interpret::EvalContext<'a, 'mir, 'tcx, Evaluator<'tcx>>) -> EvalResult<'static, u64>;
-    fn is_null(self) -> bool;
-    /// HACK: this function just extracts all bits if `defined != 0`
-    /// Mainly used for args of C-functions and we should totally correctly fetch the size
-    /// of their arguments
-    fn to_bytes(self) -> EvalResult<'static, u128>;
-}
-
-impl ScalarExt for Scalar {
-    fn null(size: Size) -> Self {
-        Scalar::Bits { bits: 0, size: size.bytes() as u8 }
-    }
-
-    fn from_i32(i: i32) -> Self {
-        Scalar::Bits { bits: i as u32 as u128, size: 4 }
-    }
-
-    fn from_uint(i: impl Into<u128>, ptr_size: Size) -> Self {
-        Scalar::Bits { bits: i.into(), size: ptr_size.bytes() as u8 }
-    }
-
-    fn from_int(i: impl Into<i128>, ptr_size: Size) -> Self {
-        Scalar::Bits { bits: i.into() as u128, size: ptr_size.bytes() as u8 }
-    }
-
-    fn from_f32(f: f32) -> Self {
-        Scalar::Bits { bits: f.to_bits() as u128, size: 4 }
-    }
-
-    fn from_f64(f: f64) -> Self {
-        Scalar::Bits { bits: f.to_bits() as u128, size: 8 }
-    }
-
-    fn to_usize<'a, 'mir, 'tcx>(self, ecx: &rustc_mir::interpret::EvalContext<'a, 'mir, 'tcx, Evaluator<'tcx>>) -> EvalResult<'static, u64> {
-        let b = self.to_bits(ecx.memory.pointer_size())?;
-        assert_eq!(b as u64 as u128, b);
-        Ok(b as u64)
-    }
-
-    fn is_null(self) -> bool {
-        match self {
-            Scalar::Bits { bits, .. } => bits == 0,
-            Scalar::Ptr(_) => false
-        }
-    }
-
-    fn to_bytes(self) -> EvalResult<'static, u128> {
-        match self {
-            Scalar::Bits { bits, size } => {
-                assert_ne!(size, 0);
-                Ok(bits)
-            },
-            Scalar::Ptr(_) => err!(ReadPointerAsBytes),
-        }
-    }
-}
+use helpers::{ScalarExt, FalibleScalarExt};
 
 pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
     tcx: TyCtxt<'a, 'tcx, 'tcx>,
@@ -180,31 +114,22 @@ pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
         // First argument: pointer to main()
         let main_ptr = ecx.memory_mut().create_fn_alloc(main_instance);
         let dest = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
-        let main_ty = main_instance.ty(ecx.tcx.tcx);
-        let main_ptr_ty = ecx.tcx.mk_fn_ptr(main_ty.fn_sig(ecx.tcx.tcx));
-        ecx.write_value(
-            ValTy {
-                value: Value::Scalar(Scalar::Ptr(main_ptr).into()),
-                ty: main_ptr_ty,
-            },
-            dest,
-        )?;
+        ecx.write_scalar(Scalar::Ptr(main_ptr), dest)?;
 
         // Second argument (argc): 1
         let dest = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
-        let ty = ecx.tcx.types.isize;
-        ecx.write_scalar(dest, Scalar::from_int(1, ptr_size), ty)?;
+        ecx.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
 
         // FIXME: extract main source file path
         // Third argument (argv): &[b"foo"]
         let dest = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
-        let ty = ecx.tcx.mk_imm_ptr(ecx.tcx.mk_imm_ptr(ecx.tcx.types.u8));
         let foo = ecx.memory.allocate_bytes(b"foo\0");
-        let ptr_align = ecx.tcx.data_layout.pointer_align;
-        let foo_ptr = ecx.memory.allocate(ptr_size, ptr_align, MemoryKind::Stack)?;
-        ecx.memory.write_scalar(foo_ptr.into(), ptr_align, Scalar::Ptr(foo).into(), ptr_size, ptr_align, false)?;
-        ecx.memory.mark_static_initialized(foo_ptr.alloc_id, Mutability::Immutable)?;
-        ecx.write_ptr(dest, foo_ptr.into(), ty)?;
+        let foo_ty = ecx.tcx.mk_imm_ptr(ecx.tcx.types.u8);
+        let foo_layout = ecx.layout_of(foo_ty)?;
+        let foo_place = ecx.allocate(foo_layout, MemoryKind::Stack)?;
+        ecx.write_scalar(Scalar::Ptr(foo), foo_place.into())?;
+        ecx.memory.mark_static_initialized(foo_place.to_ptr()?.alloc_id, Mutability::Immutable)?;
+        ecx.write_scalar(foo_place.ptr, dest)?;
 
         assert!(args.next().is_none(), "start lang item has more arguments than expected");
     } else {
@@ -293,15 +218,15 @@ pub struct Evaluator<'tcx> {
     /// Miri does not expose env vars from the host to the emulated program
     pub(crate) env_vars: HashMap<Vec<u8>, Pointer>,
 
-    /// Places that were suspended by the validation subsystem, and will be recovered later
-    pub(crate) suspended: HashMap<DynamicLifetime, Vec<ValidationQuery<'tcx>>>,
+    /// Use the `'tcx` lifetime parameter
+    _dummy: PhantomData<&'tcx ()>,
 }
 
 impl<'tcx> Hash for Evaluator<'tcx> {
     fn hash<H: Hasher>(&self, state: &mut H) {
         let Evaluator {
             env_vars,
-            suspended: _,
+            _dummy: _,
         } = self;
 
         env_vars.iter()
@@ -373,34 +298,32 @@ impl<'mir, 'tcx: 'mir> Machine<'mir, 'tcx> for Evaluator<'tcx> {
     fn eval_fn_call<'a>(
         ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
         instance: ty::Instance<'tcx>,
-        destination: Option<(Place, mir::BasicBlock)>,
-        args: &[ValTy<'tcx>],
+        destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
+        args: &[OpTy<'tcx>],
         span: Span,
-        sig: ty::FnSig<'tcx>,
     ) -> EvalResult<'tcx, bool> {
-        ecx.eval_fn_call(instance, destination, args, span, sig)
+        ecx.eval_fn_call(instance, destination, args, span)
     }
 
     fn call_intrinsic<'a>(
         ecx: &mut rustc_mir::interpret::EvalContext<'a, 'mir, 'tcx, Self>,
         instance: ty::Instance<'tcx>,
-        args: &[ValTy<'tcx>],
-        dest: Place,
-        dest_layout: TyLayout<'tcx>,
+        args: &[OpTy<'tcx>],
+        dest: PlaceTy<'tcx>,
         target: mir::BasicBlock,
     ) -> EvalResult<'tcx> {
-        ecx.call_intrinsic(instance, args, dest, dest_layout, target)
+        ecx.call_intrinsic(instance, args, dest, target)
     }
 
     fn try_ptr_op<'a>(
         ecx: &rustc_mir::interpret::EvalContext<'a, 'mir, 'tcx, Self>,
         bin_op: mir::BinOp,
         left: Scalar,
-        left_ty: ty::Ty<'tcx>,
+        left_layout: TyLayout<'tcx>,
         right: Scalar,
-        right_ty: ty::Ty<'tcx>,
+        right_layout: TyLayout<'tcx>,
     ) -> EvalResult<'tcx, Option<(Scalar, bool)>> {
-        ecx.ptr_op(bin_op, left, left_ty, right, right_ty)
+        ecx.ptr_op(bin_op, left, left_layout, right, right_layout)
     }
 
     fn mark_static_initialized<'a>(
@@ -460,14 +383,16 @@ fn init_static<'a>(
         let call_stackframe = ecx.stack().len();
         while ecx.step()? && ecx.stack().len() >= call_stackframe {
             if ecx.stack().len() == call_stackframe {
-                let frame = ecx.frame_mut();
-                let bb = &frame.mir.basic_blocks()[frame.block];
-                if bb.statements.len() == frame.stmt && !bb.is_cleanup {
-                    if let ::rustc::mir::TerminatorKind::Return = bb.terminator().kind {
-                        for (local, _local_decl) in mir.local_decls.iter_enumerated().skip(1) {
-                            // Don't deallocate locals, because the return value might reference them
-                            frame.storage_dead(local);
-                        }
+                let cleanup = {
+                    let frame = ecx.frame();
+                    let bb = &frame.mir.basic_blocks()[frame.block];
+                    bb.statements.len() == frame.stmt && !bb.is_cleanup &&
+                        if let ::rustc::mir::TerminatorKind::Return = bb.terminator().kind { true } else { false }
+                };
+                if cleanup {
+                    for (local, _local_decl) in mir.local_decls.iter_enumerated().skip(1) {
+                        // Don't deallocate locals, because the return value might reference them
+                        ecx.storage_dead(local);
                     }
                 }
             }
@@ -481,11 +406,9 @@ fn init_static<'a>(
 
     fn box_alloc<'a>(
         ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
-        ty: ty::Ty<'tcx>,
-        dest: Place,
+        dest: PlaceTy<'tcx>,
     ) -> EvalResult<'tcx> {
-        let layout = ecx.layout_of(ty)?;
-
+        trace!("box_alloc for {:?}", dest.layout.ty);
         // Call the `exchange_malloc` lang item
         let malloc = ecx.tcx.lang_items().exchange_malloc_fn().unwrap();
         let malloc = ty::Instance::mono(ecx.tcx.tcx, malloc);
@@ -494,7 +417,7 @@ fn box_alloc<'a>(
             malloc,
             malloc_mir.span,
             malloc_mir,
-            dest,
+            *dest,
             // Don't do anything when we are done.  The statement() function will increment
             // the old stack frame's stmt counter to the next statement, which means that when
             // exchange_malloc returns, we go on evaluating exactly where we want to be.
@@ -502,31 +425,18 @@ fn box_alloc<'a>(
         )?;
 
         let mut args = ecx.frame().mir.args_iter();
-        let usize = ecx.tcx.types.usize;
-        let ptr_size = ecx.memory.pointer_size();
+        let layout = ecx.layout_of(dest.layout.ty.builtin_deref(false).unwrap().ty)?;
 
         // First argument: size
-        let dest = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
-        ecx.write_value(
-            ValTy {
-                value: Value::Scalar(Scalar::from_uint(match layout.size.bytes() {
-                    0 => 1,
-                    size => size,
-                }, ptr_size).into()),
-                ty: usize,
-            },
-            dest,
-        )?;
+        // (0 is allowed here, this is expected to be handled by the lang item)
+        let arg = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
+        let size = layout.size.bytes();
+        ecx.write_scalar(Scalar::from_uint(size, arg.layout.size), arg)?;
 
         // Second argument: align
-        let dest = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
-        ecx.write_value(
-            ValTy {
-                value: Value::Scalar(Scalar::from_uint(layout.align.abi(), ptr_size).into()),
-                ty: usize,
-            },
-            dest,
-        )?;
+        let arg = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
+        let align = layout.align.abi();
+        ecx.write_scalar(Scalar::from_uint(align, arg.layout.size), arg)?;
 
         // No more arguments
         assert!(args.next().is_none(), "exchange_malloc lang item has more arguments than expected");
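
The two writes above fill in the size and align parameters of the `exchange_malloc` lang item; e.g. boxing a `u64` reaches it with size 8 and align 8 (illustration, not part of the patch):

    assert_eq!(std::mem::size_of::<u64>(), 8);
    assert_eq!(std::mem::align_of::<u64>(), 8);
    let _b = Box::new(5u64); // ends up calling exchange_malloc with those values
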
@@ -542,52 +452,32 @@ fn global_item_with_linkage<'a>(
     }
 
     fn check_locks<'a>(
-        mem: &Memory<'a, 'mir, 'tcx, Self>,
-        ptr: Pointer,
-        size: Size,
-        access: AccessKind,
+        _mem: &Memory<'a, 'mir, 'tcx, Self>,
+        _ptr: Pointer,
+        _size: Size,
+        _access: AccessKind,
     ) -> EvalResult<'tcx> {
-        mem.check_locks(ptr, size.bytes(), access)
+        Ok(())
     }
 
     fn add_lock<'a>(
-        mem: &mut Memory<'a, 'mir, 'tcx, Self>,
-        id: AllocId,
-    ) {
-        mem.data.locks.insert(id, RangeMap::new());
-    }
+        _mem: &mut Memory<'a, 'mir, 'tcx, Self>,
+        _id: AllocId,
+    ) { }
 
     fn free_lock<'a>(
-        mem: &mut Memory<'a, 'mir, 'tcx, Self>,
-        id: AllocId,
-        len: u64,
+        _mem: &mut Memory<'a, 'mir, 'tcx, Self>,
+        _id: AllocId,
+        _len: u64,
     ) -> EvalResult<'tcx> {
-        mem.data.locks
-            .remove(&id)
-            .expect("allocation has no corresponding locks")
-            .check(
-                Some(mem.cur_frame),
-                0,
-                len,
-                AccessKind::Read,
-            )
-            .map_err(|lock| {
-                EvalErrorKind::DeallocatedLockedMemory {
-                    //ptr, FIXME
-                    ptr: Pointer {
-                        alloc_id: AllocId(0),
-                        offset: Size::from_bytes(0),
-                    },
-                    lock: lock.active,
-                }.into()
-            })
+        Ok(())
     }
 
     fn end_region<'a>(
-        ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
-        reg: Option<::rustc::middle::region::Scope>,
+        _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
+        _reg: Option<::rustc::middle::region::Scope>,
     ) -> EvalResult<'tcx> {
-        ecx.end_region(reg)
+        Ok(())
     }
 
     fn validation_op<'a>(
index 9f4126ad82b60002e105863b78712e8f10ae5278..a87ff6367e3ad89c398360e87013b3d994586d22 100644 (file)
@@ -1,3 +1,5 @@
+#![allow(unused)]
+
 use super::*;
 use rustc::middle::region;
 use rustc::ty::layout::Size;
@@ -6,6 +8,9 @@
 // Locks
 ////////////////////////////////////////////////////////////////////////////////
 
+// Just some dummy to keep this compiling; I think some of this will be useful later
+type AbsPlace<'tcx> = ::rustc::ty::Ty<'tcx>;
+
 /// Information about a lock that is currently held.
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct LockInfo<'tcx> {
@@ -67,321 +72,6 @@ fn access_permitted(&self, frame: Option<usize>, access: AccessKind) -> bool {
     }
 }
 
-pub trait MemoryExt<'tcx> {
-    fn check_locks(
-        &self,
-        ptr: Pointer,
-        len: u64,
-        access: AccessKind,
-    ) -> EvalResult<'tcx>;
-    fn acquire_lock(
-        &mut self,
-        ptr: Pointer,
-        len: u64,
-        region: Option<region::Scope>,
-        kind: AccessKind,
-    ) -> EvalResult<'tcx>;
-    fn suspend_write_lock(
-        &mut self,
-        ptr: Pointer,
-        len: u64,
-        lock_path: &AbsPlace<'tcx>,
-        suspend: Option<region::Scope>,
-    ) -> EvalResult<'tcx>;
-    fn recover_write_lock(
-        &mut self,
-        ptr: Pointer,
-        len: u64,
-        lock_path: &AbsPlace<'tcx>,
-        lock_region: Option<region::Scope>,
-        suspended_region: region::Scope,
-    ) -> EvalResult<'tcx>;
-    fn locks_lifetime_ended(&mut self, ending_region: Option<region::Scope>);
-}
-
-
-impl<'a, 'mir, 'tcx: 'mir + 'a> MemoryExt<'tcx> for Memory<'a, 'mir, 'tcx, Evaluator<'tcx>> {
-    fn check_locks(
-        &self,
-        ptr: Pointer,
-        len: u64,
-        access: AccessKind,
-    ) -> EvalResult<'tcx> {
-        if len == 0 {
-            return Ok(());
-        }
-        let locks = match self.data.locks.get(&ptr.alloc_id) {
-            Some(locks) => locks,
-            // immutable static or other constant memory
-            None => return Ok(()),
-        };
-        let frame = self.cur_frame;
-        locks
-            .check(Some(frame), ptr.offset.bytes(), len, access)
-            .map_err(|lock| {
-                EvalErrorKind::MemoryLockViolation {
-                    ptr,
-                    len,
-                    frame,
-                    access,
-                    lock: lock.active,
-                }.into()
-            })
-    }
-
-    /// Acquire the lock for the given lifetime
-    fn acquire_lock(
-        &mut self,
-        ptr: Pointer,
-        len: u64,
-        region: Option<region::Scope>,
-        kind: AccessKind,
-    ) -> EvalResult<'tcx> {
-        let frame = self.cur_frame;
-        assert!(len > 0);
-        trace!(
-            "Frame {} acquiring {:?} lock at {:?}, size {} for region {:?}",
-            frame,
-            kind,
-            ptr,
-            len,
-            region
-        );
-        self.check_bounds(ptr.offset(Size::from_bytes(len), &*self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
-
-        let locks = match self.data.locks.get_mut(&ptr.alloc_id) {
-            Some(locks) => locks,
-            // immutable static or other constant memory
-            None => return Ok(()),
-        };
-
-        // Iterate over our range and acquire the lock.  If the range is already split into pieces,
-        // we have to manipulate all of them.
-        let lifetime = DynamicLifetime { frame, region };
-        for lock in locks.iter_mut(ptr.offset.bytes(), len) {
-            if !lock.access_permitted(None, kind) {
-                return err!(MemoryAcquireConflict {
-                    ptr,
-                    len,
-                    kind,
-                    lock: lock.active.clone(),
-                });
-            }
-            // See what we have to do
-            match (&mut lock.active, kind) {
-                (active @ &mut NoLock, AccessKind::Write) => {
-                    *active = WriteLock(lifetime);
-                }
-                (active @ &mut NoLock, AccessKind::Read) => {
-                    *active = ReadLock(vec![lifetime]);
-                }
-                (&mut ReadLock(ref mut lifetimes), AccessKind::Read) => {
-                    lifetimes.push(lifetime);
-                }
-                _ => bug!("We already checked that there is no conflicting lock"),
-            }
-        }
-        Ok(())
-    }
-
-    /// Release or suspend a write lock of the given lifetime prematurely.
-    /// When releasing, if there is a read lock or someone else's write lock, that's an error.
-    /// If no lock is held, that's fine.  This can happen when e.g. a local is initialized
-    /// from a constant, and then suspended.
-    /// When suspending, the same cases are fine; we just register an additional suspension.
-    fn suspend_write_lock(
-        &mut self,
-        ptr: Pointer,
-        len: u64,
-        lock_path: &AbsPlace<'tcx>,
-        suspend: Option<region::Scope>,
-    ) -> EvalResult<'tcx> {
-        assert!(len > 0);
-        let cur_frame = self.cur_frame;
-        let locks = match self.data.locks.get_mut(&ptr.alloc_id) {
-            Some(locks) => locks,
-            // immutable static or other constant memory
-            None => return Ok(()),
-        };
-
-        'locks: for lock in locks.iter_mut(ptr.offset.bytes(), len) {
-            let is_our_lock = match lock.active {
-                WriteLock(lft) =>
-                    // Double-check that we are holding the lock.
-                    // (Due to subtyping, checking the region would not make any sense.)
-                    lft.frame == cur_frame,
-                ReadLock(_) | NoLock => false,
-            };
-            if is_our_lock {
-                trace!("Releasing {:?}", lock.active);
-                // Disable the lock
-                lock.active = NoLock;
-            } else {
-                trace!(
-                    "Not touching {:?} as it is not our lock",
-                    lock.active,
-                );
-            }
-            // Check if we want to register a suspension
-            if let Some(suspend_region) = suspend {
-                let lock_id = WriteLockId {
-                    frame: cur_frame,
-                    path: lock_path.clone(),
-                };
-                trace!("Adding suspension to {:?}", lock_id);
-                let mut new_suspension = false;
-                lock.suspended
-                    .entry(lock_id)
-                    // Remember whether we added a new suspension or not
-                    .or_insert_with(|| { new_suspension = true; Vec::new() })
-                    .push(suspend_region);
-                // If the suspension is new, we should have owned this.
-                // If there already was a suspension, we should NOT have owned this.
-                if new_suspension == is_our_lock {
-                    // All is well
-                    continue 'locks;
-                }
-            } else if !is_our_lock {
-                // All is well.
-                continue 'locks;
-            }
-            // If we get here, releasing this is an error except for NoLock.
-            if lock.active != NoLock {
-                return err!(InvalidMemoryLockRelease {
-                    ptr,
-                    len,
-                    frame: cur_frame,
-                    lock: lock.active.clone(),
-                });
-            }
-        }
-
-        Ok(())
-    }
-
-    /// Release a suspension from the write lock.  If this is the last suspension or if there is no suspension, acquire the lock.
-    fn recover_write_lock(
-        &mut self,
-        ptr: Pointer,
-        len: u64,
-        lock_path: &AbsPlace<'tcx>,
-        lock_region: Option<region::Scope>,
-        suspended_region: region::Scope,
-    ) -> EvalResult<'tcx> {
-        assert!(len > 0);
-        let cur_frame = self.cur_frame;
-        let lock_id = WriteLockId {
-            frame: cur_frame,
-            path: lock_path.clone(),
-        };
-        let locks = match self.data.locks.get_mut(&ptr.alloc_id) {
-            Some(locks) => locks,
-            // immutable static or other constant memory
-            None => return Ok(()),
-        };
-
-        for lock in locks.iter_mut(ptr.offset.bytes(), len) {
-            // Check if we have a suspension here
-            let (got_the_lock, remove_suspension) = match lock.suspended.get_mut(&lock_id) {
-                None => {
-                    trace!("No suspension around, we can just acquire");
-                    (true, false)
-                }
-                Some(suspensions) => {
-                    trace!("Found suspension of {:?}, removing it", lock_id);
-                    // That's us!  Remove suspension (it should be in there).  The same suspension can
-                    // occur multiple times (when there are multiple shared borrows of this that have the same
-                    // lifetime); only remove one of them.
-                    let idx = match suspensions.iter().enumerate().find(|&(_, re)| re == &suspended_region) {
-                        None => // TODO: Can the user trigger this?
-                            bug!("We have this lock suspended, but not for the given region."),
-                        Some((idx, _)) => idx
-                    };
-                    suspensions.remove(idx);
-                    let got_lock = suspensions.is_empty();
-                    if got_lock {
-                        trace!("All suspensions are gone, we can have the lock again");
-                    }
-                    (got_lock, got_lock)
-                }
-            };
-            if remove_suspension {
-                // with NLL, we could do that up in the match above...
-                assert!(got_the_lock);
-                lock.suspended.remove(&lock_id);
-            }
-            if got_the_lock {
-                match lock.active {
-                    ref mut active @ NoLock => {
-                        *active = WriteLock(
-                            DynamicLifetime {
-                                frame: cur_frame,
-                                region: lock_region,
-                            }
-                        );
-                    }
-                    _ => {
-                        return err!(MemoryAcquireConflict {
-                            ptr,
-                            len,
-                            kind: AccessKind::Write,
-                            lock: lock.active.clone(),
-                        })
-                    }
-                }
-            }
-        }
-
-        Ok(())
-    }
-
-    fn locks_lifetime_ended(&mut self, ending_region: Option<region::Scope>) {
-        let cur_frame = self.cur_frame;
-        trace!(
-            "Releasing frame {} locks that expire at {:?}",
-            cur_frame,
-            ending_region
-        );
-        let has_ended = |lifetime: &DynamicLifetime| -> bool {
-            if lifetime.frame != cur_frame {
-                return false;
-            }
-            match ending_region {
-                None => true, // When a function ends, we end *all* its locks. It's okay for a function to still have lifetime-related locks
-                // when it returns, that can happen e.g. with NLL when a lifetime can, but does not have to, extend beyond the
-                // end of a function.  Same for a function still having recoveries.
-                Some(ending_region) => lifetime.region == Some(ending_region),
-            }
-        };
-
-        for alloc_locks in self.data.locks.values_mut() {
-            for lock in alloc_locks.iter_mut_all() {
-                // Delete everything that ends now -- i.e., keep only all the other lifetimes.
-                let lock_ended = match lock.active {
-                    WriteLock(ref lft) => has_ended(lft),
-                    ReadLock(ref mut lfts) => {
-                        lfts.retain(|lft| !has_ended(lft));
-                        lfts.is_empty()
-                    }
-                    NoLock => false,
-                };
-                if lock_ended {
-                    lock.active = NoLock;
-                }
-                // Also clean up suspended write locks when the function returns
-                if ending_region.is_none() {
-                    lock.suspended.retain(|id, _suspensions| id.frame != cur_frame);
-                }
-            }
-            // Clean up the map
-            alloc_locks.retain(|lock| match lock.active {
-                NoLock => !lock.suspended.is_empty(),
-                _ => true,
-            });
-        }
-    }
-}
-
 impl<'tcx> RangeMap<LockInfo<'tcx>> {
     pub fn check(
         &self,
index 7be77771a7cafea4a745be212c9d0fdc75b7840e..3ff38008abd0bd816f2877f291820141203f5b26 100644 (file)
@@ -1,19 +1,17 @@
-use rustc::ty;
-use rustc::ty::layout::Primitive;
+use rustc::ty::{self, Ty};
+use rustc::ty::layout::{TyLayout, Primitive};
 use rustc::mir;
 
 use super::*;
 
-use helpers::EvalContextExt as HelperEvalContextExt;
-
 pub trait EvalContextExt<'tcx> {
     fn ptr_op(
         &self,
         bin_op: mir::BinOp,
         left: Scalar,
-        left_ty: ty::Ty<'tcx>,
+        left_layout: TyLayout<'tcx>,
         right: Scalar,
-        right_ty: ty::Ty<'tcx>,
+        right_layout: TyLayout<'tcx>,
     ) -> EvalResult<'tcx, Option<(Scalar, bool)>>;
 
     fn ptr_int_arithmetic(
@@ -23,6 +21,13 @@ fn ptr_int_arithmetic(
         right: i128,
         signed: bool,
     ) -> EvalResult<'tcx, (Scalar, bool)>;
+
+    fn pointer_offset_inbounds(
+        &self,
+        ptr: Scalar,
+        pointee_ty: Ty<'tcx>,
+        offset: i64,
+    ) -> EvalResult<'tcx, Scalar>;
 }
 
 impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>> {
@@ -30,9 +35,9 @@ fn ptr_op(
         &self,
         bin_op: mir::BinOp,
         left: Scalar,
-        left_ty: ty::Ty<'tcx>,
+        left_layout: TyLayout<'tcx>,
         right: Scalar,
-        right_ty: ty::Ty<'tcx>,
+        right_layout: TyLayout<'tcx>,
     ) -> EvalResult<'tcx, Option<(Scalar, bool)>> {
         trace!("ptr_op: {:?} {:?} {:?}", left, bin_op, right);
 
@@ -45,7 +50,7 @@ fn ptr_op(
             8 => I64,
             16 => I128,
             _ => unreachable!(),
-        }, false);
+        }, /*signed*/ false);
         let isize = Primitive::Int(match self.memory.pointer_size().bytes() {
             1 => I8,
             2 => I16,
@@ -53,24 +58,23 @@ fn ptr_op(
             8 => I64,
             16 => I128,
             _ => unreachable!(),
-        }, true);
-        let left_layout = self.layout_of(left_ty)?;
+        }, /*signed*/ true);
         let left_kind = match left_layout.abi {
             ty::layout::Abi::Scalar(ref scalar) => scalar.value,
-            _ => Err(EvalErrorKind::TypeNotPrimitive(left_ty))?,
+            _ => Err(EvalErrorKind::TypeNotPrimitive(left_layout.ty))?,
         };
-        let right_layout = self.layout_of(right_ty)?;
         let right_kind = match right_layout.abi {
             ty::layout::Abi::Scalar(ref scalar) => scalar.value,
-            _ => Err(EvalErrorKind::TypeNotPrimitive(right_ty))?,
+            _ => Err(EvalErrorKind::TypeNotPrimitive(right_layout.ty))?,
         };
         match bin_op {
-            Offset if left_kind == Primitive::Pointer && right_kind == usize => {
-                let pointee_ty = left_ty
+            Offset => {
+                assert!(left_kind == Primitive::Pointer && right_kind == usize);
+                let pointee_ty = left_layout.ty
                     .builtin_deref(true)
                     .expect("Offset called on non-ptr type")
                     .ty;
-                let ptr = self.pointer_offset(
+                let ptr = self.pointer_offset_inbounds(
                     left,
                     pointee_ty,
                     right.to_bits(self.memory.pointer_size())? as i64,
@@ -114,12 +118,13 @@ fn ptr_op(
                         Gt => left.offset > right.offset,
                         Ge => left.offset >= right.offset,
                         Sub => {
+                            let left_offset = Scalar::from_uint(left.offset.bytes(), self.memory.pointer_size());
+                            let right_offset = Scalar::from_uint(right.offset.bytes(), self.memory.pointer_size());
+                            let layout = self.layout_of(self.tcx.types.usize)?;
                             return self.binary_op(
                                 Sub,
-                                Scalar::Bits { bits: left.offset.bytes() as u128, size: self.memory.pointer_size().bytes() as u8 },
-                                self.tcx.types.usize,
-                                Scalar::Bits { bits: right.offset.bytes() as u128, size: self.memory.pointer_size().bytes() as u8 },
-                                self.tcx.types.usize,
+                                ValTy { value: Value::Scalar(left_offset.into()), layout },
+                                ValTy { value: Value::Scalar(right_offset.into()), layout },
                             ).map(Some)
                         }
                         _ => bug!("We already established it has to be one of these operators."),
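
The `Sub` case reduces pointer difference to plain offset arithmetic within a single allocation, roughly the semantics exposed today by `offset_from` (which postdates this code):

    let a = [0u8; 8];
    let p = &a[6] as *const u8;
    let q = &a[2] as *const u8;
    assert_eq!(unsafe { p.offset_from(q) }, 4); // same allocation, element offset
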
@@ -200,4 +205,40 @@ fn map_to_primval((res, over): (Pointer, bool)) -> (Scalar, bool) {
             }
         })
     }
+
+    /// This function raises an error if the offset moves the pointer outside of its allocation.  We consider
+    /// ZSTs their own huge allocation that doesn't overlap with anything (and nothing moves in there because the size is 0).
+/// We also consider the NULL pointer its own separate allocation, and all the remaining integer pointers their own
+    /// allocation.
+    fn pointer_offset_inbounds(
+        &self,
+        ptr: Scalar,
+        pointee_ty: Ty<'tcx>,
+        offset: i64,
+    ) -> EvalResult<'tcx, Scalar> {
+        if ptr.is_null() {
+            // NULL pointers must only be offset by 0
+            return if offset == 0 {
+                Ok(ptr)
+            } else {
+                err!(InvalidNullPointerUsage)
+            };
+        }
+        // FIXME: assuming here that type size is < i64::max_value()
+        let pointee_size = self.layout_of(pointee_ty)?.size.bytes() as i64;
+        let offset = offset.checked_mul(pointee_size).ok_or_else(|| EvalErrorKind::Overflow(mir::BinOp::Mul))?;
+        // Now let's see what kind of pointer this is
+        if let Scalar::Ptr(ptr) = ptr {
+            // Both old and new pointer must be in-bounds.
+            // (Of the same allocation, but that part is trivial with our representation.)
+            self.memory.check_bounds(ptr, false)?;
+            let ptr = ptr.signed_offset(offset, self)?;
+            self.memory.check_bounds(ptr, false)?;
+            Ok(Scalar::Ptr(ptr))
+        } else {
+            // An integer pointer. They can move around freely, as long as they do not overflow
+            // (which ptr_signed_offset checks).
+            ptr.ptr_signed_offset(offset, self)
+        }
+    }
 }
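
Under these rules the NULL pointer may only be offset by 0, while integer ("dangling") pointers move freely as long as the arithmetic does not overflow:

    let p: *const u8 = std::ptr::null();
    let _ok = unsafe { p.offset(0) }; // allowed: offset by 0 never moves the pointer
    // unsafe { p.offset(1) };        // InvalidNullPointerUsage under the check above
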
index 76d01ad19e3ab609e5bfafc80aed5ea3c7cf24c7..e55534e36fd2ed08708b077df16224a24a4f1b8e 100644 (file)
@@ -1,3 +1,5 @@
+#![allow(unused)]
+
 //! Implements a map from integer indices to data.
 //! Rather than storing data for every index, internally, this maps entire ranges to the data.
 //! To this end, the APIs all work on ranges, not on individual integers. Ranges are split as
index 9f0fb2c8f62a17701bea41b6b8b380bc8afe9c44..878884065bb71e0285a63fa147aabdefcd77ae66 100644 (file)
@@ -119,19 +119,19 @@ fn run_tls_dtors(&mut self) -> EvalResult<'tcx> {
             // TODO: Potentially, this has to support all the other possible instances?
             // See eval_fn_call in interpret/terminator/mod.rs
             let mir = self.load_mir(instance.def)?;
+            let ret = Place::null(&self);
             self.push_stack_frame(
                 instance,
                 mir.span,
                 mir,
-                Place::undef(),
+                ret,
                 StackPopCleanup::None,
             )?;
             let arg_local = self.frame().mir.args_iter().next().ok_or_else(
                 || EvalErrorKind::AbiViolation("TLS dtor does not take enough arguments.".to_owned()),
             )?;
             let dest = self.eval_place(&mir::Place::Local(arg_local))?;
-            let ty = self.tcx.mk_mut_ptr(self.tcx.types.u8);
-            self.write_ptr(dest, ptr, ty)?;
+            self.write_scalar(ptr, dest)?;
 
             // step until out of stackframes
             while self.step()? {}
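
For reference, the destructors this loop drives take a single data pointer, matching the `*mut u8` scalar written into the frame's argument above; a guest-side sketch with hypothetical cleanup logic:

extern "C" fn tls_dtor(ptr: *mut u8) {
    // The interpreter passes the TLS value as this single argument and then
    // steps the frame to completion; what the dtor does with it is up to the guest.
    unsafe { drop(Box::from_raw(ptr as *mut u32)) };
}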
diff --git a/src/validation.rs b/src/validation.rs
deleted file mode 100644 (file)
index 7f0abb9..0000000
+++ /dev/null
@@ -1,803 +0,0 @@
-use rustc::hir::{self, Mutability};
-use rustc::hir::Mutability::*;
-use rustc::mir::{self, ValidationOp, ValidationOperand};
-use rustc::mir::interpret::GlobalId;
-use rustc::ty::{self, Ty, TypeFoldable, TyCtxt, Instance};
-use rustc::ty::layout::{LayoutOf, PrimitiveExt};
-use rustc::ty::subst::{Substs, Subst};
-use rustc::traits::{self, TraitEngine};
-use rustc::infer::InferCtxt;
-use rustc::middle::region;
-use rustc::mir::interpret::{ConstValue};
-use rustc_data_structures::indexed_vec::Idx;
-use rustc_mir::interpret::HasMemory;
-
-use super::{EvalContext, Place, PlaceExtra, ValTy, ScalarExt};
-use rustc::mir::interpret::{DynamicLifetime, AccessKind, EvalErrorKind, Value, EvalError, EvalResult};
-use locks::MemoryExt;
-
-pub type ValidationQuery<'tcx> = ValidationOperand<'tcx, (AbsPlace<'tcx>, Place)>;
-
-#[derive(Copy, Clone, Debug, PartialEq)]
-pub(crate) enum ValidationMode {
-    Acquire,
-    /// Recover because the given region ended
-    Recover(region::Scope),
-    ReleaseUntil(Option<region::Scope>),
-}
-
-impl ValidationMode {
-    fn acquiring(self) -> bool {
-        use self::ValidationMode::*;
-        match self {
-            Acquire | Recover(_) => true,
-            ReleaseUntil(_) => false,
-        }
-    }
-}
-
-// Abstract places
-#[derive(Clone, Debug, PartialEq, Eq, Hash)]
-pub enum AbsPlace<'tcx> {
-    Local(mir::Local),
-    Static(hir::def_id::DefId),
-    Projection(Box<AbsPlaceProjection<'tcx>>),
-}
-
-type AbsPlaceProjection<'tcx> = mir::Projection<'tcx, AbsPlace<'tcx>, u64, ()>;
-type AbsPlaceElem<'tcx> = mir::ProjectionElem<'tcx, u64, ()>;
-
-impl<'tcx> AbsPlace<'tcx> {
-    pub fn field(self, f: mir::Field) -> AbsPlace<'tcx> {
-        self.elem(mir::ProjectionElem::Field(f, ()))
-    }
-
-    pub fn deref(self) -> AbsPlace<'tcx> {
-        self.elem(mir::ProjectionElem::Deref)
-    }
-
-    pub fn downcast(self, adt_def: &'tcx ty::AdtDef, variant_index: usize) -> AbsPlace<'tcx> {
-        self.elem(mir::ProjectionElem::Downcast(adt_def, variant_index))
-    }
-
-    pub fn index(self, index: u64) -> AbsPlace<'tcx> {
-        self.elem(mir::ProjectionElem::Index(index))
-    }
-
-    fn elem(self, elem: AbsPlaceElem<'tcx>) -> AbsPlace<'tcx> {
-        AbsPlace::Projection(Box::new(AbsPlaceProjection {
-            base: self,
-            elem,
-        }))
-    }
-}
-
-pub(crate) trait EvalContextExt<'tcx> {
-    fn abstract_place_projection(&self, proj: &mir::PlaceProjection<'tcx>) -> EvalResult<'tcx, AbsPlaceProjection<'tcx>>;
-    fn abstract_place(&self, place: &mir::Place<'tcx>) -> EvalResult<'tcx, AbsPlace<'tcx>>;
-    fn validation_op(
-        &mut self,
-        op: ValidationOp,
-        operand: &ValidationOperand<'tcx, mir::Place<'tcx>>,
-    ) -> EvalResult<'tcx>;
-    fn end_region(&mut self, scope: Option<region::Scope>) -> EvalResult<'tcx>;
-    fn normalize_type_unerased(&self, ty: Ty<'tcx>) -> Ty<'tcx>;
-    fn field_with_lifetimes(
-        &mut self,
-        base: Place,
-        layout: ty::layout::TyLayout<'tcx>,
-        i: usize,
-    ) -> EvalResult<'tcx, Ty<'tcx>>;
-    fn validate_fields(
-        &mut self,
-        query: ValidationQuery<'tcx>,
-        mode: ValidationMode,
-    ) -> EvalResult<'tcx>;
-    fn validate_ptr(
-        &mut self,
-        val: Value,
-        abs_place: AbsPlace<'tcx>,
-        pointee_ty: Ty<'tcx>,
-        re: Option<region::Scope>,
-        mutbl: Mutability,
-        mode: ValidationMode,
-    ) -> EvalResult<'tcx>;
-    fn validate(
-        &mut self,
-        query: ValidationQuery<'tcx>,
-        mode: ValidationMode,
-    ) -> EvalResult<'tcx>;
-}
-
-impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>> {
-    fn abstract_place_projection(&self, proj: &mir::PlaceProjection<'tcx>) -> EvalResult<'tcx, AbsPlaceProjection<'tcx>> {
-        use self::mir::ProjectionElem::*;
-
-        let elem = match proj.elem {
-            Deref => Deref,
-            Field(f, _) => Field(f, ()),
-            Index(v) => {
-                let value = self.frame().locals[v].access()?;
-                let ty = self.tcx.tcx.types.usize;
-                let n = self.value_to_scalar(ValTy { value, ty })?.to_usize(self)?;
-                Index(n)
-            },
-            ConstantIndex { offset, min_length, from_end } =>
-                ConstantIndex { offset, min_length, from_end },
-            Subslice { from, to } =>
-                Subslice { from, to },
-            Downcast(adt, sz) => Downcast(adt, sz),
-        };
-        Ok(AbsPlaceProjection {
-            base: self.abstract_place(&proj.base)?,
-            elem
-        })
-    }
-
-    fn abstract_place(&self, place: &mir::Place<'tcx>) -> EvalResult<'tcx, AbsPlace<'tcx>> {
-        Ok(match *place {
-            mir::Place::Local(l) => AbsPlace::Local(l),
-            mir::Place::Static(ref s) => AbsPlace::Static(s.def_id),
-            mir::Place::Projection(ref p) =>
-                AbsPlace::Projection(Box::new(self.abstract_place_projection(&*p)?)),
-            _ => unimplemented!("validation is not currently maintained"),
-        })
-    }
-
-    // Validity checks
-    fn validation_op(
-        &mut self,
-        op: ValidationOp,
-        operand: &ValidationOperand<'tcx, mir::Place<'tcx>>,
-    ) -> EvalResult<'tcx> {
-        // If mir-emit-validate is set to 0 (i.e., disabled), we may still see validation commands
-        // because other crates may have been compiled with mir-emit-validate > 0.  Ignore those
-        // commands.  This makes mir-emit-validate also a flag to control whether miri will do
-        // validation or not.
-        if self.tcx.tcx.sess.opts.debugging_opts.mir_emit_validate == 0 {
-            return Ok(());
-        }
-        debug_assert!(self.memory.cur_frame == self.cur_frame());
-
-        // We need to monomorphize ty *without* erasing lifetimes
-        trace!("validation_op1: {:?}", operand.ty.sty);
-        let ty = operand.ty.subst(self.tcx.tcx, self.substs());
-        trace!("validation_op2: {:?}", operand.ty.sty);
-        let place = self.eval_place(&operand.place)?;
-        let abs_place = self.abstract_place(&operand.place)?;
-        let query = ValidationQuery {
-            place: (abs_place, place),
-            ty,
-            re: operand.re,
-            mutbl: operand.mutbl,
-        };
-
-        // Check the mode, and also perform mode-specific operations
-        let mode = match op {
-            ValidationOp::Acquire => ValidationMode::Acquire,
-            ValidationOp::Release => ValidationMode::ReleaseUntil(None),
-            ValidationOp::Suspend(scope) => {
-                if query.mutbl == MutMutable {
-                    let lft = DynamicLifetime {
-                        frame: self.cur_frame(),
-                        region: Some(scope), // Notably, we only ever suspend things for given regions.
-                        // Suspending for the entire function does not make any sense.
-                    };
-                    trace!("Suspending {:?} until {:?}", query, scope);
-                    self.machine.suspended.entry(lft).or_insert_with(Vec::new).push(
-                        query.clone(),
-                    );
-                }
-                ValidationMode::ReleaseUntil(Some(scope))
-            }
-        };
-        self.validate(query, mode)
-    }
-
-    /// Releases locks and executes suspensions of the given region (or the entire fn, in case of None).
-    fn end_region(&mut self, scope: Option<region::Scope>) -> EvalResult<'tcx> {
-        debug_assert!(self.memory.cur_frame == self.cur_frame());
-        self.memory.locks_lifetime_ended(scope);
-        match scope {
-            Some(scope) => {
-                // Recover suspended places
-                let lft = DynamicLifetime {
-                    frame: self.cur_frame(),
-                    region: Some(scope),
-                };
-                if let Some(queries) = self.machine.suspended.remove(&lft) {
-                    for query in queries {
-                        trace!("Recovering {:?} from suspension", query);
-                        self.validate(query, ValidationMode::Recover(scope))?;
-                    }
-                }
-            }
-            None => {
-                // Clean suspension table of current frame
-                let cur_frame = self.cur_frame();
-                self.machine.suspended.retain(|lft, _| {
-                    lft.frame != cur_frame // keep only what is in the other (lower) frames
-                });
-            }
-        }
-        Ok(())
-    }
-
-    fn normalize_type_unerased(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
-        return normalize_associated_type(self.tcx.tcx, &ty);
-
-        use syntax::codemap::{Span, DUMMY_SP};
-
-        // We copy a bunch of stuff from rustc/infer/mod.rs to be able to tweak its behavior
-        fn normalize_projections_in<'a, 'gcx, 'tcx, T>(
-            self_: &InferCtxt<'a, 'gcx, 'tcx>,
-            param_env: ty::ParamEnv<'tcx>,
-            value: &T,
-        ) -> T::Lifted
-        where
-            T: TypeFoldable<'tcx> + ty::Lift<'gcx>,
-        {
-            let mut selcx = traits::SelectionContext::new(self_);
-            let cause = traits::ObligationCause::dummy();
-            let traits::Normalized {
-                value: result,
-                obligations,
-            } = traits::normalize(&mut selcx, param_env, cause, value);
-
-            let mut fulfill_cx = traits::FulfillmentContext::new();
-
-            for obligation in obligations {
-                fulfill_cx.register_predicate_obligation(self_, obligation);
-            }
-
-            drain_fulfillment_cx_or_panic(self_, DUMMY_SP, &mut fulfill_cx, &result)
-        }
-
-        fn drain_fulfillment_cx_or_panic<'a, 'gcx, 'tcx, T>(
-            self_: &InferCtxt<'a, 'gcx, 'tcx>,
-            span: Span,
-            fulfill_cx: &mut traits::FulfillmentContext<'tcx>,
-            result: &T,
-        ) -> T::Lifted
-        where
-            T: TypeFoldable<'tcx> + ty::Lift<'gcx>,
-        {
-            // In principle, we only need to do this so long as `result`
-            // contains unbound type parameters. It could be a slight
-            // optimization to stop iterating early.
-            match fulfill_cx.select_all_or_error(self_) {
-                Ok(()) => { }
-                Err(errors) => {
-                    span_bug!(
-                        span,
-                        "Encountered errors `{:?}` resolving bounds after type-checking",
-                        errors
-                    );
-                }
-            }
-
-            let result = self_.resolve_type_vars_if_possible(result);
-            let result = self_.tcx.fold_regions(
-                &result,
-                &mut false,
-                |r, _| match *r {
-                    ty::ReVar(_) => self_.tcx.types.re_erased,
-                    _ => r,
-                },
-            );
-
-            match self_.tcx.lift_to_global(&result) {
-                Some(result) => result,
-                None => {
-                    span_bug!(span, "Uninferred types/regions in `{:?}`", result);
-                }
-            }
-        }
-
-        trait MyTransNormalize<'gcx>: TypeFoldable<'gcx> {
-            fn my_trans_normalize<'a, 'tcx>(
-                &self,
-                infcx: &InferCtxt<'a, 'gcx, 'tcx>,
-                param_env: ty::ParamEnv<'tcx>,
-            ) -> Self;
-        }
-
-        macro_rules! items { ($($item:item)+) => ($($item)+) }
-        macro_rules! impl_trans_normalize {
-            ($lt_gcx:tt, $($ty:ty),+) => {
-                items!($(impl<$lt_gcx> MyTransNormalize<$lt_gcx> for $ty {
-                    fn my_trans_normalize<'a, 'tcx>(&self,
-                                                infcx: &InferCtxt<'a, $lt_gcx, 'tcx>,
-                                                param_env: ty::ParamEnv<'tcx>)
-                                                -> Self {
-                        normalize_projections_in(infcx, param_env, self)
-                    }
-                })+);
-            }
-        }
-
-        impl_trans_normalize!('gcx,
-            Ty<'gcx>,
-            &'gcx Substs<'gcx>,
-            ty::FnSig<'gcx>,
-            ty::PolyFnSig<'gcx>,
-            ty::ClosureSubsts<'gcx>,
-            ty::PolyTraitRef<'gcx>,
-            ty::ExistentialTraitRef<'gcx>
-        );
-
-        fn normalize_associated_type<'a, 'tcx, T>(self_: TyCtxt<'a, 'tcx, 'tcx>, value: &T) -> T
-        where
-            T: MyTransNormalize<'tcx>,
-        {
-            let param_env = ty::ParamEnv::reveal_all();
-
-            if !value.has_projections() {
-                return value.clone();
-            }
-
-            self_.infer_ctxt().enter(|infcx| {
-                value.my_trans_normalize(&infcx, param_env)
-            })
-        }
-    }
-
-    // This is a copy of `Layout::field`
-    //
-    // FIXME: remove once validation does not depend on lifetimes
-    fn field_with_lifetimes(
-        &mut self,
-        base: Place,
-        mut layout: ty::layout::TyLayout<'tcx>,
-        i: usize,
-    ) -> EvalResult<'tcx, Ty<'tcx>> {
-        if let Place::Ptr { extra: PlaceExtra::DowncastVariant(variant_index), .. } = base {
-            layout = layout.for_variant(&self, variant_index);
-        }
-        let tcx = self.tcx.tcx;
-        Ok(match layout.ty.sty {
-            ty::TyBool |
-            ty::TyChar |
-            ty::TyInt(_) |
-            ty::TyUint(_) |
-            ty::TyFloat(_) |
-            ty::TyFnPtr(_) |
-            ty::TyNever |
-            ty::TyFnDef(..) |
-            ty::TyGeneratorWitness(..) |
-            ty::TyDynamic(..) |
-            ty::TyForeign(..) => {
-                bug!("TyLayout::field_type({:?}): not applicable", layout)
-            }
-
-            // Potentially-fat pointers.
-            ty::TyRef(_, pointee, _) |
-            ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
-                assert!(i < 2);
-
-                // Reuse the fat *T type as its own thin pointer data field.
-                // This provides information about e.g. DST struct pointees
-                // (which may have no non-DST form), and will work as long
-                // as the `Abi` or `FieldPlacement` is checked by users.
-                if i == 0 {
-                    return Ok(layout.ty);
-                }
-
-                match tcx.struct_tail(pointee).sty {
-                    ty::TySlice(_) |
-                    ty::TyStr => tcx.types.usize,
-                    ty::TyDynamic(..) => {
-                        // FIXME(eddyb) use an usize/fn() array with
-                        // the correct number of vtables slots.
-                        tcx.mk_imm_ref(tcx.types.re_static, tcx.mk_nil())
-                    }
-                    _ => bug!("TyLayout::field_type({:?}): not applicable", layout)
-                }
-            }
-
-            // Arrays and slices.
-            ty::TyArray(element, _) |
-            ty::TySlice(element) => element,
-            ty::TyStr => tcx.types.u8,
-
-            // Tuples, generators and closures.
-            ty::TyClosure(def_id, ref substs) => {
-                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
-            }
-
-            ty::TyGenerator(def_id, ref substs, _) => {
-                substs.field_tys(def_id, tcx).nth(i).unwrap()
-            }
-
-            ty::TyTuple(tys) => tys[i],
-
-            // SIMD vector types.
-            ty::TyAdt(def, ..) if def.repr.simd() => {
-                layout.ty.simd_type(tcx)
-            }
-
-            // ADTs.
-            ty::TyAdt(def, substs) => {
-                use rustc::ty::layout::Variants;
-                match layout.variants {
-                    Variants::Single { index } => {
-                        def.variants[index].fields[i].ty(tcx, substs)
-                    }
-
-                    // Discriminant field for enums (where applicable).
-                    Variants::Tagged { tag: ref discr, .. } |
-                    Variants::NicheFilling { niche: ref discr, .. } => {
-                        assert_eq!(i, 0);
-                        return Ok(discr.value.to_ty(tcx))
-                    }
-                }
-            }
-
-            ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
-            ty::TyInfer(_) | ty::TyError => {
-                bug!("TyLayout::field_type: unexpected type `{}`", layout.ty)
-            }
-        })
-    }
-
-    fn validate_fields(
-        &mut self,
-        query: ValidationQuery<'tcx>,
-        mode: ValidationMode,
-    ) -> EvalResult<'tcx> {
-        let mut layout = self.layout_of(query.ty)?;
-        layout.ty = query.ty;
-
-        // TODO: Maybe take visibility/privacy into account.
-        for idx in 0..layout.fields.count() {
-            let field = mir::Field::new(idx);
-            let (field_place, field_layout) =
-                self.place_field(query.place.1, field, layout)?;
-            // layout stuff erases lifetimes, get the field ourselves
-            let field_ty = self.field_with_lifetimes(query.place.1, layout, idx)?;
-            trace!("assuming \n{:?}\n == \n{:?}\n except for lifetimes", field_layout.ty, field_ty);
-            self.validate(
-                ValidationQuery {
-                    place: (query.place.0.clone().field(field), field_place),
-                    ty: field_ty,
-                    ..query
-                },
-                mode,
-            )?;
-        }
-
-        Ok(())
-    }
-
-    fn validate_ptr(
-        &mut self,
-        val: Value,
-        abs_place: AbsPlace<'tcx>,
-        pointee_ty: Ty<'tcx>,
-        re: Option<region::Scope>,
-        mutbl: Mutability,
-        mode: ValidationMode,
-    ) -> EvalResult<'tcx> {
-        // Check alignment and non-NULLness
-        let (_, align) = self.size_and_align_of_dst(pointee_ty, val)?;
-        let ptr = self.into_ptr(val)?.unwrap_or_err()?;
-        self.memory.check_align(ptr, align)?;
-
-        // Recurse
-        let pointee_place = self.val_to_place(val, pointee_ty)?;
-        self.validate(
-            ValidationQuery {
-                place: (abs_place.deref(), pointee_place),
-                ty: pointee_ty,
-                re,
-                mutbl,
-            },
-            mode,
-        )
-    }
-
-    /// Validate the place at the given type. If the mode is not acquiring, just do a release of all write locks.
-    fn validate(
-        &mut self,
-        mut query: ValidationQuery<'tcx>,
-        mode: ValidationMode,
-    ) -> EvalResult<'tcx> {
-        use rustc::ty::TypeVariants::*;
-        use rustc::ty::RegionKind::*;
-        use rustc::ty::AdtKind;
-
-        // No point releasing shared stuff.
-        if !mode.acquiring() && query.mutbl == MutImmutable {
-            return Ok(());
-        }
-        // When we recover, we may see data whose validity *just* ended.  Do not acquire it.
-        if let ValidationMode::Recover(ending_ce) = mode {
-            if query.re == Some(ending_ce) {
-                return Ok(());
-            }
-        }
-
-        query.ty = self.normalize_type_unerased(&query.ty);
-        trace!("{:?} on {:#?}", mode, query);
-        trace!("{:#?}", query.ty.sty);
-
-        // Decide whether this type *owns* the memory it covers (like integers), or whether it
-        // just assembles pieces (that each own their memory) together to a larger whole.
-        // TODO: Currently, we don't acquire locks for padding and discriminants. We should.
-        let is_owning = match query.ty.sty {
-            TyInt(_) | TyUint(_) | TyRawPtr(_) | TyBool | TyFloat(_) | TyChar | TyStr |
-            TyRef(..) | TyFnPtr(..) | TyFnDef(..) | TyNever => true,
-            TyAdt(adt, _) if adt.is_box() => true,
-            TySlice(_) | TyAdt(_, _) | TyTuple(..) | TyClosure(..) | TyArray(..) |
-            TyDynamic(..) | TyGenerator(..) | TyForeign(_) => false,
-            TyGeneratorWitness(..) => unreachable!("TyGeneratorWitness in validate"),
-            TyParam(_) | TyInfer(_) | TyProjection(_) | TyAnon(..) | TyError => {
-                bug!("I got an incomplete/unnormalized type for validation")
-            }
-        };
-        if is_owning {
-            // We need to lock.  So we need memory.  So we have to force_allocation.
-            // Tracking the same state for locals not backed by memory would just duplicate too
-            // much machinery.
-            // FIXME: We ignore alignment.
-            let (ptr, _, extra) = self.force_allocation(query.place.1)?.to_ptr_align_extra();
-            // Determine the size
-            // FIXME: Can we reuse size_and_align_of_dst for Places?
-            let layout = self.layout_of(query.ty)?;
-            let len = if !layout.is_unsized() {
-                assert_eq!(extra, PlaceExtra::None, "Got a fat ptr to a sized type");
-                layout.size.bytes()
-            } else {
-                // The only unsized type we consider "owning" is TyStr.
-                assert_eq!(
-                    query.ty.sty,
-                    TyStr,
-                    "Found a surprising unsized owning type"
-                );
-                // The extra must be the length, in bytes.
-                match extra {
-                    PlaceExtra::Length(len) => len,
-                    _ => bug!("TyStr must have a length as extra"),
-                }
-            };
-            // Handle locking
-            if len > 0 {
-                let ptr = ptr.unwrap_or_err()?.to_ptr()?;
-                match query.mutbl {
-                    MutImmutable => {
-                        if mode.acquiring() {
-                            self.memory.acquire_lock(
-                                ptr,
-                                len,
-                                query.re,
-                                AccessKind::Read,
-                            )?;
-                        }
-                    }
-                    // No releasing of read locks, ever.
-                    MutMutable => {
-                        match mode {
-                            ValidationMode::Acquire => {
-                                self.memory.acquire_lock(
-                                    ptr,
-                                    len,
-                                    query.re,
-                                    AccessKind::Write,
-                                )?
-                            }
-                            ValidationMode::Recover(ending_ce) => {
-                                self.memory.recover_write_lock(
-                                    ptr,
-                                    len,
-                                    &query.place.0,
-                                    query.re,
-                                    ending_ce,
-                                )?
-                            }
-                            ValidationMode::ReleaseUntil(suspended_ce) => {
-                                self.memory.suspend_write_lock(
-                                    ptr,
-                                    len,
-                                    &query.place.0,
-                                    suspended_ce,
-                                )?
-                            }
-                        }
-                    }
-                }
-            }
-        }
-
-        let res: EvalResult<'tcx> = do catch {
-            match query.ty.sty {
-                TyInt(_) | TyUint(_) | TyRawPtr(_) => {
-                    if mode.acquiring() {
-                        // Make sure we can read this.
-                        let val = self.read_place(query.place.1)?;
-                        self.follow_by_ref_value(val, query.ty)?;
-                        // FIXME: It would be great to rule out Undef here, but that doesn't actually work.
-                        // Passing around undef data is a thing that e.g. Vec::extend_with does.
-                    }
-                }
-                TyBool | TyFloat(_) | TyChar => {
-                    if mode.acquiring() {
-                        let val = self.read_place(query.place.1)?;
-                        let val = self.value_to_scalar(ValTy { value: val, ty: query.ty })?;
-                        val.to_bytes()?;
-                        // TODO: Check if these are valid bool/float/codepoint/UTF-8
-                    }
-                }
-                TyNever => return err!(ValidationFailure(format!("The empty type is never valid."))),
-                TyRef(region, pointee_ty, mutbl) => {
-                    let val = self.read_place(query.place.1)?;
-                    // Sharing restricts our context
-                    if mutbl == MutImmutable {
-                        query.mutbl = MutImmutable;
-                    }
-                    // Inner lifetimes *outlive* outer ones, so only if we have no lifetime restriction yet,
-                    // we record the region of this borrow to the context.
-                    if query.re == None {
-                        if let ReScope(scope) = *region {
-                            query.re = Some(scope);
-                        }
-                        // It is possible for us to encounter erased lifetimes here because the lifetimes in
-                        // this function's Substs will be erased.
-                    }
-                    self.validate_ptr(val, query.place.0, pointee_ty, query.re, query.mutbl, mode)?;
-                }
-                TyAdt(adt, _) if adt.is_box() => {
-                    let val = self.read_place(query.place.1)?;
-                    self.validate_ptr(val, query.place.0, query.ty.boxed_ty(), query.re, query.mutbl, mode)?;
-                }
-                TyFnPtr(_sig) => {
-                    let ptr = self.read_place(query.place.1)?;
-                    let ptr = self.into_ptr(ptr)?.unwrap_or_err()?.to_ptr()?;
-                    self.memory.get_fn(ptr)?;
-                    // TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?).
-                }
-                TyFnDef(..) => {
-                    // This is a zero-sized type with all relevant data sitting in the type.
-                    // There is nothing to validate.
-                }
-
-                // Compound types
-                TyStr => {
-                    // TODO: Validate strings
-                }
-                TySlice(elem_ty) => {
-                    let len = match query.place.1 {
-                        Place::Ptr { extra: PlaceExtra::Length(len), .. } => len,
-                        _ => {
-                            bug!(
-                                "acquire_valid of a TySlice given non-slice place: {:?}",
-                                query.place
-                            )
-                        }
-                    };
-                    for i in 0..len {
-                        let inner_place = self.place_index(query.place.1, query.ty, i)?;
-                        self.validate(
-                            ValidationQuery {
-                                place: (query.place.0.clone().index(i), inner_place),
-                                ty: elem_ty,
-                                ..query
-                            },
-                            mode,
-                        )?;
-                    }
-                }
-                TyArray(elem_ty, len) => {
-                    let len = match len.val {
-                        ConstValue::Unevaluated(def_id, substs) => {
-                            self.tcx.const_eval(self.tcx.param_env(def_id).and(GlobalId {
-                                instance: Instance::new(def_id, substs),
-                                promoted: None,
-                            }))
-                                .map_err(|_err|EvalErrorKind::MachineError("<already reported>".to_string()))?
-                        }
-                        _ => len,
-                    };
-                    let len = len.unwrap_usize(self.tcx.tcx);
-                    for i in 0..len {
-                        let inner_place = self.place_index(query.place.1, query.ty, i as u64)?;
-                        self.validate(
-                            ValidationQuery {
-                                place: (query.place.0.clone().index(i as u64), inner_place),
-                                ty: elem_ty,
-                                ..query
-                            },
-                            mode,
-                        )?;
-                    }
-                }
-                TyDynamic(_data, _region) => {
-                    // Check that this is a valid vtable
-                    let vtable = match query.place.1 {
-                        Place::Ptr { extra: PlaceExtra::Vtable(vtable), .. } => vtable,
-                        _ => {
-                            bug!(
-                                "acquire_valid of a TyDynamic given non-trait-object place: {:?}",
-                                query.place
-                            )
-                        }
-                    };
-                    self.read_size_and_align_from_vtable(vtable)?;
-                    // TODO: Check that the vtable contains all the function pointers we expect it to have.
-                    // Trait objects cannot have any operations performed
-                    // on them directly.  We cannot, in general, even acquire any locks as the trait object *could*
-                    // contain an UnsafeCell.  If we call functions to get access to data, we will validate
-                    // their return values.  So, it doesn't seem like there's anything else to do.
-                }
-                TyAdt(adt, _) => {
-                    if Some(adt.did) == self.tcx.tcx.lang_items().unsafe_cell_type() &&
-                        query.mutbl == MutImmutable
-                    {
-                        // No locks for shared unsafe cells.  Also no other validation, the only field is private anyway.
-                        return Ok(());
-                    }
-
-                    match adt.adt_kind() {
-                        AdtKind::Enum => {
-                            let layout = self.layout_of(query.ty)?;
-                            let variant_idx = self.read_discriminant_as_variant_index(query.place.1, layout)?;
-                            let variant = &adt.variants[variant_idx];
-
-                            if !variant.fields.is_empty() {
-                                // Downcast to this variant, if needed
-                                let place = if adt.is_enum() {
-                                    (
-                                        query.place.0.downcast(adt, variant_idx),
-                                        self.eval_place_projection(
-                                            query.place.1,
-                                            query.ty,
-                                            &mir::ProjectionElem::Downcast(adt, variant_idx),
-                                        )?,
-                                    )
-                                } else {
-                                    query.place
-                                };
-
-                                // Recursively validate the fields
-                                self.validate_fields(
-                                    ValidationQuery { place, ..query },
-                                    mode,
-                                )?;
-                            } else {
-                                // No fields, nothing left to check.  Downcasting may fail, e.g. in case of a CEnum.
-                            }
-                        }
-                        AdtKind::Struct => {
-                            self.validate_fields(query, mode)?;
-                        }
-                        AdtKind::Union => {
-                            // No guarantees are provided for union types.
-                            // TODO: Make sure that all access to union fields is unsafe; otherwise, we may have some checking to do (but what exactly?)
-                        }
-                    }
-                }
-                TyTuple(..) |
-                TyClosure(..) => {
-                    // TODO: Check if the signature matches for `TyClosure`
-                    // (should be the same check as what terminator/mod.rs already does on call?).
-                    // Are there other things we can/should check?  Like vtable pointers?
-                    self.validate_fields(query, mode)?;
-                }
-                // FIXME: generators aren't validated right now
-                TyGenerator(..) => {},
-                _ => bug!("We already established that this is a type we support. ({})", query.ty),
-            }
-        };
-        match res {
-            // ReleaseUntil(None) of an uninitialized variable is a NOP.  This is needed because
-            // we have to release the return value of a function; due to destination-passing-style
-            // the callee may directly write there.
-            // TODO: Ideally we would know whether the destination is already initialized, and only
-            // release if it is.  But of course that can't even always be statically determined.
-            Err(EvalError { kind: EvalErrorKind::ReadUndefBytes, .. })
-                if mode == ValidationMode::ReleaseUntil(None) => {
-                Ok(())
-            }
-            res => res,
-        }
-    }
-}
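
For context on what is being deleted: the validation machinery acquired and released memory locks along borrows. Roughly (an illustrative sketch, not code from this repository), entering a function such as the one below would write-lock the pointee for the borrow's region, so a conflicting access through an alias would have surfaced as a lock error:

fn set(x: &mut i32) {
    // The deleted ValidationMode::Acquire took a write lock on `*x` here;
    // ReleaseUntil gave it back when the region ended.
    *x = 42;
}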
index 61e9a965889669556d36c798a322e28e3deda2f4..ec8e16d33e42b7eba0f1137299bf651f3608b3ae 100644 (file)
 
 fn main() {
     // Make sure trans can emit all the intrinsics correctly
-    ATOMIC.compare_exchange(0, 1, Relaxed, Relaxed).ok();
-    ATOMIC.compare_exchange(0, 1, Acquire, Relaxed).ok();
-    ATOMIC.compare_exchange(0, 1, Release, Relaxed).ok();
-    ATOMIC.compare_exchange(0, 1, AcqRel, Relaxed).ok();
+    assert_eq!(ATOMIC.compare_exchange(0, 1, Relaxed, Relaxed), Ok(0));
+    assert_eq!(ATOMIC.compare_exchange(0, 2, Acquire, Relaxed), Err(1));
+    assert_eq!(ATOMIC.compare_exchange(0, 1, Release, Relaxed), Err(1));
+    assert_eq!(ATOMIC.compare_exchange(1, 0, AcqRel, Relaxed), Ok(1));
     ATOMIC.compare_exchange(0, 1, SeqCst, Relaxed).ok();
     ATOMIC.compare_exchange(0, 1, Acquire, Acquire).ok();
     ATOMIC.compare_exchange(0, 1, AcqRel, Acquire).ok();
     ATOMIC.compare_exchange(0, 1, SeqCst, Acquire).ok();
     ATOMIC.compare_exchange(0, 1, SeqCst, SeqCst).ok();
-    ATOMIC.compare_exchange_weak(0, 1, Relaxed, Relaxed).ok();
-    ATOMIC.compare_exchange_weak(0, 1, Acquire, Relaxed).ok();
-    ATOMIC.compare_exchange_weak(0, 1, Release, Relaxed).ok();
+
+    ATOMIC.store(0, SeqCst);
+
+    assert_eq!(ATOMIC.compare_exchange_weak(0, 1, Relaxed, Relaxed), Ok(0));
+    assert_eq!(ATOMIC.compare_exchange_weak(0, 2, Acquire, Relaxed), Err(1));
+    assert_eq!(ATOMIC.compare_exchange_weak(0, 1, Release, Relaxed), Err(1));
+    assert_eq!(ATOMIC.compare_exchange_weak(1, 0, AcqRel, Relaxed), Ok(1));
     ATOMIC.compare_exchange_weak(0, 1, AcqRel, Relaxed).ok();
     ATOMIC.compare_exchange_weak(0, 1, SeqCst, Relaxed).ok();
     ATOMIC.compare_exchange_weak(0, 1, Acquire, Acquire).ok();
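
The strengthened assertions rely on `compare_exchange` returning `Ok(previous)` on success and `Err(previous)` on a mismatch; a self-contained restatement of that contract (separate from the test above; note that `compare_exchange_weak` may fail spuriously on real hardware, so asserting its success is only deterministic under an interpreter):

use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};

fn cas_contract() {
    let a = AtomicUsize::new(0);
    assert_eq!(a.compare_exchange(0, 1, SeqCst, SeqCst), Ok(0));  // matched: 0 -> 1
    assert_eq!(a.compare_exchange(0, 2, SeqCst, SeqCst), Err(1)); // no match: stays 1
}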