git.lizzy.rs Git - rust.git/commitdiff
Update to Memory -> Allocation method move
author: Oliver Scherer <github35764891676564198441@oli-obk.de>
Tue, 13 Nov 2018 16:19:42 +0000 (17:19 +0100)
committer: Oliver Scherer <github35764891676564198441@oli-obk.de>
Fri, 23 Nov 2018 16:00:31 +0000 (17:00 +0100)
src/fn_call.rs
src/intrinsic.rs
src/operator.rs
src/stacked_borrows.rs

index e64d4f6954878fd9553bf0aa39afd8b6f7ef22b1..e9d3255a5b32590519470be37804ad373e240654 100644 (file)
@@ -114,6 +114,8 @@ fn emulate_foreign_item(
             None => self.tcx.item_name(def_id).as_str(),
         };
 
+        let tcx = &{self.tcx.tcx};
+
         // All these functions take raw pointers, so if we access memory directly
         // (as opposed to through a place), we have to remember to erase any tag
         // that might still hang around!
@@ -175,7 +177,9 @@ fn emulate_foreign_item(
                         MiriMemoryKind::Rust.into()
                     )?
                     .with_default_tag();
-                self.memory_mut().write_repeat(ptr.into(), 0, Size::from_bytes(size))?;
+                self.memory_mut()
+                    .get_mut(ptr.alloc_id)?
+                    .write_repeat(tcx, ptr, 0, Size::from_bytes(size))?;
                 self.write_scalar(Scalar::Ptr(ptr), dest)?;
             }
             "__rust_dealloc" => {
@@ -239,7 +243,7 @@ fn emulate_foreign_item(
             "dlsym" => {
                 let _handle = self.read_scalar(args[0])?;
                 let symbol = self.read_scalar(args[1])?.to_ptr()?;
-                let symbol_name = self.memory().read_c_str(symbol)?;
+                let symbol_name = self.memory().get(symbol.alloc_id)?.read_c_str(tcx, symbol)?;
                 let err = format!("bad c unicode symbol: {:?}", symbol_name);
                 let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err);
                 return err!(Unimplemented(format!(
@@ -346,7 +350,7 @@ fn emulate_foreign_item(
             "getenv" => {
                 let result = {
                     let name_ptr = self.read_scalar(args[0])?.to_ptr()?;
-                    let name = self.memory().read_c_str(name_ptr)?;
+                    let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?;
                     match self.machine.env_vars.get(name) {
                         Some(&var) => Scalar::Ptr(var),
                         None => Scalar::ptr_null(&*self.tcx),
@@ -360,8 +364,8 @@ fn emulate_foreign_item(
                 {
                     let name_ptr = self.read_scalar(args[0])?.not_undef()?;
                     if !name_ptr.is_null_ptr(self) {
-                        let name = self.memory().read_c_str(name_ptr.to_ptr()?
-                            )?.to_owned();
+                        let name_ptr = name_ptr.to_ptr()?;
+                        let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?.to_owned();
                         if !name.is_empty() && !name.contains(&b'=') {
                             success = Some(self.machine.env_vars.remove(&name));
                         }
@@ -382,9 +386,10 @@ fn emulate_foreign_item(
                 {
                     let name_ptr = self.read_scalar(args[0])?.not_undef()?;
                     let value_ptr = self.read_scalar(args[1])?.to_ptr()?;
-                    let value = self.memory().read_c_str(value_ptr)?;
+                    let value = self.memory().get(value_ptr.alloc_id)?.read_c_str(tcx, value_ptr)?;
                     if !name_ptr.is_null_ptr(self) {
-                        let name = self.memory().read_c_str(name_ptr.to_ptr()?)?;
+                        let name_ptr = name_ptr.to_ptr()?;
+                        let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?;
                         if !name.is_empty() && !name.contains(&b'=') {
                             new = Some((name.to_owned(), value.to_owned()));
                         }
@@ -397,9 +402,12 @@ fn emulate_foreign_item(
                         Align::from_bytes(1).unwrap(),
                         MiriMemoryKind::Env.into(),
                     )?.with_default_tag();
-                    self.memory_mut().write_bytes(value_copy.into(), &value)?;
-                    let trailing_zero_ptr = value_copy.offset(Size::from_bytes(value.len() as u64), self)?.into();
-                    self.memory_mut().write_bytes(trailing_zero_ptr, &[0])?;
+                    {
+                        let alloc = self.memory_mut().get_mut(value_copy.alloc_id)?;
+                        alloc.write_bytes(tcx, value_copy, &value)?;
+                        let trailing_zero_ptr = value_copy.offset(Size::from_bytes(value.len() as u64), tcx)?;
+                        alloc.write_bytes(tcx, trailing_zero_ptr, &[0])?;
+                    }
                     if let Some(var) = self.machine.env_vars.insert(
                         name.to_owned(),
                         value_copy,
@@ -444,7 +452,7 @@ fn emulate_foreign_item(
 
             "strlen" => {
                 let ptr = self.read_scalar(args[0])?.to_ptr()?;
-                let n = self.memory().read_c_str(ptr)?.len();
+                let n = self.memory().get(ptr.alloc_id)?.read_c_str(tcx, ptr)?.len();
                 self.write_scalar(Scalar::from_uint(n as u64, dest.layout.size), dest)?;
             }
 
@@ -507,13 +515,15 @@ fn emulate_foreign_item(
                 let key_layout = self.layout_of(key_type)?;
 
                 // Create key and write it into the memory where key_ptr wants it
-                let key = self.machine.tls.create_tls_key(dtor, &*self.tcx) as u128;
+                let key = self.machine.tls.create_tls_key(dtor, tcx) as u128;
                 if key_layout.size.bits() < 128 && key >= (1u128 << key_layout.size.bits() as u128) {
                     return err!(OutOfTls);
                 }
-                self.memory_mut().write_scalar(
+
+                self.memory().check_align(key_ptr.into(), key_layout.align.abi)?;
+                self.memory_mut().get_mut(key_ptr.alloc_id)?.write_scalar(
+                    tcx,
                     key_ptr,
-                    key_layout.align.abi,
                     Scalar::from_uint(key, key_layout.size).into(),
                     key_layout.size,
                 )?;
@@ -610,7 +620,7 @@ fn emulate_foreign_item(
                 // This just creates a key; Windows does not natively support TLS dtors.
 
                 // Create key and return it
-                let key = self.machine.tls.create_tls_key(None, &*self.tcx) as u128;
+                let key = self.machine.tls.create_tls_key(None, tcx) as u128;
 
                 // Figure out how large a TLS key actually is. This is c::DWORD.
                 if dest.layout.size.bits() < 128 && key >= (1u128 << dest.layout.size.bits() as u128) {
index 66dab00e0975ccc4fac8ff22f84f8c2b508b272d..c9b16525e566268d5474fbdfe4e4ce0a9bd4550a 100644 (file)
@@ -28,7 +28,7 @@ fn call_intrinsic(
         if self.emulate_intrinsic(instance, args, dest)? {
             return Ok(());
         }
-
+        let tcx = &{self.tcx.tcx};
         let substs = instance.substs;
 
         // All these intrinsics take raw pointers, so if we access memory directly
@@ -248,6 +248,7 @@ fn call_intrinsic(
                 // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
                 // However, this only affects direct calls of the intrinsic; calls to the stable
                 // functions wrapping them do get their validation.
+                // FIXME: should we check that the destination pointer is aligned even for ZSTs?
                 if !dest.layout.is_zst() { // nothing to do for ZST
                     match dest.layout.abi {
                         layout::Abi::Scalar(ref s) => {
@@ -263,7 +264,9 @@ fn call_intrinsic(
                             // Do it in memory
                             let mplace = self.force_allocation(dest)?;
                             assert!(mplace.meta.is_none());
-                            self.memory_mut().write_repeat(mplace.ptr, 0, dest.layout.size)?;
+                            // not a zst, must be valid pointer
+                            let ptr = mplace.ptr.to_ptr()?;
+                            self.memory_mut().get_mut(ptr.alloc_id)?.write_repeat(tcx, ptr, 0, dest.layout.size)?;
                         }
                     }
                 }
@@ -412,6 +415,7 @@ fn call_intrinsic(
                 // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
                 // However, this only affects direct calls of the intrinsic; calls to the stable
                 // functions wrapping them do get their validation.
+                // FIXME: should we check alignment for ZSTs?
                 if !dest.layout.is_zst() { // nothing to do for ZST
                     match dest.layout.abi {
                         layout::Abi::Scalar(..) => {
@@ -426,7 +430,10 @@ fn call_intrinsic(
                             // Do it in memory
                             let mplace = self.force_allocation(dest)?;
                             assert!(mplace.meta.is_none());
-                            self.memory_mut().mark_definedness(mplace.ptr.to_ptr()?, dest.layout.size, false)?;
+                            let ptr = mplace.ptr.to_ptr()?;
+                            self.memory_mut()
+                                .get_mut(ptr.alloc_id)?
+                                .mark_definedness(ptr, dest.layout.size, false)?;
                         }
                     }
                 }
@@ -439,7 +446,13 @@ fn call_intrinsic(
                 let ptr = self.read_scalar(args[0])?.not_undef()?;
                 let count = self.read_scalar(args[2])?.to_usize(self)?;
                 self.memory().check_align(ptr, ty_layout.align.abi)?;
-                self.memory_mut().write_repeat(ptr, val_byte, ty_layout.size * count)?;
+                let byte_count = ty_layout.size * count;
+                if byte_count.bytes() != 0 {
+                    let ptr = ptr.to_ptr()?;
+                    self.memory_mut()
+                        .get_mut(ptr.alloc_id)?
+                        .write_repeat(tcx, ptr, val_byte, byte_count)?;
+                }
             }
 
             name => return err!(Unimplemented(format!("unimplemented intrinsic: {}", name))),
index 2f3a0de999adf86c154339e4a3a8b3b77ed91cac..e1ccdf91995efaab70fbda980b1ca4c1cd0fda18 100644 (file)
@@ -142,10 +142,12 @@ fn ptr_eq(
                     // allocations sit right next to each other.  The C/C++ standards are
                     // somewhat fuzzy about this case, so I think for now this check is
                     // "good enough".
-                    // We require liveness, as dead allocations can of course overlap.
-                    self.memory().check_bounds_ptr(left, InboundsCheck::Live)?;
-                    self.memory().check_bounds_ptr(right, InboundsCheck::Live)?;
-                    // Two live in-bounds pointers, we can compare across allocations
+                    // Dead allocations in miri cannot overlap with live allocations, but
+                    // on real hardware this can easily happen. Thus for comparisons we require
+                    // both pointers to be live.
+                    self.memory().get(left.alloc_id)?.check_bounds_ptr(left)?;
+                    self.memory().get(right.alloc_id)?.check_bounds_ptr(right)?;
+                    // Two in-bounds pointers, we can compare across allocations
                     left == right
                 }
             }
@@ -158,7 +160,8 @@ fn ptr_eq(
                 // Case I: Comparing with NULL
                 if bits == 0 {
                     // Test if the ptr is in-bounds. Then it cannot be NULL.
-                    if self.memory().check_bounds_ptr(ptr, InboundsCheck::MaybeDead).is_ok() {
+                    // Even dangling pointers cannot be NULL.
+                    if self.memory().check_bounds_ptr_maybe_dead(ptr).is_ok() {
                         return Ok(false);
                     }
                 }
@@ -298,9 +301,10 @@ fn pointer_offset_inbounds(
         if let Scalar::Ptr(ptr) = ptr {
             // Both old and new pointer must be in-bounds of a *live* allocation.
             // (Of the same allocation, but that part is trivial with our representation.)
-            self.memory().check_bounds_ptr(ptr, InboundsCheck::Live)?;
+            let alloc = self.memory().get(ptr.alloc_id)?;
+            alloc.check_bounds_ptr(ptr)?;
             let ptr = ptr.signed_offset(offset, self)?;
-            self.memory().check_bounds_ptr(ptr, InboundsCheck::Live)?;
+            alloc.check_bounds_ptr(ptr)?;
             Ok(Scalar::Ptr(ptr))
         } else {
             // An integer pointer. They can only be offset by 0, and we pretend there
index f26ef40ea9f8342623f43e498d8387388b86f203..f292d083637ac6647b885dc2822d051e7e667fa1 100644 (file)
@@ -5,7 +5,7 @@
 
 use crate::{
     EvalResult, EvalErrorKind, MiriEvalContext, HelpersEvalContextExt, Evaluator, MutValueVisitor,
-    MemoryKind, MiriMemoryKind, RangeMap, AllocId, Allocation, AllocationExtra, InboundsCheck,
+    MemoryKind, MiriMemoryKind, RangeMap, AllocId, Allocation, AllocationExtra,
     Pointer, MemPlace, Scalar, Immediate, ImmTy, PlaceTy, MPlaceTy,
 };
 
@@ -500,8 +500,8 @@ fn ptr_dereference(
         }
 
         // Get the allocation
-        self.memory().check_bounds(ptr, size, InboundsCheck::Live)?;
-        let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!");
+        let alloc = self.memory().get(ptr.alloc_id)?;
+        alloc.check_bounds(self, ptr, size)?;
         // If we got here, we do some checking, *but* we leave the tag unchanged.
         if let Borrow::Shr(Some(_)) = ptr.tag {
             assert_eq!(mutability, Some(MutImmutable));
@@ -543,8 +543,8 @@ fn reborrow(
             ptr, place.layout.ty, new_bor);
 
         // Get the allocation.  It might not be mutable, so we cannot use `get_mut`.
-        self.memory().check_bounds(ptr, size, InboundsCheck::Live)?;
-        let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!");
+        let alloc = self.memory().get(ptr.alloc_id)?;
+        alloc.check_bounds(self, ptr, size)?;
         // Update the stacks.
         if let Borrow::Shr(Some(_)) = new_bor {
             // Reference that cares about freezing. We need a frozen-sensitive reborrow.