From: Oliver Scherer Date: Tue, 13 Nov 2018 16:19:42 +0000 (+0100) Subject: Update to Memory -> Allocation method move X-Git-Url: https://git.lizzy.rs/?a=commitdiff_plain;h=32e93ed7762e5aa1a721636096848fc3c7bc7218;p=rust.git Update to Memory -> Allocation method move --- diff --git a/src/fn_call.rs b/src/fn_call.rs index e64d4f69548..e9d3255a5b3 100644 --- a/src/fn_call.rs +++ b/src/fn_call.rs @@ -114,6 +114,8 @@ fn emulate_foreign_item( None => self.tcx.item_name(def_id).as_str(), }; + let tcx = &{self.tcx.tcx}; + // All these functions take raw pointers, so if we access memory directly // (as opposed to through a place), we have to remember to erase any tag // that might still hang around! @@ -175,7 +177,9 @@ fn emulate_foreign_item( MiriMemoryKind::Rust.into() )? .with_default_tag(); - self.memory_mut().write_repeat(ptr.into(), 0, Size::from_bytes(size))?; + self.memory_mut() + .get_mut(ptr.alloc_id)? + .write_repeat(tcx, ptr, 0, Size::from_bytes(size))?; self.write_scalar(Scalar::Ptr(ptr), dest)?; } "__rust_dealloc" => { @@ -239,7 +243,7 @@ fn emulate_foreign_item( "dlsym" => { let _handle = self.read_scalar(args[0])?; let symbol = self.read_scalar(args[1])?.to_ptr()?; - let symbol_name = self.memory().read_c_str(symbol)?; + let symbol_name = self.memory().get(symbol.alloc_id)?.read_c_str(tcx, symbol)?; let err = format!("bad c unicode symbol: {:?}", symbol_name); let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err); return err!(Unimplemented(format!( @@ -346,7 +350,7 @@ fn emulate_foreign_item( "getenv" => { let result = { let name_ptr = self.read_scalar(args[0])?.to_ptr()?; - let name = self.memory().read_c_str(name_ptr)?; + let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?; match self.machine.env_vars.get(name) { Some(&var) => Scalar::Ptr(var), None => Scalar::ptr_null(&*self.tcx), @@ -360,8 +364,8 @@ fn emulate_foreign_item( { let name_ptr = self.read_scalar(args[0])?.not_undef()?; if !name_ptr.is_null_ptr(self) { - 
let name = self.memory().read_c_str(name_ptr.to_ptr()? - )?.to_owned(); + let name_ptr = name_ptr.to_ptr()?; + let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?.to_owned(); if !name.is_empty() && !name.contains(&b'=') { success = Some(self.machine.env_vars.remove(&name)); } @@ -382,9 +386,10 @@ fn emulate_foreign_item( { let name_ptr = self.read_scalar(args[0])?.not_undef()?; let value_ptr = self.read_scalar(args[1])?.to_ptr()?; - let value = self.memory().read_c_str(value_ptr)?; + let value = self.memory().get(value_ptr.alloc_id)?.read_c_str(tcx, value_ptr)?; if !name_ptr.is_null_ptr(self) { - let name = self.memory().read_c_str(name_ptr.to_ptr()?)?; + let name_ptr = name_ptr.to_ptr()?; + let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?; if !name.is_empty() && !name.contains(&b'=') { new = Some((name.to_owned(), value.to_owned())); } @@ -397,9 +402,12 @@ fn emulate_foreign_item( Align::from_bytes(1).unwrap(), MiriMemoryKind::Env.into(), )?.with_default_tag(); - self.memory_mut().write_bytes(value_copy.into(), &value)?; - let trailing_zero_ptr = value_copy.offset(Size::from_bytes(value.len() as u64), self)?.into(); - self.memory_mut().write_bytes(trailing_zero_ptr, &[0])?; + { + let alloc = self.memory_mut().get_mut(value_copy.alloc_id)?; + alloc.write_bytes(tcx, value_copy, &value)?; + let trailing_zero_ptr = value_copy.offset(Size::from_bytes(value.len() as u64), tcx)?; + alloc.write_bytes(tcx, trailing_zero_ptr, &[0])?; + } if let Some(var) = self.machine.env_vars.insert( name.to_owned(), value_copy, @@ -444,7 +452,7 @@ fn emulate_foreign_item( "strlen" => { let ptr = self.read_scalar(args[0])?.to_ptr()?; - let n = self.memory().read_c_str(ptr)?.len(); + let n = self.memory().get(ptr.alloc_id)?.read_c_str(tcx, ptr)?.len(); self.write_scalar(Scalar::from_uint(n as u64, dest.layout.size), dest)?; } @@ -507,13 +515,15 @@ fn emulate_foreign_item( let key_layout = self.layout_of(key_type)?; // Create key and write 
it into the memory where key_ptr wants it - let key = self.machine.tls.create_tls_key(dtor, &*self.tcx) as u128; + let key = self.machine.tls.create_tls_key(dtor, tcx) as u128; if key_layout.size.bits() < 128 && key >= (1u128 << key_layout.size.bits() as u128) { return err!(OutOfTls); } - self.memory_mut().write_scalar( + + self.memory().check_align(key_ptr.into(), key_layout.align.abi)?; + self.memory_mut().get_mut(key_ptr.alloc_id)?.write_scalar( + tcx, key_ptr, - key_layout.align.abi, Scalar::from_uint(key, key_layout.size).into(), key_layout.size, )?; @@ -610,7 +620,7 @@ fn emulate_foreign_item( // This just creates a key; Windows does not natively support TLS dtors. // Create key and return it - let key = self.machine.tls.create_tls_key(None, &*self.tcx) as u128; + let key = self.machine.tls.create_tls_key(None, tcx) as u128; // Figure out how large a TLS key actually is. This is c::DWORD. if dest.layout.size.bits() < 128 && key >= (1u128 << dest.layout.size.bits() as u128) { diff --git a/src/intrinsic.rs b/src/intrinsic.rs index 66dab00e097..c9b16525e56 100644 --- a/src/intrinsic.rs +++ b/src/intrinsic.rs @@ -28,7 +28,7 @@ fn call_intrinsic( if self.emulate_intrinsic(instance, args, dest)? { return Ok(()); } - + let tcx = &{self.tcx.tcx}; let substs = instance.substs; // All these intrinsics take raw pointers, so if we access memory directly @@ -248,6 +248,7 @@ fn call_intrinsic( // FIXME: We do not properly validate in case of ZSTs and when doing it in memory! // However, this only affects direct calls of the intrinsic; calls to the stable // functions wrapping them do get their validation. + // FIXME: should we check that the destination pointer is aligned even for ZSTs? 
if !dest.layout.is_zst() { // nothing to do for ZST match dest.layout.abi { layout::Abi::Scalar(ref s) => { @@ -263,7 +264,9 @@ fn call_intrinsic( // Do it in memory let mplace = self.force_allocation(dest)?; assert!(mplace.meta.is_none()); - self.memory_mut().write_repeat(mplace.ptr, 0, dest.layout.size)?; + // not a zst, must be valid pointer + let ptr = mplace.ptr.to_ptr()?; + self.memory_mut().get_mut(ptr.alloc_id)?.write_repeat(tcx, ptr, 0, dest.layout.size)?; } } } @@ -412,6 +415,7 @@ fn call_intrinsic( // FIXME: We do not properly validate in case of ZSTs and when doing it in memory! // However, this only affects direct calls of the intrinsic; calls to the stable // functions wrapping them do get their validation. + // FIXME: should we check alignment for ZSTs? if !dest.layout.is_zst() { // nothing to do for ZST match dest.layout.abi { layout::Abi::Scalar(..) => { @@ -426,7 +430,10 @@ fn call_intrinsic( // Do it in memory let mplace = self.force_allocation(dest)?; assert!(mplace.meta.is_none()); - self.memory_mut().mark_definedness(mplace.ptr.to_ptr()?, dest.layout.size, false)?; + let ptr = mplace.ptr.to_ptr()?; + self.memory_mut() + .get_mut(ptr.alloc_id)? + .mark_definedness(ptr, dest.layout.size, false)?; } } } @@ -439,7 +446,13 @@ fn call_intrinsic( let ptr = self.read_scalar(args[0])?.not_undef()?; let count = self.read_scalar(args[2])?.to_usize(self)?; self.memory().check_align(ptr, ty_layout.align.abi)?; - self.memory_mut().write_repeat(ptr, val_byte, ty_layout.size * count)?; + let byte_count = ty_layout.size * count; + if byte_count.bytes() != 0 { + let ptr = ptr.to_ptr()?; + self.memory_mut() + .get_mut(ptr.alloc_id)? 
+                    .write_repeat(tcx, ptr, val_byte, byte_count)?;
+            }
         }
 
         name => return err!(Unimplemented(format!("unimplemented intrinsic: {}", name))),
diff --git a/src/operator.rs b/src/operator.rs
index 2f3a0de999a..e1ccdf91995 100644
--- a/src/operator.rs
+++ b/src/operator.rs
@@ -142,10 +142,12 @@ fn ptr_eq(
                     // allocations sit right next to each other. The C/C++ standards are
                     // somewhat fuzzy about this case, so I think for now this check is
                     // "good enough".
-                    // We require liveness, as dead allocations can of course overlap.
-                    self.memory().check_bounds_ptr(left, InboundsCheck::Live)?;
-                    self.memory().check_bounds_ptr(right, InboundsCheck::Live)?;
-                    // Two live in-bounds pointers, we can compare across allocations
+                    // Dead allocations in miri cannot overlap with live allocations, but
+                    // on real hardware this can easily happen. Thus for comparisons we require
+                    // both pointers to be live.
+                    self.memory().get(left.alloc_id)?.check_bounds_ptr(left)?;
+                    self.memory().get(right.alloc_id)?.check_bounds_ptr(right)?;
+                    // Two in-bounds pointers, we can compare across allocations
                     left == right
                 }
             }
@@ -158,7 +160,8 @@ fn ptr_eq(
                 // Case I: Comparing with NULL
                 if bits == 0 {
                     // Test if the ptr is in-bounds. Then it cannot be NULL.
-                    if self.memory().check_bounds_ptr(ptr, InboundsCheck::MaybeDead).is_ok() {
+                    // Even dangling pointers cannot be NULL.
+                    if self.memory().check_bounds_ptr_maybe_dead(ptr).is_ok() {
                         return Ok(false);
                     }
                 }
@@ -298,9 +301,10 @@ fn pointer_offset_inbounds(
         if let Scalar::Ptr(ptr) = ptr {
             // Both old and new pointer must be in-bounds of a *live* allocation.
             // (Of the same allocation, but that part is trivial with our representation.)
- self.memory().check_bounds_ptr(ptr, InboundsCheck::Live)?; + let alloc = self.memory().get(ptr.alloc_id)?; + alloc.check_bounds_ptr(ptr)?; let ptr = ptr.signed_offset(offset, self)?; - self.memory().check_bounds_ptr(ptr, InboundsCheck::Live)?; + alloc.check_bounds_ptr(ptr)?; Ok(Scalar::Ptr(ptr)) } else { // An integer pointer. They can only be offset by 0, and we pretend there diff --git a/src/stacked_borrows.rs b/src/stacked_borrows.rs index f26ef40ea9f..f292d083637 100644 --- a/src/stacked_borrows.rs +++ b/src/stacked_borrows.rs @@ -5,7 +5,7 @@ use crate::{ EvalResult, EvalErrorKind, MiriEvalContext, HelpersEvalContextExt, Evaluator, MutValueVisitor, - MemoryKind, MiriMemoryKind, RangeMap, AllocId, Allocation, AllocationExtra, InboundsCheck, + MemoryKind, MiriMemoryKind, RangeMap, AllocId, Allocation, AllocationExtra, Pointer, MemPlace, Scalar, Immediate, ImmTy, PlaceTy, MPlaceTy, }; @@ -500,8 +500,8 @@ fn ptr_dereference( } // Get the allocation - self.memory().check_bounds(ptr, size, InboundsCheck::Live)?; - let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!"); + let alloc = self.memory().get(ptr.alloc_id)?; + alloc.check_bounds(self, ptr, size)?; // If we got here, we do some checking, *but* we leave the tag unchanged. if let Borrow::Shr(Some(_)) = ptr.tag { assert_eq!(mutability, Some(MutImmutable)); @@ -543,8 +543,8 @@ fn reborrow( ptr, place.layout.ty, new_bor); // Get the allocation. It might not be mutable, so we cannot use `get_mut`. - self.memory().check_bounds(ptr, size, InboundsCheck::Live)?; - let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!"); + let alloc = self.memory().get(ptr.alloc_id)?; + alloc.check_bounds(self, ptr, size)?; // Update the stacks. if let Borrow::Shr(Some(_)) = new_bor { // Reference that cares about freezing. We need a frozen-sensitive reborrow.