Several `-Z` flags are relevant for Miri:
-* `-Zmiri-seed=<hex>` is a custom `-Z` flag added by Miri. It enables the
- interpreted program to seed an RNG with system entropy. Miri will keep an RNG
- on its own that is seeded with the given seed, and use that to generate the
- "system entropy" that seeds the RNG(s) in the interpreted program.
+* `-Zmiri-seed=<hex>` is a custom `-Z` flag added by Miri. It configures the
+ seed of the RNG that Miri uses to resolve non-determinism. This RNG is used
+ to pick base addresses for allocations, and when the interpreted program
+ requests system entropy. The default seed is 0.
**NOTE**: This entropy is not good enough for cryptographic use! Do not
generate secret keys in Miri or perform other kinds of cryptographic
operations that rely on proper random numbers.
tcx.at(syntax::source_map::DUMMY_SP),
ty::ParamEnv::reveal_all(),
Evaluator::new(),
- MemoryExtra::new(config.seed.map(StdRng::seed_from_u64), config.validate),
+ MemoryExtra::new(StdRng::seed_from_u64(config.seed.unwrap_or(0)), config.validate),
);
let main_instance = ty::Instance::mono(ecx.tcx.tcx, main_id);
None => return Ok(()), // zero-sized access
};
- let data = match &mut this.memory_mut().extra.rng {
- Some(rng) => {
- let mut rng = rng.borrow_mut();
- let mut data = vec![0; len];
- rng.fill_bytes(&mut data);
- data
- }
- None => {
- return err!(Unimplemented(
- "miri does not support gathering system entropy in deterministic mode!
- Use '-Zmiri-seed=<seed>' to enable random number generation.
- WARNING: Miri does *not* generate cryptographically secure entropy -
- do not use Miri to run any program that needs secure random number generation".to_owned(),
- ));
- }
- };
+ let rng = this.memory_mut().extra.rng.get_mut();
+ let mut data = vec![0; len];
+ rng.fill_bytes(&mut data);
+
let tcx = &{this.tcx.tcx};
- this.memory_mut().get_mut(ptr.alloc_id)?
- .write_bytes(tcx, ptr, &data)
+ this.memory_mut().get_mut(ptr.alloc_id)?.write_bytes(tcx, ptr, &data)
}
/// Visits the memory covered by `place`, sensitive to freezing: the 3rd parameter
int: u64,
memory: &Memory<'mir, 'tcx, Evaluator<'tcx>>,
) -> InterpResult<'tcx, Pointer<Tag>> {
+ if int == 0 {
+ return err!(InvalidNullPointerUsage);
+ }
+
let global_state = memory.extra.intptrcast.borrow();
match global_state.int_to_ptr_map.binary_search_by_key(&int, |(addr, _)| *addr) {
// This allocation does not have a base address yet, pick one.
// Leave some space to the previous allocation, to give it some chance to be less aligned.
let slack = {
- let mut rng = memory.extra.rng.as_ref().unwrap().borrow_mut();
+ let mut rng = memory.extra.rng.borrow_mut();
// This means that `(global_state.next_base_addr + slack) % 16` is uniformly distributed.
rng.gen_range(0, 16)
};
// From next_base_addr + slack, round up to adjust for alignment.
- let base_addr = Self::align_addr(global_state.next_base_addr + slack, align.bytes());
+ let base_addr = global_state.next_base_addr.checked_add(slack).unwrap();
+ let base_addr = Self::align_addr(base_addr, align.bytes());
entry.insert(base_addr);
trace!(
"Assigning base address {:#x} to allocation {:?} (slack: {}, align: {})",
// Remember next base address. If this allocation is zero-sized, leave a gap
// of at least 1 to avoid two allocations having the same base address.
- global_state.next_base_addr = base_addr + max(size.bytes(), 1);
+ global_state.next_base_addr = base_addr.checked_add(max(size.bytes(), 1)).unwrap();
// Given that `next_base_addr` increases in each allocation, pushing the
// corresponding tuple keeps `int_to_ptr_map` sorted
global_state.int_to_ptr_map.push((base_addr, ptr.alloc_id));
fn align_addr(addr: u64, align: u64) -> u64 {
match addr % align {
0 => addr,
- rem => addr + align - rem
+ rem => addr.checked_add(align).unwrap() - rem
}
}
}
pub stacked_borrows: stacked_borrows::MemoryExtra,
pub intptrcast: intptrcast::MemoryExtra,
- /// The random number generator to use if Miri is running in non-deterministic mode and to
- /// enable intptrcast
- pub(crate) rng: Option<RefCell<StdRng>>,
+ /// The random number generator used for resolving non-determinism.
+ pub(crate) rng: RefCell<StdRng>,
/// Whether to enforce the validity invariant.
pub(crate) validate: bool,
}
impl MemoryExtra {
- pub fn new(rng: Option<StdRng>, validate: bool) -> Self {
+ pub fn new(rng: StdRng, validate: bool) -> Self {
MemoryExtra {
stacked_borrows: Default::default(),
intptrcast: Default::default(),
- rng: rng.map(RefCell::new),
+ rng: RefCell::new(rng),
validate,
}
}
Ok(ecx.memory().extra.stacked_borrows.borrow_mut().end_call(extra))
}
+ #[inline(always)]
fn int_to_ptr(
memory: &Memory<'mir, 'tcx, Self>,
int: u64,
) -> InterpResult<'tcx, Pointer<Self::PointerTag>> {
- if int == 0 {
- err!(InvalidNullPointerUsage)
- } else if memory.extra.rng.is_none() {
- err!(ReadBytesAsPointer)
- } else {
- intptrcast::GlobalState::int_to_ptr(int, memory)
- }
+ intptrcast::GlobalState::int_to_ptr(int, memory)
}
+ #[inline(always)]
fn ptr_to_int(
memory: &Memory<'mir, 'tcx, Self>,
ptr: Pointer<Self::PointerTag>,
) -> InterpResult<'tcx, u64> {
- if memory.extra.rng.is_none() {
- err!(ReadPointerAsBytes)
- } else {
- intptrcast::GlobalState::ptr_to_int(ptr, memory)
- }
+ intptrcast::GlobalState::ptr_to_int(ptr, memory)
}
}
trace!("ptr_op: {:?} {:?} {:?}", *left, bin_op, *right);
- // If intptrcast is enabled, treat everything of integer *type* at integer *value*.
- if self.memory().extra.rng.is_some() && left.layout.ty.is_integral() {
+ // Treat everything of integer *type* as integer *value*.
+ if left.layout.ty.is_integral() {
// This is actually an integer operation, so dispatch back to the core engine.
// TODO: Once intptrcast is the default, librustc_mir should never even call us
// for integer types.
right: Scalar<Tag>,
) -> InterpResult<'tcx, bool> {
let size = self.pointer_size();
- if self.memory().extra.rng.is_some() {
- // Just compare the integers.
- // TODO: Do we really want to *always* do that, even when comparing two live in-bounds pointers?
- let left = self.force_bits(left, size)?;
- let right = self.force_bits(right, size)?;
- return Ok(left == right);
- }
- Ok(match (left, right) {
- (Scalar::Raw { .. }, Scalar::Raw { .. }) =>
- left.to_bits(size)? == right.to_bits(size)?,
- (Scalar::Ptr(left), Scalar::Ptr(right)) => {
- // Comparison illegal if one of them is out-of-bounds, *unless* they
- // are in the same allocation.
- if left.alloc_id == right.alloc_id {
- left.offset == right.offset
- } else {
- // Make sure both pointers are in-bounds.
- // This accepts one-past-the end. Thus, there is still technically
- // some non-determinism that we do not fully rule out when two
- // allocations sit right next to each other. The C/C++ standards are
- // somewhat fuzzy about this case, so pragmatically speaking I think
- // for now this check is "good enough".
- // FIXME: Once we support intptrcast, we could try to fix these holes.
- // Dead allocations in miri cannot overlap with live allocations, but
- // on read hardware this can easily happen. Thus for comparisons we require
- // both pointers to be live.
- if self.pointer_inbounds(left).is_ok() && self.pointer_inbounds(right).is_ok() {
- // Two in-bounds (and hence live) pointers in different allocations are different.
- false
- } else {
- return err!(InvalidPointerMath);
- }
- }
- }
- // Comparing ptr and integer.
- (Scalar::Ptr(ptr), Scalar::Raw { data, size }) |
- (Scalar::Raw { data, size }, Scalar::Ptr(ptr)) => {
- assert_eq!(size as u64, self.pointer_size().bytes());
- let bits = data as u64;
-
- // Case I: Comparing real pointers with "small" integers.
- // Really we should only do this for NULL, but pragmatically speaking on non-bare-metal systems,
- // an allocation will never be at the very bottom of the address space.
- // Such comparisons can arise when comparing empty slices, which sometimes are "fake"
- // integer pointers (okay because the slice is empty) and sometimes point into a
- // real allocation.
- // The most common source of such integer pointers is `NonNull::dangling()`, which
- // equals the type's alignment. i128 might have an alignment of 16 bytes, but few types have
- // alignment 32 or higher, hence the limit of 32.
- // FIXME: Once we support intptrcast, we could try to fix these holes.
- if bits < 32 {
- // Test if the pointer can be different from NULL or not.
- // We assume that pointers that are not NULL are also not "small".
- if !self.memory().ptr_may_be_null(ptr) {
- return Ok(false);
- }
- }
-
- let (alloc_size, alloc_align) = self.memory()
- .get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)
- .expect("alloc info with MaybeDead cannot fail");
-
- // Case II: Alignment gives it away
- if ptr.offset.bytes() % alloc_align.bytes() == 0 {
- // The offset maintains the allocation alignment, so we know `base+offset`
- // is aligned by `alloc_align`.
- // FIXME: We could be even more general, e.g., offset 2 into a 4-aligned
- // allocation cannot equal 3.
- if bits % alloc_align.bytes() != 0 {
- // The integer is *not* aligned. So they cannot be equal.
- return Ok(false);
- }
- }
- // Case III: The integer is too big, and the allocation goes on a bit
- // without wrapping around the address space.
- {
- // Compute the highest address at which this allocation could live.
- // Substract one more, because it must be possible to add the size
- // to the base address without overflowing; that is, the very last address
- // of the address space is never dereferencable (but it can be in-bounds, i.e.,
- // one-past-the-end).
- let max_base_addr =
- ((1u128 << self.pointer_size().bits())
- - u128::from(alloc_size.bytes())
- - 1
- ) as u64;
- if let Some(max_addr) = max_base_addr.checked_add(ptr.offset.bytes()) {
- if bits > max_addr {
- // The integer is too big, this cannot possibly be equal.
- return Ok(false)
- }
- }
- }
-
- // None of the supported cases.
- return err!(InvalidPointerMath);
- }
- })
+ // Just compare the integers.
+ // TODO: Do we really want to *always* do that, even when comparing two live in-bounds pointers?
+ let left = self.force_bits(left, size)?;
+ let right = self.force_bits(right, size)?;
+ Ok(left == right)
}
fn ptr_int_arithmetic(
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(dead_code)]
-
-use std::mem;
-
-enum Tag<A> {
- Tag2(A)
-}
-
-struct Rec {
- c8: u8,
- t: Tag<u64>
-}
-
-fn mk_rec() -> Rec {
- return Rec { c8:0, t:Tag::Tag2(0) };
-}
-
-fn is_u64_aligned(u: &Tag<u64>) -> bool {
- let p: usize = unsafe { mem::transmute(u) };
- let u64_align = std::mem::align_of::<u64>();
- return (p & (u64_align + 1)) == 0; //~ ERROR a raw memory access tried to access part of a pointer value as raw bytes
-}
-
-pub fn main() {
- let x = mk_rec();
- assert!(is_u64_aligned(&x.t));
-}
fn main() {
let b = Box::new(42);
let g = unsafe {
- std::mem::transmute::<&usize, &fn(i32)>(&b)
+ std::mem::transmute::<&Box<usize>, &fn(i32)>(&b)
};
- (*g)(42) //~ ERROR a memory access tried to interpret some bytes as a pointer
+ (*g)(42) //~ ERROR tried to treat a memory pointer as a function pointer
}
std::mem::transmute::<usize, fn(i32)>(42)
};
- g(42) //~ ERROR a memory access tried to interpret some bytes as a pointer
+ g(42) //~ ERROR dangling pointer was dereferenced
}
+++ /dev/null
-// ignore-macos: Uses Linux-only APIs
-// ignore-windows: Uses Linux-only APIs
-
-#![feature(rustc_private)]
-extern crate libc;
-
-fn main() {
- let mut buf = [0u8; 5];
- unsafe {
- libc::syscall(libc::SYS_getrandom, buf.as_mut_ptr() as *mut libc::c_void, 5 as libc::size_t, 0 as libc::c_uint);
- //~^ ERROR miri does not support gathering system entropy in deterministic mode!
- }
-}
// Validation makes this fail in the wrong place
-// compile-flags: -Zmiri-disable-validation -Zmiri-seed=0000000000000000
+// compile-flags: -Zmiri-disable-validation
// Even with intptrcast and without validation, we want to be *sure* to catch bugs
// that arise from pointers being insufficiently aligned. The only way to achieve
+++ /dev/null
-// Validation makes this fail in the wrong place
-// compile-flags: -Zmiri-disable-validation -Zmiri-seed=0000000000000000
-
-fn main() {
- let g = unsafe {
- std::mem::transmute::<usize, fn(i32)>(42)
- };
-
- g(42) //~ ERROR dangling pointer was dereferenced
-}
+++ /dev/null
-// compile-flags: -Zmiri-seed=0000000000000000
-
-fn main() {
- let x: i32 = unsafe { *std::ptr::null() }; //~ ERROR invalid use of NULL pointer
- panic!("this should never print: {}", x);
-}
+++ /dev/null
-// compile-flags: -Zmiri-seed=0000000000000000
-
-fn main() {
- let p = 44 as *const i32;
- let x = unsafe { *p }; //~ ERROR dangling pointer was dereferenced
- panic!("this should never print: {}", x);
-}
--- /dev/null
+fn main() {
+ let x = 13;
+ let y = &x;
+ let z = &y as *const &i32 as *const u8;
+ // the deref fails, because we are reading only a part of the pointer
+ let _val = unsafe { *z }; //~ ERROR tried to access part of a pointer value as raw bytes
+}
+++ /dev/null
-fn main() {
- let x = 13;
- let y = &x;
- let z = &y as *const &i32 as *const usize;
- let ptr_bytes = unsafe { *z }; // the actual deref is fine, because we read the entire pointer at once
- let _val = ptr_bytes / 432; //~ ERROR invalid arithmetic on pointers that would leak base addresses
-}
+++ /dev/null
-fn main() {
- let x = 13;
- let y = &x;
- let z = &y as *const &i32 as *const u8;
- // the deref fails, because we are reading only a part of the pointer
- let _val = unsafe { *z }; //~ ERROR tried to access part of a pointer value as raw bytes
-}
+++ /dev/null
-fn main() {
- let bytes = [0i8, 1, 2, 3, 4, 5, 6, 7, 8, 9];
- let one = bytes.as_ptr().wrapping_offset(1);
- let three = bytes.as_ptr().wrapping_offset(3);
- let res = (one as usize) | (three as usize); //~ ERROR invalid arithmetic on pointers that would leak base addresses
- println!("{}", res);
-}
+++ /dev/null
-fn main() {
- let val = 13usize;
- let addr = &val as *const _ as usize;
- let _val = addr & 13; //~ ERROR access part of a pointer value as raw bytes
-}
+++ /dev/null
-fn main() {
- let b = Box::new(0);
- let x = &*b as *const i32; // soon-to-be dangling
- drop(b);
- let b = Box::new(0);
- let y = &*b as *const i32; // different allocation
- // We cannot compare these even though both are inbounds -- they *could* be
- // equal if memory was reused.
- assert!(x != y); //~ ERROR invalid arithmetic on pointers
-}
+++ /dev/null
-fn main() {
- let b = Box::new(0);
- let x = &*b as *const i32;
- // We cannot compare this with a non-NULL integer. After all, these *could* be equal (with the right base address).
- assert!(x != 64 as *const i32); //~ ERROR invalid arithmetic on pointers
-}
+++ /dev/null
-fn main() {
- let b = Box::new(0);
- let x = (&*b as *const i32).wrapping_sub(0x800); // out-of-bounds
- let b = Box::new(0);
- let y = &*b as *const i32; // different allocation
- // We cannot compare these even though both allocations are live -- they *could* be
- // equal (with the right base addresses).
- assert!(x != y); //~ ERROR invalid arithmetic on pointers
-}
+++ /dev/null
-fn main() {
- let b = Box::new(0);
- let x = (&*b as *const i32).wrapping_sub(0x800); // out-of-bounds
- // We cannot compare this with NULL. After all, this *could* be NULL (with the right base address).
- assert!(x != std::ptr::null()); //~ ERROR invalid arithmetic on pointers
-}
+++ /dev/null
-fn main() {
- let x = &1;
- // Casting down to u8 and back up to a pointer loses too much precision; this must not work.
- let x = x as *const i32;
- let x = x as u8; //~ ERROR a raw memory access tried to access part of a pointer value as raw bytes
- let x = x as *const i32;
- let _val = unsafe { *x };
-}
-// error-pattern: tried to interpret some bytes as a pointer
+// error-pattern: dangling pointer was dereferenced
fn main() {
// Can't offset an integer pointer by non-zero offset.
-// error-pattern: pointer value as raw bytes
+// error-pattern: dangling pointer was dereferenced
fn main() {
let ptr = Box::into_raw(Box::new(0u32));
+++ /dev/null
-fn main() {
- let val = 13usize;
- let addr = &val as *const _ as usize;
- let _val = addr % 16; //~ ERROR access part of a pointer value as raw bytes
-}
+++ /dev/null
-// error-pattern: pointer value as raw bytes
-
-fn main() {
- let ptr = Box::into_raw(Box::new(0u32));
- // Can't start with an integer pointer and get to something usable
- let ptr = (1 as *mut u8).wrapping_offset(ptr as isize);
- let _val = unsafe { *ptr };
-}
use std::mem;
fn main() {
- let _x: &i32 = unsafe { mem::transmute(16usize) }; //~ ERROR dangling reference (created from integer)
+ let _x: &i32 = unsafe { mem::transmute(16usize) }; //~ ERROR dangling reference (not entirely in bounds)
}
fn main() {
let p = 44 as *const i32;
- let x = unsafe { *p }; //~ ERROR a memory access tried to interpret some bytes as a pointer
+ let x = unsafe { *p }; //~ ERROR dangling pointer was dereferenced
panic!("this should never print: {}", x);
}
run_tests("compile-fail", path, target, flags);
}
-fn miri_pass(path: &str, target: &str, opt: bool, noseed: bool) {
+fn miri_pass(path: &str, target: &str, opt: bool) {
let opt_str = if opt { " with optimizations" } else { "" };
eprintln!("{}", format!(
"## Running run-pass tests in {} against miri for target {}{}",
let mut flags = Vec::new();
if opt {
flags.push("-Zmir-opt-level=3".to_owned());
- } else if !noseed {
- // Run with intptrcast. Avoid test matrix explosion by doing either this or opt-level=3.
- flags.push("-Zmiri-seed=".to_owned());
}
run_tests("ui", path, target, flags);
}
fn run_pass_miri(opt: bool) {
- miri_pass("tests/run-pass", &get_target(), opt, false);
- miri_pass("tests/run-pass-noseed", &get_target(), opt, true);
+ miri_pass("tests/run-pass", &get_target(), opt);
}
fn compile_fail_miri(opt: bool) {
+++ /dev/null
-// compile-flags: -Zmiri-seed=0000000000000000
-
-use std::collections::{self, HashMap};
-use std::hash::{BuildHasherDefault, BuildHasher};
-
-fn test_map<S: BuildHasher>(mut map: HashMap<i32, i32, S>) {
- map.insert(0, 0);
- assert_eq!(map.values().fold(0, |x, y| x+y), 0);
-
- let num = 25;
- for i in 1..num {
- map.insert(i, i);
- }
- assert_eq!(map.values().fold(0, |x, y| x+y), num*(num-1)/2); // check the right things are in the table now
-
- // Inserting again replaces the existing entries
- for i in 0..num {
- map.insert(i, num-1-i);
- }
- assert_eq!(map.values().fold(0, |x, y| x+y), num*(num-1)/2);
-
- // TODO: Test Entry API, Iterators, ...
-
-}
-
-fn main() {
- if cfg!(target_os = "macos") { // TODO: Implement libstd HashMap seeding for macOS (https://github.com/rust-lang/miri/issues/686).
- // Until then, use a deterministic map.
- test_map::<BuildHasherDefault<collections::hash_map::DefaultHasher>>(HashMap::default());
- } else {
- test_map(HashMap::new());
- }
-}
+++ /dev/null
-// compile-flags: -Zmiri-seed=
-#![feature(allocator_api)]
-
-use std::ptr::NonNull;
-use std::alloc::{Global, Alloc, Layout, System};
-use std::slice;
-
-fn check_alloc<T: Alloc>(mut allocator: T) { unsafe {
- for &align in &[4, 8, 16, 32] {
- let layout = Layout::from_size_align(20, align).unwrap();
-
- for _ in 0..32 {
- let a = allocator.alloc(layout).unwrap();
- assert_eq!(a.as_ptr() as usize % align, 0, "pointer is incorrectly aligned");
- allocator.dealloc(a, layout);
- }
-
- let p1 = allocator.alloc_zeroed(layout).unwrap();
- assert_eq!(p1.as_ptr() as usize % align, 0, "pointer is incorrectly aligned");
-
- let p2 = allocator.realloc(p1, layout, 40).unwrap();
- let layout = Layout::from_size_align(40, align).unwrap();
- assert_eq!(p2.as_ptr() as usize % align, 0, "pointer is incorrectly aligned");
- let slice = slice::from_raw_parts(p2.as_ptr(), 20);
- assert_eq!(&slice, &[0_u8; 20]);
-
- // old size == new size
- let p3 = allocator.realloc(p2, layout, 40).unwrap();
- assert_eq!(p3.as_ptr() as usize % align, 0, "pointer is incorrectly aligned");
- let slice = slice::from_raw_parts(p3.as_ptr(), 20);
- assert_eq!(&slice, &[0_u8; 20]);
-
- // old size > new size
- let p4 = allocator.realloc(p3, layout, 10).unwrap();
- let layout = Layout::from_size_align(10, align).unwrap();
- assert_eq!(p4.as_ptr() as usize % align, 0, "pointer is incorrectly aligned");
- let slice = slice::from_raw_parts(p4.as_ptr(), 10);
- assert_eq!(&slice, &[0_u8; 10]);
-
- allocator.dealloc(p4, layout);
- }
-} }
-
-fn check_align_requests<T: Alloc>(mut allocator: T) {
- for &size in &[2, 8, 64] { // size less than and bigger than alignment
- for &align in &[4, 8, 16, 32] { // Be sure to cover less than and bigger than `MIN_ALIGN` for all architectures
- let iterations = 32;
- unsafe {
- let pointers: Vec<_> = (0..iterations).map(|_| {
- allocator.alloc(Layout::from_size_align(size, align).unwrap()).unwrap()
- }).collect();
- for &ptr in &pointers {
- assert_eq!((ptr.as_ptr() as usize) % align, 0,
- "Got a pointer less aligned than requested")
- }
-
- // Clean up.
- for &ptr in &pointers {
- allocator.dealloc(ptr, Layout::from_size_align(size, align).unwrap())
- }
- }
- }
- }
-}
-
-fn global_to_box() {
- type T = [i32; 4];
- let l = Layout::new::<T>();
- // allocate manually with global allocator, then turn into Box and free there
- unsafe {
- let ptr = Global.alloc(l).unwrap().as_ptr() as *mut T;
- let b = Box::from_raw(ptr);
- drop(b);
- }
-}
-
-fn box_to_global() {
- type T = [i32; 4];
- let l = Layout::new::<T>();
- // allocate with the Box, then deallocate manually with global allocator
- unsafe {
- let b = Box::new(T::default());
- let ptr = Box::into_raw(b);
- Global.dealloc(NonNull::new(ptr as *mut u8).unwrap(), l);
- }
-}
-
-fn main() {
- check_alloc(System);
- check_alloc(Global);
- check_align_requests(System);
- check_align_requests(Global);
- global_to_box();
- box_to_global();
-}
+++ /dev/null
-// compile-flags: -Zmiri-seed=0000000000000000
-
-// This returns a miri pointer at type usize, if the argument is a proper pointer
-fn transmute_ptr_to_int<T>(x: *const T) -> usize {
- unsafe { std::mem::transmute(x) }
-}
-
-fn main() {
- // Some casting-to-int with arithmetic.
- let x = &42 as *const i32 as usize;
- let y = x * 2;
- assert_eq!(y, x + x);
- let z = y as u8 as usize;
- assert_eq!(z, y % 256);
-
- // Pointer string formatting! We can't check the output as it changes when libstd changes,
- // but we can make sure Miri does not error.
- format!("{:?}", &mut 13 as *mut _);
-
- // Check that intptrcast is triggered for explicit casts and that it is consistent with
- // transmuting.
- let a: *const i32 = &42;
- let b = transmute_ptr_to_int(a) as u8;
- let c = a as usize as u8;
- assert_eq!(b, c);
-}
+++ /dev/null
-//ignore-windows: Uses POSIX APIs
-//compile-flags: -Zmiri-seed=
-
-#![feature(rustc_private)]
-
-use core::{slice, ptr};
-
-extern crate libc;
-
-fn main() {
- // Test that small allocations sometimes *are* not very aligned.
- let saw_unaligned = (0..64).any(|_| unsafe {
- let p = libc::malloc(3);
- libc::free(p);
- (p as usize) % 4 != 0 // find any that this is *not* 4-aligned
- });
- assert!(saw_unaligned);
-
- unsafe {
- // Use calloc for initialized memory
- let p1 = libc::calloc(20, 1);
-
- // old size < new size
- let p2 = libc::realloc(p1, 40);
- let slice = slice::from_raw_parts(p2 as *const u8, 20);
- assert_eq!(&slice, &[0_u8; 20]);
-
- // old size == new size
- let p3 = libc::realloc(p2, 40);
- let slice = slice::from_raw_parts(p3 as *const u8, 20);
- assert_eq!(&slice, &[0_u8; 20]);
-
- // old size > new size
- let p4 = libc::realloc(p3, 10);
- let slice = slice::from_raw_parts(p4 as *const u8, 10);
- assert_eq!(&slice, &[0_u8; 10]);
-
- libc::free(p4);
- }
-
- unsafe {
- let p1 = libc::malloc(20);
-
- let p2 = libc::realloc(p1, 0);
- assert!(p2.is_null());
- }
-
- unsafe {
- let p1 = libc::realloc(ptr::null_mut(), 20);
- assert!(!p1.is_null());
-
- libc::free(p1);
- }
-}
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)]
+
+use std::mem;
+
+enum Tag<A> {
+ Tag2(A)
+}
+
+struct Rec {
+ c8: u8,
+ t: Tag<u64>
+}
+
+fn mk_rec() -> Rec {
+ return Rec { c8:0, t:Tag::Tag2(0) };
+}
+
+fn is_u64_aligned(u: &Tag<u64>) -> bool {
+ let p: usize = unsafe { mem::transmute(u) };
+ let u64_align = std::mem::align_of::<u64>();
+ return (p & (u64_align + 1)) == 0;
+}
+
+pub fn main() {
+ let x = mk_rec();
+ is_u64_aligned(&x.t); // the result of this is non-deterministic (even with a fixed seed, results vary between platforms)
+}
--- /dev/null
+use std::collections::{self, HashMap};
+use std::hash::{BuildHasherDefault, BuildHasher};
+
+fn test_map<S: BuildHasher>(mut map: HashMap<i32, i32, S>) {
+ map.insert(0, 0);
+ assert_eq!(map.values().fold(0, |x, y| x+y), 0);
+
+ let num = 25;
+ for i in 1..num {
+ map.insert(i, i);
+ }
+ assert_eq!(map.values().fold(0, |x, y| x+y), num*(num-1)/2); // check the right things are in the table now
+
+ // Inserting again replaces the existing entries
+ for i in 0..num {
+ map.insert(i, num-1-i);
+ }
+ assert_eq!(map.values().fold(0, |x, y| x+y), num*(num-1)/2);
+
+ // TODO: Test Entry API, Iterators, ...
+
+}
+
+fn main() {
+ if cfg!(target_os = "macos") { // TODO: Implement libstd HashMap seeding for macOS (https://github.com/rust-lang/miri/issues/686).
+ // Until then, use a deterministic map.
+ test_map::<BuildHasherDefault<collections::hash_map::DefaultHasher>>(HashMap::default());
+ } else {
+ test_map(HashMap::new());
+ }
+}
--- /dev/null
+#![feature(allocator_api)]
+
+use std::ptr::NonNull;
+use std::alloc::{Global, Alloc, Layout, System};
+use std::slice;
+
+fn check_alloc<T: Alloc>(mut allocator: T) { unsafe {
+ for &align in &[4, 8, 16, 32] {
+ let layout = Layout::from_size_align(20, align).unwrap();
+
+ for _ in 0..32 {
+ let a = allocator.alloc(layout).unwrap();
+ assert_eq!(a.as_ptr() as usize % align, 0, "pointer is incorrectly aligned");
+ allocator.dealloc(a, layout);
+ }
+
+ let p1 = allocator.alloc_zeroed(layout).unwrap();
+ assert_eq!(p1.as_ptr() as usize % align, 0, "pointer is incorrectly aligned");
+
+ let p2 = allocator.realloc(p1, layout, 40).unwrap();
+ let layout = Layout::from_size_align(40, align).unwrap();
+ assert_eq!(p2.as_ptr() as usize % align, 0, "pointer is incorrectly aligned");
+ let slice = slice::from_raw_parts(p2.as_ptr(), 20);
+ assert_eq!(&slice, &[0_u8; 20]);
+
+ // old size == new size
+ let p3 = allocator.realloc(p2, layout, 40).unwrap();
+ assert_eq!(p3.as_ptr() as usize % align, 0, "pointer is incorrectly aligned");
+ let slice = slice::from_raw_parts(p3.as_ptr(), 20);
+ assert_eq!(&slice, &[0_u8; 20]);
+
+ // old size > new size
+ let p4 = allocator.realloc(p3, layout, 10).unwrap();
+ let layout = Layout::from_size_align(10, align).unwrap();
+ assert_eq!(p4.as_ptr() as usize % align, 0, "pointer is incorrectly aligned");
+ let slice = slice::from_raw_parts(p4.as_ptr(), 10);
+ assert_eq!(&slice, &[0_u8; 10]);
+
+ allocator.dealloc(p4, layout);
+ }
+} }
+
+fn check_align_requests<T: Alloc>(mut allocator: T) {
+ for &size in &[2, 8, 64] { // size less than and bigger than alignment
+ for &align in &[4, 8, 16, 32] { // Be sure to cover less than and bigger than `MIN_ALIGN` for all architectures
+ let iterations = 32;
+ unsafe {
+ let pointers: Vec<_> = (0..iterations).map(|_| {
+ allocator.alloc(Layout::from_size_align(size, align).unwrap()).unwrap()
+ }).collect();
+ for &ptr in &pointers {
+ assert_eq!((ptr.as_ptr() as usize) % align, 0,
+ "Got a pointer less aligned than requested")
+ }
+
+ // Clean up.
+ for &ptr in &pointers {
+ allocator.dealloc(ptr, Layout::from_size_align(size, align).unwrap())
+ }
+ }
+ }
+ }
+}
+
+fn global_to_box() {
+ type T = [i32; 4];
+ let l = Layout::new::<T>();
+ // allocate manually with global allocator, then turn into Box and free there
+ unsafe {
+ let ptr = Global.alloc(l).unwrap().as_ptr() as *mut T;
+ let b = Box::from_raw(ptr);
+ drop(b);
+ }
+}
+
+fn box_to_global() {
+ type T = [i32; 4];
+ let l = Layout::new::<T>();
+ // allocate with the Box, then deallocate manually with global allocator
+ unsafe {
+ let b = Box::new(T::default());
+ let ptr = Box::into_raw(b);
+ Global.dealloc(NonNull::new(ptr as *mut u8).unwrap(), l);
+ }
+}
+
+fn main() {
+ check_alloc(System);
+ check_alloc(Global);
+ check_align_requests(System);
+ check_align_requests(Global);
+ global_to_box();
+ box_to_global();
+}
--- /dev/null
+// This returns a miri pointer at type usize, if the argument is a proper pointer
+fn transmute_ptr_to_int<T>(x: *const T) -> usize {
+ unsafe { std::mem::transmute(x) }
+}
+
+fn cast() {
+ // Some casting-to-int with arithmetic.
+ let x = &42 as *const i32 as usize;
+ let y = x * 2;
+ assert_eq!(y, x + x);
+ let z = y as u8 as usize;
+ assert_eq!(z, y % 256);
+}
+
+fn format() {
+ // Pointer string formatting! We can't check the output as it changes when libstd changes,
+ // but we can make sure Miri does not error.
+ format!("{:?}", &mut 13 as *mut _);
+}
+
+fn transmute() {
+ // Check that intptrcast is triggered for explicit casts and that it is consistent with
+ // transmuting.
+ let a: *const i32 = &42;
+ let b = transmute_ptr_to_int(a) as u8;
+ let c = a as usize as u8;
+ assert_eq!(b, c);
+}
+
+fn ptr_bitops1() {
+ let bytes = [0i8, 1, 2, 3, 4, 5, 6, 7, 8, 9];
+ let one = bytes.as_ptr().wrapping_offset(1);
+ let three = bytes.as_ptr().wrapping_offset(3);
+ let res = (one as usize) | (three as usize);
+ format!("{}", res);
+}
+
+fn ptr_bitops2() {
+ let val = 13usize;
+ let addr = &val as *const _ as usize;
+ let _val = addr & 13;
+}
+
+fn ptr_eq_dangling() {
+ let b = Box::new(0);
+ let x = &*b as *const i32; // soon-to-be dangling
+ drop(b);
+ let b = Box::new(0);
+ let y = &*b as *const i32; // different allocation
+ // We cannot compare these even though both are inbounds -- they *could* be
+ // equal if memory was reused.
+ assert!(x != y);
+}
+
+fn ptr_eq_out_of_bounds() {
+ let b = Box::new(0);
+ let x = (&*b as *const i32).wrapping_sub(0x800); // out-of-bounds
+ let b = Box::new(0);
+ let y = &*b as *const i32; // different allocation
+ // We cannot compare these even though both allocations are live -- they *could* be
+ // equal (with the right base addresses).
+ assert!(x != y);
+}
+
+fn ptr_eq_out_of_bounds_null() {
+ let b = Box::new(0);
+ let x = (&*b as *const i32).wrapping_sub(0x800); // out-of-bounds
+ // We cannot compare this with NULL. After all, this *could* be NULL (with the right base address).
+ assert!(x != std::ptr::null());
+}
+
+fn ptr_eq_integer() {
+ let b = Box::new(0);
+ let x = &*b as *const i32;
+ // We cannot compare this with a non-NULL integer. After all, these *could* be equal (with the right base address).
+ assert!(x != 64 as *const i32);
+}
+
+fn main() {
+ cast();
+ format();
+ transmute();
+ ptr_bitops1();
+ ptr_bitops2();
+ ptr_eq_dangling();
+ ptr_eq_out_of_bounds();
+ ptr_eq_out_of_bounds_null();
+ ptr_eq_integer();
+}
--- /dev/null
+//ignore-windows: Uses POSIX APIs
+
+#![feature(rustc_private)]
+
+use core::{slice, ptr};
+
+extern crate libc;
+
+fn main() {
+ // Test that small allocations sometimes *are* not very aligned.
+ let saw_unaligned = (0..64).any(|_| unsafe {
+ let p = libc::malloc(3);
+ libc::free(p);
+ (p as usize) % 4 != 0 // find any that this is *not* 4-aligned
+ });
+ assert!(saw_unaligned);
+
+ unsafe {
+ // Use calloc for initialized memory
+ let p1 = libc::calloc(20, 1);
+
+ // old size < new size
+ let p2 = libc::realloc(p1, 40);
+ let slice = slice::from_raw_parts(p2 as *const u8, 20);
+ assert_eq!(&slice, &[0_u8; 20]);
+
+ // old size == new size
+ let p3 = libc::realloc(p2, 40);
+ let slice = slice::from_raw_parts(p3 as *const u8, 20);
+ assert_eq!(&slice, &[0_u8; 20]);
+
+ // old size > new size
+ let p4 = libc::realloc(p3, 10);
+ let slice = slice::from_raw_parts(p4 as *const u8, 10);
+ assert_eq!(&slice, &[0_u8; 10]);
+
+ libc::free(p4);
+ }
+
+ unsafe {
+ let p1 = libc::malloc(20);
+
+ let p2 = libc::realloc(p1, 0);
+ assert!(p2.is_null());
+ }
+
+ unsafe {
+ let p1 = libc::realloc(ptr::null_mut(), 20);
+ assert!(!p1.is_null());
+
+ libc::free(p1);
+ }
+}