use std::cell::RefCell;
+use std::cmp::max;
use std::collections::hash_map::Entry;
use log::trace;
use rand::Rng;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_span::Span;
use rustc_target::abi::{HasDataLayout, Size};
use crate::*;
// This never overflows because `addr >= glb`
let offset = addr - glb;
// If the offset exceeds the size of the allocation, don't use this `alloc_id`.
-
- if offset
- <= ecx
- .get_alloc_size_and_align(alloc_id, AllocCheck::MaybeDead)
- .unwrap()
- .0
- .bytes()
- {
- Some(alloc_id)
- } else {
- None
- }
+ let size = ecx.get_alloc_info(alloc_id).0;
+ if offset <= size.bytes() { Some(alloc_id) } else { None }
}
}?;
// We only use this provenance if it has been exposed, *and* is still live.
if global_state.exposed.contains(&alloc_id) {
- // FIXME: this catches `InterpError`, which we should not usually do.
- // We might need a proper fallible API from `memory.rs` to avoid this though.
- if ecx.get_alloc_size_and_align(alloc_id, AllocCheck::Live).is_ok() {
- return Some(alloc_id);
+ let (_size, _align, kind) = ecx.get_alloc_info(alloc_id);
+ match kind {
+ AllocKind::LiveData | AllocKind::Function => return Some(alloc_id),
+ AllocKind::Dead => {}
}
}
match global_state.provenance_mode {
ProvenanceMode::Default => {
- // The first time this happens, print a warning.
- use std::sync::atomic::{AtomicBool, Ordering};
- static FIRST_WARNING: AtomicBool = AtomicBool::new(true);
- if FIRST_WARNING.swap(false, Ordering::Relaxed) {
- register_diagnostic(NonHaltingDiagnostic::Int2Ptr);
+ // The first time this happens at a particular location, print a warning.
+ thread_local! {
+ // `Span` is non-`Send`, so we use a thread-local instead.
+ static PAST_WARNINGS: RefCell<FxHashSet<Span>> = RefCell::default();
}
+ PAST_WARNINGS.with_borrow_mut(|past_warnings| {
+ let first = past_warnings.is_empty();
+ if past_warnings.insert(ecx.cur_span()) {
+ // Newly inserted, so first time we see this span.
+ register_diagnostic(NonHaltingDiagnostic::Int2Ptr { details: first });
+ }
+ });
}
ProvenanceMode::Strict => {
- throw_unsup_format!(
- "integer-to-pointer casts and `from_exposed_addr` are not supported with `-Zmiri-strict-provenance`; use `with_addr` instead"
- )
+ throw_machine_stop!(TerminationInfo::Int2PtrWithStrictProvenance);
}
ProvenanceMode::Permissive => {}
}
Entry::Occupied(entry) => *entry.get(),
Entry::Vacant(entry) => {
// There is nothing wrong with a raw pointer being cast to an integer only after
- // it became dangling. Hence `MaybeDead`.
- let (size, align) =
- ecx.get_alloc_size_and_align(alloc_id, AllocCheck::MaybeDead).unwrap();
+ // it became dangling. Hence we allow dead allocations.
+ let (size, align, _kind) = ecx.get_alloc_info(alloc_id);
// This allocation does not have a base address yet, pick one.
// Leave some space to the previous allocation, to give it some chance to be less aligned.
slack,
);
- // Remember next base address. Leave a gap of at least 1 to avoid two zero-sized allocations
- // having the same base address, and to avoid ambiguous provenance for the address between two
- // allocations (also see https://github.com/rust-lang/unsafe-code-guidelines/issues/313).
- let size_plus_1 = size.bytes().checked_add(1).unwrap();
- global_state.next_base_addr = base_addr.checked_add(size_plus_1).unwrap();
+ // Remember next base address. If this allocation is zero-sized, leave a gap
+ // of at least 1 to avoid two allocations having the same base address.
+ // (The logic in `alloc_id_from_addr` assumes unique addresses, and function
+ // pointers to different functions need to be distinguishable!)
+ global_state.next_base_addr = base_addr.checked_add(max(size.bytes(), 1)).unwrap();
// Given that `next_base_addr` increases in each allocation, pushing the
// corresponding tuple keeps `int_to_ptr_map` sorted
global_state.int_to_ptr_map.push((base_addr, alloc_id));
dl.overflowing_offset(base_addr, offset.bytes()).0
}
- /// Whena pointer is used for a memory access, this computes where in which allocation the
/// When a pointer is used for a memory access, this computes which allocation the
/// access goes to, and the offset inside that allocation.
pub fn abs_ptr_to_rel(
ecx: &MiriEvalContext<'mir, 'tcx>,