use std::cell::RefCell;
use std::cmp::max;
use std::collections::hash_map::Entry;

use log::trace;
use rand::Rng;

use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_span::Span;
use rustc_target::abi::{HasDataLayout, Size};

use crate::*;

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ProvenanceMode {
    /// We support `expose_addr`/`from_exposed_addr` via "wildcard" provenance.
    /// However, we want to warn on `from_exposed_addr` to alert the user of the precision loss.
    Default,
    /// Like `Default`, but without the warning.
    Permissive,
    /// We error on `from_exposed_addr`, ensuring no precision loss.
    Strict,
}

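// A sketch (not Miri code) of the user-facing pattern these modes govern;
// `x`, `addr`, and `p` are hypothetical names:
//
//     let x = 42u8;
//     let addr = (&x as *const u8).expose_addr(); // marks `x`'s allocation as exposed
//     let p = std::ptr::from_exposed_addr::<u8>(addr); // warns under `Default`, errors under `Strict`
//     unsafe { assert_eq!(*p, 42) }; // fine: `p` has wildcard provenance and `x` is exposed
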
pub type GlobalState = RefCell<GlobalStateInner>;

#[derive(Clone, Debug)]
pub struct GlobalStateInner {
    /// This is used as a map between the address of each allocation and its `AllocId`.
    /// It is always sorted by address.
    int_to_ptr_map: Vec<(u64, AllocId)>,
    /// The base address for each allocation. We cannot put that into
    /// `AllocExtra` because function pointers also have a base address, and
    /// they do not have an `AllocExtra`.
    /// This is the inverse of `int_to_ptr_map`.
    base_addr: FxHashMap<AllocId, u64>,
    /// Whether an allocation has been exposed or not. This cannot be put
    /// into `AllocExtra` for the same reason as `base_addr`.
    exposed: FxHashSet<AllocId>,
    /// This is used as a memory address when a new pointer is cast to an integer. It
    /// is always larger than any address that was previously made part of a block.
    next_base_addr: u64,
    /// The provenance to use for int2ptr casts.
    provenance_mode: ProvenanceMode,
}

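// Illustration (hypothetical state, not from a real execution): after two
// allocations `A` (based at 0x100, 0x40 bytes) and `B` (based at 0x200),
// `int_to_ptr_map = [(0x100, A), (0x200, B)]`, `base_addr = {A: 0x100, B: 0x200}`,
// and `next_base_addr` is some address greater than 0x200.
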
impl GlobalStateInner {
    pub fn new(config: &MiriConfig) -> Self {
        GlobalStateInner {
            int_to_ptr_map: Vec::default(),
            base_addr: FxHashMap::default(),
            exposed: FxHashSet::default(),
            next_base_addr: STACK_ADDR,
            provenance_mode: config.provenance_mode,
        }
    }
}

impl<'mir, 'tcx> GlobalStateInner {
    // Returns the exposed `AllocId` that corresponds to the specified addr,
    // or `None` if the addr is out of bounds.
    fn alloc_id_from_addr(ecx: &MiriEvalContext<'mir, 'tcx>, addr: u64) -> Option<AllocId> {
        let global_state = ecx.machine.intptrcast.borrow();
        assert!(global_state.provenance_mode != ProvenanceMode::Strict);

        let pos = global_state.int_to_ptr_map.binary_search_by_key(&addr, |(addr, _)| *addr);

        // Determine the in-bounds provenance for this pointer.
        // (This is only called on an actual access, so in-bounds is the only possible kind of provenance.)
        let alloc_id = match pos {
            Ok(pos) => Some(global_state.int_to_ptr_map[pos].1),
            Err(0) => None,
            Err(pos) => {
                // This is the largest of the addresses smaller than `addr`,
                // i.e. the greatest lower bound (glb).
                let (glb, alloc_id) = global_state.int_to_ptr_map[pos - 1];
                // This never overflows because `addr >= glb`.
                let offset = addr - glb;
                // If the offset exceeds the size of the allocation, don't use this `alloc_id`.
                let size = ecx.get_alloc_info(alloc_id).0;
                if offset <= size.bytes() { Some(alloc_id) } else { None }
            }
        }?;

        // We only use this provenance if it has been exposed, *and* is still live.
        if global_state.exposed.contains(&alloc_id) {
            let (_size, _align, kind) = ecx.get_alloc_info(alloc_id);
            match kind {
                AllocKind::LiveData | AllocKind::Function => return Some(alloc_id),
                // Dead allocations are not valid targets anymore.
                AllocKind::Dead => {}
            }
        }

        None
    }

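    // Worked example (reusing the hypothetical state above): looking up
    // `addr = 0x1f0` binary-searches to `Err(1)`, so the glb entry is
    // `(0x100, A)` and `offset = 0xf0`. Since 0xf0 exceeds `A`'s size of 0x40,
    // no allocation matches; `addr = 0x120` instead resolves to `A` at offset
    // 0x20, provided `A` is exposed and live.
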
    pub fn expose_ptr(ecx: &mut MiriEvalContext<'mir, 'tcx>, alloc_id: AllocId, sb: SbTag) {
        let global_state = ecx.machine.intptrcast.get_mut();
        // In strict mode, we don't need this, so we can save some cycles by not tracking it.
        if global_state.provenance_mode != ProvenanceMode::Strict {
            trace!("Exposing allocation id {alloc_id:?}");
            global_state.exposed.insert(alloc_id);
            if ecx.machine.stacked_borrows.is_some() {
                ecx.expose_tag(alloc_id, sb);
            }
        }
    }

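    // For example (a sketch): a program calling `(&x as *const i32).expose_addr()`
    // reaches this function with `x`'s `AllocId` and the pointer's Stacked Borrows
    // tag, so that later wildcard pointers can resolve to `x`'s allocation.
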
    pub fn ptr_from_addr_transmute(
        ecx: &MiriEvalContext<'mir, 'tcx>,
        addr: u64,
    ) -> Pointer<Option<Tag>> {
        trace!("Transmuting 0x{:x} to a pointer", addr);

        let provenance = if ecx.machine.allow_ptr_int_transmute {
            // When we allow transmutes, treat them like casts: generate a wildcard pointer.
            Some(Tag::Wildcard)
        } else {
            // Usually, we consider transmuted pointers to be "invalid" (`None` provenance).
            None
        };

        Pointer::new(provenance, Size::from_bytes(addr))
    }

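    // To illustrate the distinction (sketch): `std::mem::transmute::<usize, *const u8>(addr)`
    // is handled by this function, while `addr as *const u8` goes through
    // `ptr_from_addr_cast` below.
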
    pub fn ptr_from_addr_cast(
        ecx: &MiriEvalContext<'mir, 'tcx>,
        addr: u64,
    ) -> InterpResult<'tcx, Pointer<Option<Tag>>> {
        trace!("Casting 0x{:x} to a pointer", addr);

        let global_state = ecx.machine.intptrcast.borrow();

        match global_state.provenance_mode {
            ProvenanceMode::Default => {
                // The first time this happens at a particular location, print a warning.
                thread_local! {
                    // `Span` is non-`Send`, so we use a thread-local instead.
                    static PAST_WARNINGS: RefCell<FxHashSet<Span>> = RefCell::default();
                }
                PAST_WARNINGS.with_borrow_mut(|past_warnings| {
                    let first = past_warnings.is_empty();
                    if past_warnings.insert(ecx.cur_span()) {
                        // Newly inserted, so first time we see this span.
                        register_diagnostic(NonHaltingDiagnostic::Int2Ptr { details: first });
                    }
                });
            }
            ProvenanceMode::Strict => {
                throw_unsup_format!(
                    "integer-to-pointer casts and `from_exposed_addr` are not supported with `-Zmiri-strict-provenance`; use `with_addr` instead"
                )
            }
            ProvenanceMode::Permissive => {}
        }

        // This is how wildcard pointers are born.
        Ok(Pointer::new(Some(Tag::Wildcard), Size::from_bytes(addr)))
    }

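    // For reference (a sketch of how the modes are selected): `Default` is the
    // out-of-the-box behavior, `Permissive` corresponds to the
    // `-Zmiri-permissive-provenance` flag, and `Strict` to `-Zmiri-strict-provenance`.
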
    fn alloc_base_addr(ecx: &MiriEvalContext<'mir, 'tcx>, alloc_id: AllocId) -> u64 {
        let mut global_state = ecx.machine.intptrcast.borrow_mut();
        let global_state = &mut *global_state;

        match global_state.base_addr.entry(alloc_id) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                // There is nothing wrong with a raw pointer being cast to an integer only after
                // it became dangling. Hence we allow dead allocations.
                let (size, align, _kind) = ecx.get_alloc_info(alloc_id);

                // This allocation does not have a base address yet, pick one.
                // Leave some space to the previous allocation, to give it some chance to be less aligned.
                let slack = {
                    let mut rng = ecx.machine.rng.borrow_mut();
                    // This means that `(global_state.next_base_addr + slack) % 16` is uniformly distributed.
                    rng.gen_range(0..16)
                };
                // From next_base_addr + slack, round up to adjust for alignment.
                let base_addr = global_state.next_base_addr.checked_add(slack).unwrap();
                let base_addr = Self::align_addr(base_addr, align.bytes());
                entry.insert(base_addr);
                trace!(
                    "Assigning base address {:#x} to allocation {:?} (size: {}, align: {}, slack: {})",
                    base_addr,
                    alloc_id,
                    size.bytes(),
                    align.bytes(),
                    slack,
                );

                // Remember next base address. If this allocation is zero-sized, leave a gap
                // of at least 1 to avoid two allocations having the same base address.
                // (The logic in `alloc_id_from_addr` assumes unique addresses, and function
                // pointers to different functions need to be distinguishable!)
                global_state.next_base_addr = base_addr.checked_add(max(size.bytes(), 1)).unwrap();
                // Given that `next_base_addr` increases with each allocation, pushing the
                // corresponding tuple keeps `int_to_ptr_map` sorted.
                global_state.int_to_ptr_map.push((base_addr, alloc_id));

                base_addr
            }
        }
    }

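    // Worked example (illustrative numbers): with `next_base_addr = 0x1000`,
    // `slack = 5`, and `align = 8`, the candidate address is `0x1000 + 5 = 0x1005`,
    // which `align_addr` rounds up to `0x1008`; for a 16-byte allocation,
    // `next_base_addr` then advances to `0x1018`.
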
    /// Convert a relative (tcx) pointer to an absolute address.
    pub fn rel_ptr_to_addr(ecx: &MiriEvalContext<'mir, 'tcx>, ptr: Pointer<AllocId>) -> u64 {
        let (alloc_id, offset) = ptr.into_parts(); // offset is relative (AllocId provenance)
        let base_addr = GlobalStateInner::alloc_base_addr(ecx, alloc_id);

        // Add offset with the right kind of pointer-overflowing arithmetic.
        let dl = ecx.data_layout();
        dl.overflowing_offset(base_addr, offset.bytes()).0
    }

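    // E.g. (sketch, assuming a 64-bit target): `base_addr = u64::MAX` with
    // `offset = 2` wraps around to address 1; `overflowing_offset` performs this
    // modular arithmetic at the target's pointer width.
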
    /// When a pointer is used for a memory access, this computes in which allocation
    /// the access is going, and at which offset.
    pub fn abs_ptr_to_rel(
        ecx: &MiriEvalContext<'mir, 'tcx>,
        ptr: Pointer<Tag>,
    ) -> Option<(AllocId, Size)> {
        let (tag, addr) = ptr.into_parts(); // addr is absolute (Tag provenance)

        let alloc_id = if let Tag::Concrete { alloc_id, .. } = tag {
            alloc_id
        } else {
            // A wildcard pointer.
            GlobalStateInner::alloc_id_from_addr(ecx, addr.bytes())?
        };

        let base_addr = GlobalStateInner::alloc_base_addr(ecx, alloc_id);

        // Wrapping "addr - base_addr".
        let dl = ecx.data_layout();
        let neg_base_addr = (base_addr as i64).wrapping_neg();
        Some((
            alloc_id,
            Size::from_bytes(dl.overflowing_signed_offset(addr.bytes(), neg_base_addr).0),
        ))
    }

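    // E.g. (sketch): `addr = 0x1010` with `base_addr = 0x1008` yields offset 8.
    // Negating the base and using `overflowing_signed_offset` keeps the
    // subtraction well-defined even when `addr < base_addr`.
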
    /// Shifts `addr` to make it aligned with `align` by rounding `addr` up to the
    /// smallest multiple of `align` that is larger than or equal to `addr`.
    fn align_addr(addr: u64, align: u64) -> u64 {
        match addr % align {
            0 => addr,
            rem => addr.checked_add(align).unwrap() - rem,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_align_addr() {
        assert_eq!(GlobalStateInner::align_addr(37, 4), 40);
        assert_eq!(GlobalStateInner::align_addr(44, 4), 44);
    }
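
    // A couple of extra cases (added for illustration): already-aligned
    // addresses and alignment 1 are both identities.
    #[test]
    fn test_align_addr_noop_cases() {
        assert_eq!(GlobalStateInner::align_addr(0, 8), 0);
        assert_eq!(GlobalStateInner::align_addr(13, 1), 13);
    }
}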