use std::cell::RefCell;
+use std::cmp::max;
use std::collections::hash_map::Entry;
use log::trace;
// This never overflows because `addr >= glb`
let offset = addr - glb;
// If the offset exceeds the size of the allocation, don't use this `alloc_id`.
-
- if offset
- <= ecx
- .get_alloc_size_and_align(alloc_id, AllocCheck::MaybeDead)
- .unwrap()
- .0
- .bytes()
- {
- Some(alloc_id)
- } else {
- None
- }
+ let size = ecx.get_alloc_info(alloc_id).0;
+ if offset <= size.bytes() { Some(alloc_id) } else { None }
}
}?;
// We only use this provenance if it has been exposed, *and* is still live.
if global_state.exposed.contains(&alloc_id) {
- // FIXME: this catches `InterpError`, which we should not usually do.
- // We might need a proper fallible API from `memory.rs` to avoid this though.
- if ecx.get_alloc_size_and_align(alloc_id, AllocCheck::Live).is_ok() {
- return Some(alloc_id);
+ let (_size, _align, kind) = ecx.get_alloc_info(alloc_id);
+ match kind {
+ AllocKind::LiveData | AllocKind::Function => return Some(alloc_id),
+ AllocKind::Dead => {}
}
}
});
}
ProvenanceMode::Strict => {
- throw_unsup_format!(
- "integer-to-pointer casts and `from_exposed_addr` are not supported with `-Zmiri-strict-provenance`; use `with_addr` instead"
- )
+ throw_machine_stop!(TerminationInfo::Int2PtrWithStrictProvenance);
}
ProvenanceMode::Permissive => {}
}
Entry::Occupied(entry) => *entry.get(),
Entry::Vacant(entry) => {
// There is nothing wrong with a raw pointer being cast to an integer only after
- // it became dangling. Hence `MaybeDead`.
- let (size, align) =
- ecx.get_alloc_size_and_align(alloc_id, AllocCheck::MaybeDead).unwrap();
+ // it became dangling. Hence we allow dead allocations.
+ let (size, align, _kind) = ecx.get_alloc_info(alloc_id);
// This allocation does not have a base address yet, pick one.
// Leave some space after the previous allocation, to give it some chance to be less aligned.
slack,
);
- // Remember next base address. Leave a gap of at least 1 to avoid two zero-sized allocations
- // having the same base address, and to avoid ambiguous provenance for the address between two
- // allocations (also see https://github.com/rust-lang/unsafe-code-guidelines/issues/313).
- let size_plus_1 = size.bytes().checked_add(1).unwrap();
- global_state.next_base_addr = base_addr.checked_add(size_plus_1).unwrap();
+ // Remember next base address. If this allocation is zero-sized, leave a gap
+ // of at least 1 to avoid two allocations having the same base address.
+ // (The logic in `alloc_id_from_addr` assumes unique addresses, and function
+ // pointers to different functions need to be distinguishable!)
+ global_state.next_base_addr = base_addr.checked_add(max(size.bytes(), 1)).unwrap();
// Given that `next_base_addr` increases in each allocation, pushing the
// corresponding tuple keeps `int_to_ptr_map` sorted
global_state.int_to_ptr_map.push((base_addr, alloc_id));