}
/// The information that makes up a memory access: offset and size.
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone)]
pub struct AllocRange {
pub start: Size,
pub size: Size,
}
+impl fmt::Debug for AllocRange {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "[{:#x}..{:#x}]", self.start.bytes(), self.end().bytes())
+ }
+}
+
/// Free-starting constructor for less syntactic overhead.
#[inline(always)]
pub fn alloc_range(start: Size, size: Size) -> AllocRange {
}
impl AllocRange {
+ #[inline]
+ pub fn from(r: Range<Size>) -> Self {
+ alloc_range(r.start, r.end - r.start) // `Size` subtraction (overflow-checked)
+ }
+
#[inline(always)]
pub fn end(self) -> Size {
self.start + self.size // This does overflow checking.
/// Relocations.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
/// Returns all relocations overlapping with the given pointer-offset pair.
- pub fn get_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Tag)] {
+ fn get_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Tag)] {
// We have to go back `pointer_size - 1` bytes, as that one would still overlap with
// the beginning of this range.
let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
self.relocations.range(Size::from_bytes(start)..range.end())
}
+ /// Returns whether this allocation has relocations overlapping with the given range.
+ ///
+ /// Note: this function exists to allow `get_relocations` to be private, in order to somewhat
+ /// limit access to relocations outside of the `Allocation` abstraction.
+ pub fn has_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> bool {
+ !self.get_relocations(cx, range).is_empty()
+ }
+
/// Checks that there are no relocations overlapping with the given range.
#[inline(always)]
fn check_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
- if self.get_relocations(cx, range).is_empty() {
- Ok(())
- } else {
- Err(AllocError::ReadPointerAsBytes)
- }
+ if self.has_relocations(cx, range) { Err(AllocError::ReadPointerAsBytes) } else { Ok(()) }
}
/// Removes all relocations inside the given range.
/// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
/// indexes for the first contiguous span of the uninitialized access.
#[inline]
- pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), Range<Size>> {
+ pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), AllocRange> {
if end > self.len {
- return Err(self.len..end);
+ return Err(AllocRange::from(self.len..end));
}
let uninit_start = self.find_bit(start, end, false);
match uninit_start {
Some(uninit_start) => {
let uninit_end = self.find_bit(uninit_start, end, true).unwrap_or(end);
- Err(uninit_start..uninit_end)
+ Err(AllocRange::from(uninit_start..uninit_end))
}
None => Ok(()),
}
///
/// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
/// indexes of the first contiguous uninitialized access.
- fn is_init(&self, range: AllocRange) -> Result<(), Range<Size>> {
+ fn is_init(&self, range: AllocRange) -> Result<(), AllocRange> {
self.init_mask.is_range_initialized(range.start, range.end()) // `Size` addition
}
/// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
/// error which will report the first range of bytes which is uninitialized.
fn check_init(&self, range: AllocRange) -> AllocResult {
- self.is_init(range).map_err(|idx_range| {
+ self.is_init(range).map_err(|uninit_range| {
AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
- access_offset: range.start,
- access_size: range.size,
- uninit_offset: idx_range.start,
- uninit_size: idx_range.end - idx_range.start, // `Size` subtraction
+ access: range,
+ uninit: uninit_range,
}))
})
}