X-Git-Url: https://git.lizzy.rs/?a=blobdiff_plain;f=compiler%2Frustc_middle%2Fsrc%2Fmir%2Finterpret%2Fallocation.rs;h=ae333846f067c73fce5f891d9831ed27fc5c28a8;hb=27b7b3dcd6b0dddc1dae6d4412f05c4aec8aa72f;hp=10c4ea63a68b2677158ef47e1015d66329f3bb11;hpb=116edb6800ea1d6615578e7f65366ae65364b3d8;p=rust.git

diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
index 10c4ea63a68..ae333846f06 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -160,12 +160,18 @@ pub fn to_interp_error<'tcx>(self, alloc_id: AllocId) -> InterpError<'tcx> {
 }
 
 /// The information that makes up a memory access: offset and size.
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone)]
 pub struct AllocRange {
     pub start: Size,
     pub size: Size,
 }
 
+impl fmt::Debug for AllocRange {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "[{:#x}..{:#x}]", self.start.bytes(), self.end().bytes())
+    }
+}
+
 /// Free-starting constructor for less syntactic overhead.
 #[inline(always)]
 pub fn alloc_range(start: Size, size: Size) -> AllocRange {
@@ -173,6 +179,11 @@ pub fn alloc_range(start: Size, size: Size) -> AllocRange {
 }
 
 impl AllocRange {
+    #[inline]
+    pub fn from(r: Range<Size>) -> Self {
+        alloc_range(r.start, r.end - r.start) // `Size` subtraction (overflow-checked)
+    }
+
     #[inline(always)]
     pub fn end(self) -> Size {
         self.start + self.size // This does overflow checking.
@@ -537,21 +548,26 @@ pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> Al
 /// Relocations.
 impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
     /// Returns all relocations overlapping with the given pointer-offset pair.
-    pub fn get_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Tag)] {
+    fn get_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Tag)] {
         // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
         // the beginning of this range.
         let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
         self.relocations.range(Size::from_bytes(start)..range.end())
     }
 
+    /// Returns whether this allocation has relocations overlapping with the given range.
+    ///
+    /// Note: this function exists to allow `get_relocations` to be private, in order to somewhat
+    /// limit access to relocations outside of the `Allocation` abstraction.
+    ///
+    pub fn has_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> bool {
+        !self.get_relocations(cx, range).is_empty()
+    }
+
     /// Checks that there are no relocations overlapping with the given range.
     #[inline(always)]
     fn check_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
-        if self.get_relocations(cx, range).is_empty() {
-            Ok(())
-        } else {
-            Err(AllocError::ReadPointerAsBytes)
-        }
+        if self.has_relocations(cx, range) { Err(AllocError::ReadPointerAsBytes) } else { Ok(()) }
     }
 
     /// Removes all relocations inside the given range.
@@ -1084,9 +1100,9 @@ impl InitMask {
     /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
     /// indexes for the first contiguous span of the uninitialized access.
     #[inline]
-    pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), Range<Size>> {
+    pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), AllocRange> {
         if end > self.len {
-            return Err(self.len..end);
+            return Err(AllocRange::from(self.len..end));
         }
 
         let uninit_start = self.find_bit(start, end, false);
@@ -1094,7 +1110,7 @@ pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), Range<S
         match uninit_start {
             Some(uninit_start) => {
                 let uninit_end = self.find_bit(uninit_start, end, true).unwrap_or(end);
-                Err(uninit_start..uninit_end)
+                Err(AllocRange::from(uninit_start..uninit_end))
             }
             None => Ok(()),
         }
@@ -1165,19 +1181,17 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
     ///
     /// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
     /// indexes of the first contiguous uninitialized access.
-    fn is_init(&self, range: AllocRange) -> Result<(), Range<Size>> {
+    fn is_init(&self, range: AllocRange) -> Result<(), AllocRange> {
         self.init_mask.is_range_initialized(range.start, range.end()) // `Size` addition
     }
 
     /// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
     /// error which will report the first range of bytes which is uninitialized.
     fn check_init(&self, range: AllocRange) -> AllocResult {
-        self.is_init(range).map_err(|idx_range| {
+        self.is_init(range).map_err(|uninit_range| {
             AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
-                access_offset: range.start,
-                access_size: range.size,
-                uninit_offset: idx_range.start,
-                uninit_size: idx_range.end - idx_range.start, // `Size` subtraction
+                access: range,
+                uninit: uninit_range,
             }))
         })
     }
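
The `AllocRange` changes above are easiest to read outside of diff context: the derived `Debug` is replaced by a hand-written one that prints the access as half-open hex bounds, and a `from(Range<Size>)` constructor is added so `InitMask::is_range_initialized` can hand back an `AllocRange` instead of a raw `Range<Size>`. The sketch below is a minimal, self-contained model of just those two pieces, not rustc code: `Size` is stood in for here by a bare `u64` wrapper (the real type is `rustc_target::abi::Size`, with overflow-checked arithmetic), so the snippet compiles on its own and shows the `[0x4..0xc]`-style output the new `Debug` impl produces.

use std::fmt;
use std::ops::Range;

// Stand-in for `rustc_target::abi::Size`: just a byte count, no checked arithmetic.
#[derive(Copy, Clone)]
struct Size(u64);

impl Size {
    fn bytes(self) -> u64 {
        self.0
    }
}

// Mirror of the patched `AllocRange`: offset and length of one memory access.
#[derive(Copy, Clone)]
struct AllocRange {
    start: Size,
    size: Size,
}

fn alloc_range(start: Size, size: Size) -> AllocRange {
    AllocRange { start, size }
}

impl AllocRange {
    // Same shape as the new `AllocRange::from(Range<Size>)` in the diff.
    fn from(r: Range<Size>) -> Self {
        alloc_range(r.start, Size(r.end.bytes() - r.start.bytes()))
    }

    fn end(self) -> Size {
        Size(self.start.bytes() + self.size.bytes())
    }
}

// The replacement `Debug` impl: half-open hex bounds instead of the derived
// `AllocRange { start: .., size: .. }` form.
impl fmt::Debug for AllocRange {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "[{:#x}..{:#x}]", self.start.bytes(), self.end().bytes())
    }
}

fn main() {
    let uninit = AllocRange::from(Size(4)..Size(12));
    println!("{:?}", uninit); // prints `[0x4..0xc]`
}

The other hunks follow the same direction: `get_relocations` becomes private behind a boolean `has_relocations`, keeping relocation internals inside the `Allocation` abstraction, and `UninitBytesAccess` now carries the access and the uninitialized span as two `AllocRange`s, so anything that debug-prints the error can reuse the compact `[start..end]` rendering.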