//! The virtual memory representation of the MIR interpreter.

use std::borrow::Cow;
use std::convert::TryFrom;
use std::iter;
use std::ops::{Deref, DerefMut, Range};
use std::ptr;

use rustc_ast::Mutability;
use rustc_data_structures::sorted_map::SortedMap;
use rustc_span::DUMMY_SP;
use rustc_target::abi::{Align, HasDataLayout, Size};

use super::{
    read_target_uint, write_target_uint, AllocId, InterpError, InterpResult, Pointer,
    ResourceExhaustionInfo, Scalar, ScalarMaybeUninit, UndefinedBehaviorInfo, UninitBytesAccess,
    UnsupportedOpInfo,
};
use crate::ty;
/// This type represents an Allocation in the Miri/CTFE core engine.
///
/// Its public API is rather low-level, working directly with allocation offsets and a custom error
/// type to account for the lack of an AllocId on this level. The Miri/CTFE core engine `memory`
/// module provides higher-level access.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct Allocation<Tag = (), Extra = ()> {
    /// The actual bytes of the allocation.
    /// Note that the bytes of a pointer represent the offset of the pointer.
    bytes: Vec<u8>,
    /// Maps from byte addresses to extra data for each pointer.
    /// Only the first byte of a pointer is inserted into the map; i.e.,
    /// every entry in this map applies to `pointer_size` consecutive bytes starting
    /// at the given offset.
    relocations: Relocations<Tag>,
    /// Denotes which part of this allocation is initialized.
    init_mask: InitMask,
    /// The alignment of the allocation to detect unaligned reads.
    /// (`Align` guarantees that this is a power of two.)
    pub align: Align,
    /// Whether the allocation is mutable.
    /// Also used by codegen to determine if a static should be put into mutable memory,
    /// which happens for `static mut` and `static` with interior mutability.
    pub mutability: Mutability,
    /// Extra state for the machine.
    pub extra: Extra,
}
/// We have our own error type that does not know about the `AllocId`; that information
/// is added when converting to `InterpError`.
#[derive(Debug)]
pub enum AllocError {
    /// Encountered a pointer where we needed raw bytes.
    ReadPointerAsBytes,
    /// Using uninitialized data where it is not allowed.
    InvalidUninitBytes(Option<UninitBytesAccess>),
}
pub type AllocResult<T = ()> = Result<T, AllocError>;

impl AllocError {
    pub fn to_interp_error<'tcx>(self, alloc_id: AllocId) -> InterpError<'tcx> {
        match self {
            AllocError::ReadPointerAsBytes => {
                InterpError::Unsupported(UnsupportedOpInfo::ReadPointerAsBytes)
            }
            AllocError::InvalidUninitBytes(info) => InterpError::UndefinedBehavior(
                UndefinedBehaviorInfo::InvalidUninitBytes(info.map(|b| (alloc_id, b))),
            ),
        }
    }
}
/// The information that makes up a memory access: offset and size.
#[derive(Copy, Clone, Debug)]
pub struct AllocRange {
    pub start: Size,
    pub size: Size,
}

/// Free-standing constructor for less syntactic overhead.
#[inline(always)]
pub fn alloc_range(start: Size, size: Size) -> AllocRange {
    AllocRange { start, size }
}

impl AllocRange {
    #[inline(always)]
    pub fn end(self) -> Size {
        self.start + self.size // This does overflow checking.
    }

    /// Returns the `subrange` within this range; panics if it is not a subrange.
    #[inline]
    pub fn subrange(self, subrange: AllocRange) -> AllocRange {
        let sub_start = self.start + subrange.start;
        let range = alloc_range(sub_start, subrange.size);
        assert!(range.end() <= self.end(), "access outside the bounds for given AllocRange");
        range
    }
}
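// A small usage sketch (illustrative, not used by the interpreter itself): a subrange is
// interpreted relative to the range it is taken out of.
#[cfg(test)]
mod alloc_range_example {
    use super::*;

    #[test]
    fn subrange_is_relative_to_outer_range() {
        // Bytes 4..12 of some allocation.
        let outer = alloc_range(Size::from_bytes(4), Size::from_bytes(8));
        // Bytes 2..4 *within* `outer`, i.e. bytes 6..8 of the allocation.
        let inner = outer.subrange(alloc_range(Size::from_bytes(2), Size::from_bytes(2)));
        assert_eq!(inner.start, Size::from_bytes(6));
        assert_eq!(inner.end(), Size::from_bytes(8));
    }
}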
// The constructors are all without extra; the extra gets added by a machine hook later.
impl<Tag> Allocation<Tag> {
    /// Creates an allocation initialized by the given bytes.
    pub fn from_bytes<'a>(
        slice: impl Into<Cow<'a, [u8]>>,
        align: Align,
        mutability: Mutability,
    ) -> Self {
        let bytes = slice.into().into_owned();
        let size = Size::from_bytes(bytes.len());
        Self {
            bytes,
            relocations: Relocations::new(),
            init_mask: InitMask::new(size, true),
            align,
            mutability,
            extra: (),
        }
    }

    pub fn from_bytes_byte_aligned_immutable<'a>(slice: impl Into<Cow<'a, [u8]>>) -> Self {
        Allocation::from_bytes(slice, Align::ONE, Mutability::Not)
    }

    /// Try to create an Allocation of `size` bytes, failing if there is not enough memory
    /// available to the compiler to do so.
    pub fn uninit(size: Size, align: Align, panic_on_fail: bool) -> InterpResult<'static, Self> {
        let mut bytes = Vec::new();
        bytes.try_reserve(size.bytes_usize()).map_err(|_| {
            // This results in an error that can happen non-deterministically, since the memory
            // available to the compiler can change between runs. Normally queries are always
            // deterministic. However, we can be non-deterministic here because all uses of const
            // evaluation (including ConstProp!) will make compilation fail (via hard error
            // or ICE) upon encountering a `MemoryExhausted` error.
            if panic_on_fail {
                panic!("Allocation::uninit called with panic_on_fail had allocation failure")
            }
            ty::tls::with(|tcx| {
                tcx.sess.delay_span_bug(DUMMY_SP, "exhausted memory during interpretation")
            });
            InterpError::ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted)
        })?;
        bytes.resize(size.bytes_usize(), 0);
        Ok(Allocation {
            bytes,
            relocations: Relocations::new(),
            init_mask: InitMask::new(size, false),
            align,
            mutability: Mutability::Mut,
            extra: (),
        })
    }
}
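// Usage sketch (illustrative): the byte-based constructor produces a fully initialized,
// relocation-free allocation, e.g. for constant byte strings.
#[cfg(test)]
mod allocation_ctor_example {
    use super::*;

    #[test]
    fn from_bytes_marks_everything_initialized() {
        let alloc: Allocation = Allocation::from_bytes_byte_aligned_immutable(&b"hello"[..]);
        assert_eq!(alloc.len(), 5);
        assert_eq!(alloc.align, Align::ONE);
        assert_eq!(alloc.mutability, Mutability::Not);
        // All five bytes start out initialized, and there are no relocations.
        assert!(alloc.init_mask().is_range_initialized(Size::ZERO, alloc.size()).is_ok());
        assert!(alloc.relocations().is_empty());
    }
}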
impl Allocation<()> {
    /// Adds the `Tag` and `Extra` fields.
    pub fn with_tags_and_extra<T, E>(
        self,
        mut tagger: impl FnMut(AllocId) -> T,
        extra: E,
    ) -> Allocation<T, E> {
        Allocation {
            bytes: self.bytes,
            relocations: Relocations::from_presorted(
                self.relocations
                    .iter()
                    // The allocations in the relocations (pointers stored *inside* this allocation)
                    // all get the base pointer tag.
                    .map(|&(offset, ((), alloc))| {
                        let tag = tagger(alloc);
                        (offset, (tag, alloc))
                    })
                    .collect(),
            ),
            init_mask: self.init_mask,
            align: self.align,
            mutability: self.mutability,
            extra,
        }
    }
}
/// Raw accessors. Provide access to otherwise private bytes.
impl<Tag, Extra> Allocation<Tag, Extra> {
    pub fn len(&self) -> usize {
        self.bytes.len()
    }

    pub fn size(&self) -> Size {
        Size::from_bytes(self.len())
    }

    /// Looks at a slice which may describe uninitialized bytes or describe a relocation. This differs
    /// from `get_bytes_with_uninit_and_ptr` in that it does no relocation checks (even on the
    /// edges) at all.
    /// This must not be used for reads affecting the interpreter execution.
    pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
        &self.bytes[range]
    }

    /// Returns the mask indicating which bytes are initialized.
    pub fn init_mask(&self) -> &InitMask {
        &self.init_mask
    }

    /// Returns the relocation list.
    pub fn relocations(&self) -> &Relocations<Tag> {
        &self.relocations
    }
}
/// Byte accessors.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// The last argument controls whether we error out when there are uninitialized
    /// or pointer bytes. You should never call this, call `get_bytes` or
    /// `get_bytes_with_uninit_and_ptr` instead.
    ///
    /// This function also guarantees that the resulting pointer will remain stable
    /// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
    /// on that.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    fn get_bytes_internal(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        check_init_and_ptr: bool,
    ) -> AllocResult<&[u8]> {
        if check_init_and_ptr {
            self.check_init(range)?;
            self.check_relocations(cx, range)?;
        } else {
            // We still don't want relocations on the *edges*.
            self.check_relocation_edges(cx, range)?;
        }

        Ok(&self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
    }

    /// Checks that these bytes are initialized and not pointer bytes, and then returns them
    /// as a slice.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    #[inline]
    pub fn get_bytes(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult<&[u8]> {
        self.get_bytes_internal(cx, range, true)
    }

    /// It is the caller's responsibility to handle uninitialized and pointer bytes.
    /// However, this still checks that there are no relocations on the *edges*.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    #[inline]
    pub fn get_bytes_with_uninit_and_ptr(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<&[u8]> {
        self.get_bytes_internal(cx, range, false)
    }

    /// Just calling this already marks everything as initialized and removes relocations,
    /// so be sure to actually put data there!
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    pub fn get_bytes_mut(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> &mut [u8] {
        self.mark_init(range, true);
        self.clear_relocations(cx, range);

        &mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
    }

    /// A raw pointer variant of `get_bytes_mut` that avoids invalidating existing aliases into this memory.
    pub fn get_bytes_mut_ptr(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> *mut [u8] {
        self.mark_init(range, true);
        self.clear_relocations(cx, range);

        assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
        let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
        let len = range.end().bytes_usize() - range.start.bytes_usize();
        ptr::slice_from_raw_parts_mut(begin_ptr, len)
    }
}
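// Illustrative call shape for the byte accessors above (`alloc` is some `Allocation` and
// `cx` any `HasDataLayout` context, e.g. a `TyCtxt`):
//
//     // Read 4 bytes at offset 8, erroring out on uninitialized bytes or relocations.
//     let bytes = alloc.get_bytes(cx, alloc_range(Size::from_bytes(8), Size::from_bytes(4)))?;
//     // Write access: this eagerly marks the range as initialized and drops relocations,
//     // so the caller must overwrite all of it.
//     alloc.get_bytes_mut(cx, alloc_range(Size::from_bytes(8), Size::from_bytes(4))).fill(0);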
/// Reading and writing.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
    /// relocation. If `allow_uninit_and_ptr` is `false`, also enforces that the memory in the
    /// given range contains neither relocations nor uninitialized bytes.
    pub fn check_bytes(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        allow_uninit_and_ptr: bool,
    ) -> AllocResult {
        // Check bounds and relocations on the edges.
        self.get_bytes_with_uninit_and_ptr(cx, range)?;
        // Check uninit and ptr.
        if !allow_uninit_and_ptr {
            self.check_init(range)?;
            self.check_relocations(cx, range)?;
        }
        Ok(())
    }

    /// Reads a *non-ZST* scalar.
    ///
    /// ZSTs can't be read because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
    pub fn read_scalar(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<ScalarMaybeUninit<Tag>> {
        // `get_bytes_with_uninit_and_ptr` tests relocation edges.
        let bytes = self.get_bytes_with_uninit_and_ptr(cx, range)?;
        // Uninit check happens *after* we established that the alignment is correct.
        // We must not return `Ok()` for unaligned pointers!
        if self.is_init(range).is_err() {
            // This inflates uninitialized bytes to the entire scalar, even if only a few
            // bytes are uninitialized.
            return Ok(ScalarMaybeUninit::Uninit);
        }
        // Now we do the actual reading.
        let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
        // See if we got a pointer.
        if range.size != cx.data_layout().pointer_size {
            // Not a pointer.
            // *Now*, we better make sure that the inside is free of relocations too.
            self.check_relocations(cx, range)?;
        } else {
            // Maybe a pointer.
            if let Some(&(tag, alloc_id)) = self.relocations.get(&range.start) {
                let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits), tag);
                return Ok(ScalarMaybeUninit::Scalar(ptr.into()));
            }
        }
        // We don't. Just return the bits.
        Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size)))
    }

    /// Writes a *non-ZST* scalar.
    ///
    /// ZSTs can't be written because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
    pub fn write_scalar(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        val: ScalarMaybeUninit<Tag>,
    ) -> AllocResult {
        assert!(self.mutability == Mutability::Mut);

        let val = match val {
            ScalarMaybeUninit::Scalar(scalar) => scalar,
            ScalarMaybeUninit::Uninit => {
                self.mark_init(range, false);
                return Ok(());
            }
        };

        let bytes = match val.to_bits_or_ptr(range.size, cx) {
            Err(val) => u128::from(val.offset.bytes()),
            Ok(data) => data,
        };

        let endian = cx.data_layout().endian;
        let dst = self.get_bytes_mut(cx, range);
        write_target_uint(endian, dst, bytes).unwrap();

        // See if we have to also write a relocation.
        if let Scalar::Ptr(val) = val {
            self.relocations.insert(range.start, (val.tag, val.alloc_id));
        }

        Ok(())
    }
}
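// Sketch of the intended read/write pattern (illustrative only; real callers go through
// `InterpCx::read_scalar` / `InterpCx::write_scalar` in the interpreter):
//
//     let range = alloc_range(offset, size);
//     match alloc.read_scalar(cx, range)? {
//         ScalarMaybeUninit::Scalar(s) => { /* plain bits, or a pointer with a relocation */ }
//         ScalarMaybeUninit::Uninit => { /* some of the accessed bytes were uninitialized */ }
//     }
//     alloc.write_scalar(cx, range, ScalarMaybeUninit::Scalar(value))?;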
/// Relocations.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Returns all relocations overlapping with the given range.
    pub fn get_relocations(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> &[(Size, (Tag, AllocId))] {
        // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
        // the beginning of this range.
        let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
        self.relocations.range(Size::from_bytes(start)..range.end())
    }

    /// Checks that there are no relocations overlapping with the given range.
    #[inline(always)]
    fn check_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
        if self.get_relocations(cx, range).is_empty() {
            Ok(())
        } else {
            Err(AllocError::ReadPointerAsBytes)
        }
    }

    /// Removes all relocations inside the given range.
    /// If there are relocations overlapping with the edges, they
    /// are removed as well *and* the bytes they cover are marked as
    /// uninitialized. This is a somewhat odd "spooky action at a distance",
    /// but it allows strictly more code to run than if we would just error
    /// immediately in that case.
    fn clear_relocations(&mut self, cx: &impl HasDataLayout, range: AllocRange) {
        // Find the start and end of the given range and its outermost relocations.
        let (first, last) = {
            // Find all relocations overlapping the given range.
            let relocations = self.get_relocations(cx, range);
            if relocations.is_empty() {
                return;
            }

            (
                relocations.first().unwrap().0,
                relocations.last().unwrap().0 + cx.data_layout().pointer_size,
            )
        };
        let start = range.start;
        let end = range.end();

        // Mark parts of the outermost relocations as uninitialized if they partially fall outside the
        // given range.
        if first < start {
            self.init_mask.set_range(first, start, false);
        }
        if last > end {
            self.init_mask.set_range(end, last, false);
        }

        // Forget all the relocations.
        self.relocations.remove_range(first..last);
    }

    /// Errors if there are relocations overlapping with the edges of the
    /// given memory range.
    #[inline]
    fn check_relocation_edges(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
        self.check_relocations(cx, alloc_range(range.start, Size::ZERO))?;
        self.check_relocations(cx, alloc_range(range.end(), Size::ZERO))?;
        Ok(())
    }
}
/// Uninitialized bytes.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Checks whether the given range is entirely initialized.
    ///
    /// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
    /// indexes of the first contiguous uninitialized access.
    fn is_init(&self, range: AllocRange) -> Result<(), Range<Size>> {
        self.init_mask.is_range_initialized(range.start, range.end()) // `Size` addition
    }

    /// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
    /// error which will report the first range of bytes which is uninitialized.
    fn check_init(&self, range: AllocRange) -> AllocResult {
        self.is_init(range).or_else(|idx_range| {
            Err(AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
                access_offset: range.start,
                access_size: range.size,
                uninit_offset: idx_range.start,
                uninit_size: idx_range.end - idx_range.start, // `Size` subtraction
            })))
        })
    }

    pub fn mark_init(&mut self, range: AllocRange, is_init: bool) {
        if range.size.bytes() == 0 {
            return;
        }
        assert!(self.mutability == Mutability::Mut);
        self.init_mask.set_range(range.start, range.end(), is_init);
    }
}
/// Run-length encoding of the uninit mask.
/// Used to copy parts of a mask multiple times to another allocation.
pub struct InitMaskCompressed {
    /// Whether the first range is initialized.
    initial: bool,
    /// The lengths of ranges that are run-length encoded.
    /// The initialization state of the ranges alternates starting with `initial`.
    ranges: smallvec::SmallVec<[u64; 1]>,
}

impl InitMaskCompressed {
    pub fn no_bytes_init(&self) -> bool {
        // The `ranges` are run-length encoded and of alternating initialization state.
        // So if `ranges.len() > 1` then the second block is an initialized range.
        !self.initial && self.ranges.len() == 1
    }
}
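// For illustration: the mask `0000010010001110` used in the comment inside
// `compress_uninit_range` below encodes, reading left to right, the runs
// `[5, 1, 2, 1, 3, 3, 1]` with `initial = false`. Since there is more than one run,
// `no_bytes_init()` returns `false` for it.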
/// Transferring the initialization mask to other allocations.
impl<Tag, Extra> Allocation<Tag, Extra> {
    /// Creates a run-length encoding of the initialization mask.
    pub fn compress_uninit_range(&self, src: Pointer<Tag>, size: Size) -> InitMaskCompressed {
        // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
        // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
        // the source and write it to the destination. Even if we optimized the memory accesses,
        // we'd be doing all of this `repeat` times.
        // Therefore we precompute a compressed version of the initialization mask of the source value and
        // then write it back `repeat` times without computing any more information from the source.

        // A precomputed cache for ranges of initialized / uninitialized bits
        // 0000010010001110 will become
        // `[5, 1, 2, 1, 3, 3, 1]`,
        // where each element toggles the state.

        let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
        let initial = self.init_mask.get(src.offset);
        let mut cur_len = 1;
        let mut cur = initial;

        for i in 1..size.bytes() {
            // FIXME: optimize to bitshift the current uninitialized block's bits and read the top bit.
            if self.init_mask.get(src.offset + Size::from_bytes(i)) == cur {
                cur_len += 1;
            } else {
                ranges.push(cur_len);
                cur_len = 1;
                cur = !cur;
            }
        }

        ranges.push(cur_len);

        InitMaskCompressed { ranges, initial }
    }

    /// Applies multiple instances of the run-length encoding to the initialization mask.
    pub fn mark_compressed_init_range(
        &mut self,
        defined: &InitMaskCompressed,
        dest: Pointer<Tag>,
        size: Size,
        repeat: u64,
    ) {
        // An optimization where we can just overwrite an entire range of initialization
        // bits if they are going to be uniformly `1` or `0`.
        if defined.ranges.len() <= 1 {
            self.init_mask.set_range_inbounds(
                dest.offset,
                dest.offset + size * repeat, // `Size` operations
                defined.initial,
            );
            return;
        }

        for mut j in 0..repeat {
            j *= size.bytes();
            j += dest.offset.bytes();
            let mut cur = defined.initial;
            for range in &defined.ranges {
                let old_j = j;
                j += range;
                self.init_mask.set_range_inbounds(
                    Size::from_bytes(old_j),
                    Size::from_bytes(j),
                    cur,
                );
                cur = !cur;
            }
        }
    }
}
/// Relocations.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
pub struct Relocations<Tag = (), Id = AllocId>(SortedMap<Size, (Tag, Id)>);

impl<Tag, Id> Relocations<Tag, Id> {
    pub fn new() -> Self {
        Relocations(SortedMap::new())
    }

    // The caller must guarantee that the given relocations are already sorted
    // by address and contain no duplicates.
    pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self {
        Relocations(SortedMap::from_presorted_elements(r))
    }
}

impl<Tag> Deref for Relocations<Tag> {
    type Target = SortedMap<Size, (Tag, AllocId)>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<Tag> DerefMut for Relocations<Tag> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

/// A partial, owned list of relocations to transfer into another allocation.
pub struct AllocationRelocations<Tag> {
    relative_relocations: Vec<(Size, (Tag, AllocId))>,
}
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    pub fn prepare_relocation_copy(
        &self,
        cx: &impl HasDataLayout,
        src: AllocRange,
        dest: Size,
        count: u64,
    ) -> AllocationRelocations<Tag> {
        let relocations = self.get_relocations(cx, src);
        if relocations.is_empty() {
            return AllocationRelocations { relative_relocations: Vec::new() };
        }

        let size = src.size;
        let mut new_relocations = Vec::with_capacity(relocations.len() * (count as usize));

        for i in 0..count {
            new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                // compute offset for current repetition
                let dest_offset = dest + size * i; // `Size` operations
                (
                    // shift offsets from source allocation to destination allocation
                    (offset + dest_offset) - src.start, // `Size` operations
                    reloc,
                )
            }));
        }

        AllocationRelocations { relative_relocations: new_relocations }
    }

    /// Applies a relocation copy.
    /// The affected range, as defined in the parameters to `prepare_relocation_copy`, is expected
    /// to be clear of relocations.
    pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Tag>) {
        self.relocations.insert_presorted(relocations.relative_relocations);
    }
}
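// How the two halves above combine (illustrative; the actual driver is the interpreter's
// `copy_repeatedly` in the `memory` module, which also copies the bytes and the init mask):
//
//     let relocations = src_alloc.prepare_relocation_copy(cx, src_range, dest_offset, count);
//     /* ... copy the raw bytes and apply the compressed init mask ... */
//     dest_alloc.mark_relocation_range(relocations);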
////////////////////////////////////////////////////////////////////////////////
// Uninitialized byte tracking
////////////////////////////////////////////////////////////////////////////////

type Block = u64;

/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
/// is initialized. If it is `false` the byte is uninitialized.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct InitMask {
    blocks: Vec<Block>,
    len: Size,
}

impl InitMask {
    pub const BLOCK_SIZE: u64 = 64;

    pub fn new(size: Size, state: bool) -> Self {
        let mut m = InitMask { blocks: vec![], len: Size::ZERO };
        m.grow(size, state);
        m
    }

    /// Checks whether the range `start..end` (end-exclusive) is entirely initialized.
    ///
    /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
    /// indexes for the first contiguous span of the uninitialized access.
    #[inline]
    pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), Range<Size>> {
        if end > self.len {
            return Err(self.len..end);
        }

        // FIXME(oli-obk): optimize this for allocations larger than a block.
        let idx = (start.bytes()..end.bytes()).map(Size::from_bytes).find(|&i| !self.get(i));

        match idx {
            Some(idx) => {
                let uninit_end = (idx.bytes()..end.bytes())
                    .map(Size::from_bytes)
                    .find(|&i| self.get(i))
                    .unwrap_or(end);
                Err(idx..uninit_end)
            }
            None => Ok(()),
        }
    }

    pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
        let len = self.len;
        if end > len {
            self.grow(end - len, new_state);
        }
        self.set_range_inbounds(start, end, new_state);
    }

    pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
        let (blocka, bita) = bit_index(start);
        let (blockb, bitb) = bit_index(end);
        if blocka == blockb {
            // First set all bits except the first `bita`,
            // then unset the last `64 - bitb` bits.
            let range = if bitb == 0 {
                u64::MAX << bita
            } else {
                (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
            };
            if new_state {
                self.blocks[blocka] |= range;
            } else {
                self.blocks[blocka] &= !range;
            }
            return;
        }
        // across block boundaries
        if new_state {
            // Set `bita..64` to `1`.
            self.blocks[blocka] |= u64::MAX << bita;
            // Set `0..bitb` to `1`.
            if bitb != 0 {
                self.blocks[blockb] |= u64::MAX >> (64 - bitb);
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = u64::MAX;
            }
        } else {
            // Set `bita..64` to `0`.
            self.blocks[blocka] &= !(u64::MAX << bita);
            // Set `0..bitb` to `0`.
            if bitb != 0 {
                self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = 0;
            }
        }
    }

    #[inline]
    pub fn get(&self, i: Size) -> bool {
        let (block, bit) = bit_index(i);
        (self.blocks[block] & (1 << bit)) != 0
    }

    #[inline]
    pub fn set(&mut self, i: Size, new_state: bool) {
        let (block, bit) = bit_index(i);
        self.set_bit(block, bit, new_state);
    }

    #[inline]
    fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
        if new_state {
            self.blocks[block] |= 1 << bit;
        } else {
            self.blocks[block] &= !(1 << bit);
        }
    }

    pub fn grow(&mut self, amount: Size, new_state: bool) {
        if amount.bytes() == 0 {
            return;
        }
        let unused_trailing_bits =
            u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
        if amount.bytes() > unused_trailing_bits {
            let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
            self.blocks.extend(
                // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
                iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
            );
        }
        let start = self.len;
        self.len += amount;
        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
    }
}

#[inline]
fn bit_index(bits: Size) -> (usize, usize) {
    let bits = bits.bytes();
    let a = bits / InitMask::BLOCK_SIZE;
    let b = bits % InitMask::BLOCK_SIZE;
    (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
}
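// A small, self-contained exercise of the init-mask machinery (illustrative only; the byte
// offsets and block counts below are just example values).
#[cfg(test)]
mod init_mask_example {
    use super::*;

    #[test]
    fn set_range_and_query() {
        // 100 bytes, all uninitialized; this needs two 64-bit blocks.
        let mut mask = InitMask::new(Size::from_bytes(100), false);
        assert_eq!(bit_index(Size::from_bytes(70)), (1, 6)); // block 1, bit 6

        // Initialize bytes 10..20 (within one block) and 60..70 (crossing a block boundary).
        mask.set_range(Size::from_bytes(10), Size::from_bytes(20), true);
        mask.set_range(Size::from_bytes(60), Size::from_bytes(70), true);

        assert!(mask.is_range_initialized(Size::from_bytes(10), Size::from_bytes(20)).is_ok());
        assert!(mask.is_range_initialized(Size::from_bytes(60), Size::from_bytes(70)).is_ok());
        // Byte 20 is still uninitialized, so a wider query reports the uninit range 20..25.
        let uninit =
            mask.is_range_initialized(Size::from_bytes(10), Size::from_bytes(25)).unwrap_err();
        assert_eq!(uninit.start, Size::from_bytes(20));
        assert_eq!(uninit.end, Size::from_bytes(25));
    }
}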