//! The virtual memory representation of the MIR interpreter.

use std::borrow::Cow;
use std::convert::TryFrom;
use std::iter;
use std::ops::{Deref, DerefMut, Range};

use rustc_ast::ast::Mutability;
use rustc_data_structures::sorted_map::SortedMap;
use rustc_target::abi::{Align, HasDataLayout, Size};

use super::{
    read_target_uint, write_target_uint, AllocId, InterpResult, Pointer, Scalar, ScalarMaybeUninit,
    UninitBytesAccess,
};
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
pub struct Allocation<Tag = (), Extra = ()> {
/// The actual bytes of the allocation.
/// Note that the bytes of a pointer represent the offset of the pointer.
/// Maps from byte addresses to extra data for each pointer.
/// Only the first byte of a pointer is inserted into the map; i.e.,
/// every entry in this map applies to `pointer_size` consecutive bytes starting
/// at the given offset.
relocations: Relocations<Tag>,
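// For example (illustration only): with 8-byte pointers, a relocation entry keyed at
// offset 16 covers bytes 16..24 of `bytes`, and those bytes hold the pointer's offset.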
/// Denotes which part of this allocation is initialized.
/// The size of the allocation. Currently, must always equal `bytes.len()`.
/// The alignment of the allocation to detect unaligned reads.
/// (`Align` guarantees that this is a power of two.)
/// `true` if the allocation is mutable.
/// Also used by codegen to determine if a static should be put into mutable memory,
/// which happens for `static mut` and `static` with interior mutability.
pub mutability: Mutability,
/// Extra state for the machine.
pub trait AllocationExtra<Tag>: ::std::fmt::Debug + Clone {
// There is no constructor in here because the constructor's type depends
// on `MemoryKind`, and making things sufficiently generic leads to painful
// inference errors.
/// Hook for performing extra checks on a memory read access.
/// Takes read-only access to the allocation so that all the memory read
/// operations can keep taking `&self`. Use a `RefCell` in `AllocExtra` if you
/// need to mutate.
_alloc: &Allocation<Tag, Self>,
) -> InterpResult<'tcx> {
/// Hook for performing extra checks on a memory write access.
_alloc: &mut Allocation<Tag, Self>,
) -> InterpResult<'tcx> {
/// Hook for performing extra checks on a memory deallocation.
/// `size` will be the size of the allocation.
fn memory_deallocated(
_alloc: &mut Allocation<Tag, Self>,
) -> InterpResult<'tcx> {
// For `Tag = ()` and no extra state, we have a trivial implementation.
impl AllocationExtra<()> for () {}
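// Illustrative sketch (not part of this file): a machine could hook the read callback to
// collect statistics. The `ptr`/`size` parameter names, the `<'tcx>` on the method, and the
// machine-state field being called `extra` are assumptions made for this example only; they
// mirror the shape of the trait above.
//
//     #[derive(Debug, Clone)]
//     struct ReadCounter {
//         reads: std::cell::Cell<u64>,
//     }
//
//     impl<Tag> AllocationExtra<Tag> for ReadCounter {
//         fn memory_read<'tcx>(
//             alloc: &Allocation<Tag, Self>,
//             _ptr: Pointer<Tag>,
//             _size: Size,
//         ) -> InterpResult<'tcx> {
//             // Interior mutability keeps `memory_read` working on `&Allocation`.
//             alloc.extra.reads.set(alloc.extra.reads.get() + 1);
//             Ok(())
//         }
//     }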
// The constructors are all without extra; the extra gets added by a machine hook later.
impl<Tag> Allocation<Tag> {
/// Creates a read-only allocation initialized by the given bytes.
pub fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, align: Align) -> Self {
let bytes = slice.into().into_owned();
let size = Size::from_bytes(bytes.len());
relocations: Relocations::new(),
init_mask: InitMask::new(size, true),
mutability: Mutability::Not,
pub fn from_byte_aligned_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>) -> Self {
Allocation::from_bytes(slice, Align::from_bytes(1).unwrap())
pub fn uninit(size: Size, align: Align) -> Self {
bytes: vec![0; size.bytes_usize()],
relocations: Relocations::new(),
init_mask: InitMask::new(size, false),
mutability: Mutability::Mut,
impl Allocation<(), ()> {
/// Adds the `Tag` and `Extra` fields.
pub fn with_tags_and_extra<T, E>(
mut tagger: impl FnMut(AllocId) -> T,
) -> Allocation<T, E> {
relocations: Relocations::from_presorted(
// The allocations in the relocations (pointers stored *inside* this allocation)
// all get the base pointer tag.
.map(|&(offset, ((), alloc))| {
let tag = tagger(alloc);
(offset, (tag, alloc))
init_mask: self.init_mask,
mutability: self.mutability,
/// Raw accessors. Provide access to otherwise private bytes.
impl<Tag, Extra> Allocation<Tag, Extra> {
pub fn len(&self) -> usize {
self.size.bytes_usize()
/// Looks at a slice which may describe uninitialized bytes or describe a relocation. This differs
/// from `get_bytes_with_uninit_and_ptr` in that it does no relocation checks (even on the
/// edges) at all. It further ignores `AllocationExtra` callbacks.
/// This must not be used for reads affecting the interpreter execution.
pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
/// Returns the mask indicating which bytes are initialized.
pub fn init_mask(&self) -> &InitMask {
/// Returns the relocation list.
pub fn relocations(&self) -> &Relocations<Tag> {
impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
/// Just a small local helper function to avoid a bit of code repetition.
/// Returns the range of this allocation that corresponds to the given offset and size.
fn check_bounds(&self, offset: Size, size: Size) -> Range<usize> {
let end = offset + size; // This does overflow checking.
let end = usize::try_from(end.bytes()).expect("access too big for this host architecture");
"Out-of-bounds access at offset {}, size {} in allocation of size {}",
offset.bytes_usize()..end
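// Worked example (illustration only): in an allocation of size 16, `check_bounds` with
// offset 4 and size 8 yields the byte range `4..12`; offset 12 and size 8 would trip the
// out-of-bounds assertion above, since 12 + 8 > 16.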
/// The last argument controls whether we error out when there are uninitialized
/// or pointer bytes. You should never call this; call `get_bytes` or
/// `get_bytes_with_uninit_and_ptr` instead.
/// This function also guarantees that the resulting pointer will remain stable
/// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
/// on that.
/// It is the caller's responsibility to check bounds and alignment beforehand.
fn get_bytes_internal(
cx: &impl HasDataLayout,
check_init_and_ptr: bool,
) -> InterpResult<'tcx, &[u8]> {
let range = self.check_bounds(ptr.offset, size);
if check_init_and_ptr {
self.check_init(ptr, size)?;
self.check_relocations(cx, ptr, size)?;
// We still don't want relocations on the *edges*.
self.check_relocation_edges(cx, ptr, size)?;
AllocationExtra::memory_read(self, ptr, size)?;
Ok(&self.bytes[range])
/// Checks that these bytes are initialized and not pointer bytes, and then returns them.
/// It is the caller's responsibility to check bounds and alignment beforehand.
/// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
/// on `InterpCx` instead.
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, &[u8]> {
self.get_bytes_internal(cx, ptr, size, true)
/// It is the caller's responsibility to handle uninitialized and pointer bytes.
/// However, this still checks that there are no relocations on the *edges*.
/// It is the caller's responsibility to check bounds and alignment beforehand.
pub fn get_bytes_with_uninit_and_ptr(
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, &[u8]> {
self.get_bytes_internal(cx, ptr, size, false)
/// Just calling this already marks everything as initialized and removes relocations,
/// so be sure to actually put data there!
/// It is the caller's responsibility to check bounds and alignment beforehand.
/// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
/// on `InterpCx` instead.
pub fn get_bytes_mut(
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, &mut [u8]> {
let range = self.check_bounds(ptr.offset, size);
self.mark_init(ptr, size, true);
self.clear_relocations(cx, ptr, size)?;
AllocationExtra::memory_written(self, ptr, size)?;
Ok(&mut self.bytes[range])
/// Reading and writing.
impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
/// Reads bytes until a `0` is encountered. Will error if the end of the allocation is reached
/// before a `0` is found.
/// Most likely, you want to call `Memory::read_c_str` instead of this method.
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, &[u8]> {
let offset = ptr.offset.bytes_usize();
Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
let size_with_null = Size::from_bytes(size) + Size::from_bytes(1);
// Go through `get_bytes` for checks and AllocationExtra hooks.
// We read the null, so we include it in the request, but we want it removed
// from the result, so we do subslicing.
&self.get_bytes(cx, ptr, size_with_null)?[..size]
// This includes the case where `offset` is out-of-bounds to begin with.
None => throw_ub!(UnterminatedCString(ptr.erase_tag())),
/// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
/// relocation. If `allow_uninit_and_ptr` is `false`, also enforces that the memory in the
/// given range contains neither relocations nor uninitialized bytes.
cx: &impl HasDataLayout,
allow_uninit_and_ptr: bool,
) -> InterpResult<'tcx> {
// Check bounds and relocations on the edges.
self.get_bytes_with_uninit_and_ptr(cx, ptr, size)?;
// Check uninit and ptr.
if !allow_uninit_and_ptr {
self.check_init(ptr, size)?;
self.check_relocations(cx, ptr, size)?;
/// Writes `src` to the memory starting at `ptr.offset`.
/// It is the caller's responsibility to check bounds and alignment beforehand.
/// Most likely, you want to call `Memory::write_bytes` instead of this method.
cx: &impl HasDataLayout,
src: impl IntoIterator<Item = u8>,
) -> InterpResult<'tcx> {
let mut src = src.into_iter();
let (lower, upper) = src.size_hint();
let len = upper.expect("can only write bounded iterators");
assert_eq!(lower, len, "can only write iterators with a precise length");
let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len))?;
// `zip` would stop when the first iterator ends; we want to definitely
// cover all of `bytes`.
*dest = src.next().expect("iterator was shorter than it said it would be");
src.next().expect_none("iterator was longer than it said it would be");
/// Reads a *non-ZST* scalar.
/// ZSTs can't be read for two reasons:
/// * byte-order cannot work with zero-element buffers;
/// * in order to obtain a `Pointer`, we need to check for ZSTness anyway due to integer
/// pointers being valid for ZSTs.
/// It is the caller's responsibility to check bounds and alignment beforehand.
/// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
// `get_bytes_with_uninit_and_ptr` tests relocation edges.
let bytes = self.get_bytes_with_uninit_and_ptr(cx, ptr, size)?;
// Uninit check happens *after* we established that the alignment is correct.
// We must not return `Ok()` for unaligned pointers!
if self.is_init(ptr, size).is_err() {
// This inflates uninitialized bytes to the entire scalar, even if only a few
// bytes are uninitialized.
return Ok(ScalarMaybeUninit::Uninit);
// Now we do the actual reading.
let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
// See if we got a pointer.
if size != cx.data_layout().pointer_size {
// *Now*, we'd better make sure that the inside is free of relocations too.
self.check_relocations(cx, ptr, size)?;
if let Some(&(tag, alloc_id)) = self.relocations.get(&ptr.offset) {
let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits), tag);
return Ok(ScalarMaybeUninit::Scalar(ptr.into()));
// We don't. Just return the bits.
Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, size)))
/// Reads a pointer-sized scalar.
/// It is the caller's responsibility to check bounds and alignment beforehand.
/// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
pub fn read_ptr_sized(
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
self.read_scalar(cx, ptr, cx.data_layout().pointer_size)
/// Writes a *non-ZST* scalar.
/// ZSTs can't be written for two reasons:
/// * byte-order cannot work with zero-element buffers;
/// * in order to obtain a `Pointer`, we need to check for ZSTness anyway due to integer
/// pointers being valid for ZSTs.
/// It is the caller's responsibility to check bounds and alignment beforehand.
/// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
cx: &impl HasDataLayout,
val: ScalarMaybeUninit<Tag>,
) -> InterpResult<'tcx> {
let val = match val {
ScalarMaybeUninit::Scalar(scalar) => scalar,
ScalarMaybeUninit::Uninit => {
self.mark_init(ptr, type_size, false);
let bytes = match val.to_bits_or_ptr(type_size, cx) {
Err(val) => u128::from(val.offset.bytes()),
let endian = cx.data_layout().endian;
let dst = self.get_bytes_mut(cx, ptr, type_size)?;
write_target_uint(endian, dst, bytes).unwrap();
// See if we have to also write a relocation.
if let Scalar::Ptr(val) = val {
self.relocations.insert(ptr.offset, (val.tag, val.alloc_id));
/// Writes a pointer-sized scalar.
/// It is the caller's responsibility to check bounds and alignment beforehand.
/// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
pub fn write_ptr_sized(
cx: &impl HasDataLayout,
val: ScalarMaybeUninit<Tag>,
) -> InterpResult<'tcx> {
let ptr_size = cx.data_layout().pointer_size;
self.write_scalar(cx, ptr, val, ptr_size)
impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
/// Returns all relocations overlapping with the given pointer-offset pair.
pub fn get_relocations(
cx: &impl HasDataLayout,
) -> &[(Size, (Tag, AllocId))] {
// We have to go back `pointer_size - 1` bytes, as that one would still overlap with
// the beginning of this range.
let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
let end = ptr.offset + size; // This does overflow checking.
self.relocations.range(Size::from_bytes(start)..end)
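// Worked example (illustration only): with 8-byte pointers, a relocation keyed at offset 10
// covers bytes 10..18, so it overlaps an access at offsets 16..20. The lookback above finds
// it: start = 16 - (8 - 1) = 9, and the queried key range 9..20 contains the key 10.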
/// Checks that there are no relocations overlapping with the given range.
fn check_relocations(
cx: &impl HasDataLayout,
) -> InterpResult<'tcx> {
if self.get_relocations(cx, ptr, size).is_empty() {
throw_unsup!(ReadPointerAsBytes)
/// Removes all relocations inside the given range.
/// If there are relocations overlapping with the edges, they
/// are removed as well *and* the bytes they cover are marked as
/// uninitialized. This is a somewhat odd "spooky action at a distance",
/// but it allows strictly more code to run than if we would just error
/// immediately in that case.
fn clear_relocations(
cx: &impl HasDataLayout,
) -> InterpResult<'tcx> {
// Find the start and end of the given range and its outermost relocations.
let (first, last) = {
// Find all relocations overlapping the given range.
let relocations = self.get_relocations(cx, ptr, size);
if relocations.is_empty() {
relocations.first().unwrap().0,
relocations.last().unwrap().0 + cx.data_layout().pointer_size,
let start = ptr.offset;
let end = start + size; // `Size` addition
// Mark parts of the outermost relocations as uninitialized if they partially fall outside the
// given range.
self.init_mask.set_range(first, start, false);
self.init_mask.set_range(end, last, false);
// Forget all the relocations.
self.relocations.remove_range(first..last);
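// Worked example (illustration only): with 8-byte pointers, suppose a relocation is keyed at
// offset 16 and we clear the range 20..30. Then `first` = 16 and `last` = 24; bytes 16..20
// (the untouched part of the old pointer) are marked uninitialized, and the relocation entry
// at 16 is removed.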
/// Errors if there are relocations overlapping with the edges of the
/// given memory range.
fn check_relocation_edges(
cx: &impl HasDataLayout,
) -> InterpResult<'tcx> {
self.check_relocations(cx, ptr, Size::ZERO)?;
self.check_relocations(cx, ptr.offset(size, cx)?, Size::ZERO)?;
/// Uninitialized bytes.
impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
/// Checks whether the given range is entirely initialized.
/// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
/// indexes of the first contiguous uninitialized access.
fn is_init(&self, ptr: Pointer<Tag>, size: Size) -> Result<(), Range<Size>> {
self.init_mask.is_range_initialized(ptr.offset, ptr.offset + size) // `Size` addition
/// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
/// error which will report the first range of bytes which is uninitialized.
fn check_init(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
self.is_init(ptr, size).or_else(|idx_range| {
throw_ub!(InvalidUninitBytes(Some(Box::new(UninitBytesAccess {
access_ptr: ptr.erase_tag(),
uninit_ptr: Pointer::new(ptr.alloc_id, idx_range.start),
uninit_size: idx_range.end - idx_range.start, // `Size` subtraction
pub fn mark_init(&mut self, ptr: Pointer<Tag>, size: Size, is_init: bool) {
if size.bytes() == 0 {
self.init_mask.set_range(ptr.offset, ptr.offset + size, is_init);
/// Run-length encoding of the uninit mask.
/// Used to copy parts of a mask multiple times to another allocation.
pub struct InitMaskCompressed {
/// Whether the first range is initialized.
/// The lengths of ranges that are run-length encoded.
/// The initialization state of the ranges alternates, starting with `initial`.
ranges: smallvec::SmallVec<[u64; 1]>,
impl InitMaskCompressed {
pub fn no_bytes_init(&self) -> bool {
// The `ranges` are run-length encoded and of alternating initialization state.
// So if `ranges.len() > 1` then the second block is an initialized range.
!self.initial && self.ranges.len() == 1
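// For example (illustration only): `initial = false, ranges = [16]` means 16 uninitialized
// bytes, so `no_bytes_init()` is true; `initial = false, ranges = [4, 12]` has an initialized
// run after the first 4 bytes, so it is false.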
/// Transferring the initialization mask to other allocations.
impl<Tag, Extra> Allocation<Tag, Extra> {
/// Creates a run-length encoding of the initialization mask.
pub fn compress_uninit_range(&self, src: Pointer<Tag>, size: Size) -> InitMaskCompressed {
// Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
// a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
// the source and write it to the destination. Even if we optimized the memory accesses,
// we'd be doing all of this `repeat` times.
// Therefore we precompute a compressed version of the initialization mask of the source value and
// then write it back `repeat` times without computing any more information from the source.
// A precomputed cache for ranges of initialized / uninitialized bits:
// 0000010010001110 will become
// `[5, 1, 2, 1, 3, 3, 1]`,
// where each element toggles the state.
let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
let initial = self.init_mask.get(src.offset);
let mut cur = initial;
for i in 1..size.bytes() {
// FIXME: optimize to bitshift the current uninitialized block's bits and read the top bit.
if self.init_mask.get(src.offset + Size::from_bytes(i)) == cur {
ranges.push(cur_len);
ranges.push(cur_len);
InitMaskCompressed { ranges, initial }
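// Illustrative sketch (not part of the interpreter): the same run-length encoding applied to a
// plain `&[bool]` mask. The `compress_bools` helper is hypothetical and exists only to make the
// `0000010010001110 -> [5, 1, 2, 1, 3, 3, 1]` example above concrete.
//
//     fn compress_bools(mask: &[bool]) -> (bool, Vec<u64>) {
//         let initial = mask[0];
//         let (mut runs, mut cur, mut len) = (Vec::new(), initial, 1u64);
//         for &bit in &mask[1..] {
//             if bit == cur {
//                 len += 1;
//             } else {
//                 runs.push(len);
//                 cur = bit;
//                 len = 1;
//             }
//         }
//         runs.push(len);
//         (initial, runs)
//     }
//
//     // Reading `0` as uninitialized and `1` as initialized, the mask 0000010010001110
//     // compresses to `(false, vec![5, 1, 2, 1, 3, 3, 1])`.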
/// Applies multiple instances of the run-length encoding to the initialization mask.
pub fn mark_compressed_init_range(
defined: &InitMaskCompressed,
// An optimization where we can just overwrite an entire range of initialization
// bits if they are going to be uniformly `1` or `0`.
if defined.ranges.len() <= 1 {
self.init_mask.set_range_inbounds(
dest.offset + size * repeat, // `Size` operations
for mut j in 0..repeat {
j += dest.offset.bytes();
let mut cur = defined.initial;
for range in &defined.ranges {
self.init_mask.set_range_inbounds(
Size::from_bytes(old_j),
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
pub struct Relocations<Tag = (), Id = AllocId>(SortedMap<Size, (Tag, Id)>);
impl<Tag, Id> Relocations<Tag, Id> {
pub fn new() -> Self {
Relocations(SortedMap::new())
// The caller must guarantee that the given relocations are already sorted
// by address and contain no duplicates.
pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self {
Relocations(SortedMap::from_presorted_elements(r))
impl<Tag> Deref for Relocations<Tag> {
type Target = SortedMap<Size, (Tag, AllocId)>;
fn deref(&self) -> &Self::Target {
impl<Tag> DerefMut for Relocations<Tag> {
fn deref_mut(&mut self) -> &mut Self::Target {
/// A partial, owned list of relocations to transfer into another allocation.
pub struct AllocationRelocations<Tag> {
relative_relocations: Vec<(Size, (Tag, AllocId))>,
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
pub fn prepare_relocation_copy(
cx: &impl HasDataLayout,
) -> AllocationRelocations<Tag> {
let relocations = self.get_relocations(cx, src, size);
if relocations.is_empty() {
return AllocationRelocations { relative_relocations: Vec::new() };
let mut new_relocations = Vec::with_capacity(relocations.len() * (length as usize));
new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
// compute offset for current repetition
let dest_offset = dest.offset + size * i; // `Size` operations
// shift offsets from source allocation to destination allocation
(offset + dest_offset) - src.offset, // `Size` operations
AllocationRelocations { relative_relocations: new_relocations }
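// Worked example (illustration only): copying 16 bytes from `src.offset` = 4 to
// `dest.offset` = 100 with 3 repetitions. A relocation keyed at source offset 6 becomes, for
// repetition `i`, the key (6 + (100 + 16 * i)) - 4 = 102 + 16 * i, i.e. 102, 118, and 134.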
/// Applies a relocation copy.
/// The affected range, as defined in the parameters to `prepare_relocation_copy`, is expected
/// to be clear of relocations.
pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Tag>) {
self.relocations.insert_presorted(relocations.relative_relocations);
////////////////////////////////////////////////////////////////////////////////
// Uninitialized byte tracking
////////////////////////////////////////////////////////////////////////////////
/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
/// is initialized. If it is `false`, the byte is uninitialized.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct InitMask {
pub const BLOCK_SIZE: u64 = 64;
pub fn new(size: Size, state: bool) -> Self {
let mut m = InitMask { blocks: vec![], len: Size::ZERO };
/// Checks whether the range `start..end` (end-exclusive) is entirely initialized.
/// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
/// indexes for the first contiguous span of the uninitialized access.
pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), Range<Size>> {
return Err(self.len..end);
// FIXME(oli-obk): optimize this for allocations larger than a block.
let idx = (start.bytes()..end.bytes()).map(Size::from_bytes).find(|&i| !self.get(i));
let uninit_end = (idx.bytes()..end.bytes())
.map(Size::from_bytes)
.find(|&i| self.get(i))
pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
self.grow(end - len, new_state);
self.set_range_inbounds(start, end, new_state);
pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
let (blocka, bita) = bit_index(start);
let (blockb, bitb) = bit_index(end);
if blocka == blockb {
// First set all bits except the first `bita`,
// then unset the last `64 - bitb` bits.
let range = if bitb == 0 {
(u64::MAX << bita) & (u64::MAX >> (64 - bitb))
self.blocks[blocka] |= range;
self.blocks[blocka] &= !range;
// across block boundaries
// Set `bita..64` to `1`.
self.blocks[blocka] |= u64::MAX << bita;
// Set `0..bitb` to `1`.
self.blocks[blockb] |= u64::MAX >> (64 - bitb);
// Fill in all the other blocks (much faster than one bit at a time).
for block in (blocka + 1)..blockb {
self.blocks[block] = u64::MAX;
// Set `bita..64` to `0`.
self.blocks[blocka] &= !(u64::MAX << bita);
// Set `0..bitb` to `0`.
self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
// Fill in all the other blocks (much faster than one bit at a time).
for block in (blocka + 1)..blockb {
self.blocks[block] = 0;
pub fn get(&self, i: Size) -> bool {
let (block, bit) = bit_index(i);
(self.blocks[block] & (1 << bit)) != 0
pub fn set(&mut self, i: Size, new_state: bool) {
let (block, bit) = bit_index(i);
self.set_bit(block, bit, new_state);
fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
self.blocks[block] |= 1 << bit;
self.blocks[block] &= !(1 << bit);
pub fn grow(&mut self, amount: Size, new_state: bool) {
if amount.bytes() == 0 {
let unused_trailing_bits =
u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
if amount.bytes() > unused_trailing_bits {
let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
// FIXME(oli-obk): optimize this by repeating `new_state as Block`.
iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
let start = self.len;
self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
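// Worked example (illustration only): with `len` = 100 bytes the mask already holds 2 blocks
// (128 bits), so 28 trailing bits are unused. Growing by 40 bytes exceeds that, so
// 40 / 64 + 1 = 1 extra block is pushed before the new range 100..140 is set.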
fn bit_index(bits: Size) -> (usize, usize) {
let bits = bits.bytes();
let a = bits / InitMask::BLOCK_SIZE;
let b = bits % InitMask::BLOCK_SIZE;
(usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
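// Worked example (illustration only): `bit_index(Size::from_bytes(70))` is `(70 / 64, 70 % 64)`
// = `(1, 6)`, i.e. bit 6 of the second block. In `set_range_inbounds`, a same-block range such
// as bytes 3..7 of block 0 uses the mask `(u64::MAX << 3) & (u64::MAX >> (64 - 7))`, which has
// exactly bits 3, 4, 5, and 6 set.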