//! The virtual memory representation of the MIR interpreter.
use std::convert::TryFrom;
use std::ops::{Deref, DerefMut, Range};
use rustc_ast::ast::Mutability;
use rustc_data_structures::sorted_map::SortedMap;
use rustc_target::abi::{Align, HasDataLayout, Size};
    read_target_uint, write_target_uint, AllocId, InterpResult, Pointer, Scalar, ScalarMaybeUndef,
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
pub struct Allocation<Tag = (), Extra = ()> {
    /// The actual bytes of the allocation.
    /// Note that the bytes of a pointer represent the offset of the pointer.
    /// Maps from byte addresses to extra data for each pointer.
    /// Only the first byte of a pointer is inserted into the map; i.e.,
    /// every entry in this map applies to `pointer_size` consecutive bytes starting
    /// at the given offset.
    relocations: Relocations<Tag>,
    /// Denotes which part of this allocation is initialized.
    undef_mask: UndefMask,
    /// The size of the allocation. Currently, must always equal `bytes.len()`.
    /// The alignment of the allocation to detect unaligned reads.
    /// (`Align` guarantees that this is a power of two.)
    /// `true` if the allocation is mutable.
    /// Also used by codegen to determine if a static should be put into mutable memory,
    /// which happens for `static mut` and `static` with interior mutability.
    pub mutability: Mutability,
    /// Extra state for the machine.
pub trait AllocationExtra<Tag>: ::std::fmt::Debug + Clone {
    // There is no constructor in here because the constructor's type depends
    // on `MemoryKind`, and making things sufficiently generic leads to painful
    /// Hook for performing extra checks on a memory read access.
    /// Takes read-only access to the allocation so that all memory read
    /// operations can keep taking `&self`. Use a `RefCell` in `AllocExtra` if you
        _alloc: &Allocation<Tag, Self>,
    ) -> InterpResult<'tcx> {
    /// Hook for performing extra checks on a memory write access.
        _alloc: &mut Allocation<Tag, Self>,
    ) -> InterpResult<'tcx> {
    /// Hook for performing extra checks on a memory deallocation.
    /// `size` will be the size of the allocation.
    fn memory_deallocated(
        _alloc: &mut Allocation<Tag, Self>,
    ) -> InterpResult<'tcx> {
// For `Tag = ()` and no extra state, we have a trivial implementation.
impl AllocationExtra<()> for () {}
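// Illustrative sketch (not part of this module): a machine that wants per-allocation
// bookkeeping can implement `AllocationExtra` for its own extra type. The hooks only get
// `&Allocation`, so interior mutability (e.g. a `Cell`) is needed to record anything from
// `memory_read`. The type and field names below are made up for the example.
//
//     #[derive(Debug, Clone)]
//     struct AccessCounter {
//         reads: std::cell::Cell<u64>,
//     }
//
//     impl<Tag> AllocationExtra<Tag> for AccessCounter {
//         fn memory_read<'tcx>(
//             alloc: &Allocation<Tag, Self>,
//             _ptr: Pointer<Tag>,
//             _size: Size,
//         ) -> InterpResult<'tcx> {
//             alloc.extra.reads.set(alloc.extra.reads.get() + 1);
//             Ok(())
//         }
//     }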
// The constructors are all without extra; the extra gets added by a machine hook later.
impl<Tag> Allocation<Tag> {
    /// Creates a read-only allocation initialized by the given bytes.
    pub fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, align: Align) -> Self {
        let bytes = slice.into().into_owned();
        let size = Size::from_bytes(bytes.len());
            relocations: Relocations::new(),
            undef_mask: UndefMask::new(size, true),
            mutability: Mutability::Not,
    pub fn from_byte_aligned_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>) -> Self {
        Allocation::from_bytes(slice, Align::from_bytes(1).unwrap())
    pub fn undef(size: Size, align: Align) -> Self {
            bytes: vec![0; size.bytes_usize()],
            relocations: Relocations::new(),
            undef_mask: UndefMask::new(size, false),
            mutability: Mutability::Mut,
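// Usage sketch (illustrative only; real call sites live elsewhere in the interpreter and
// const-eval code, and the values here are made up): a read-only allocation holding a
// string constant versus fresh uninitialized memory.
//
//     let ro = Allocation::<()>::from_byte_aligned_bytes(&b"hello"[..]);
//     let scratch = Allocation::<()>::undef(Size::from_bytes(16), Align::from_bytes(8).unwrap());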
impl Allocation<(), ()> {
    /// Adds `Tag` and `Extra` fields.
    pub fn with_tags_and_extra<T, E>(
        mut tagger: impl FnMut(AllocId) -> T,
    ) -> Allocation<T, E> {
            relocations: Relocations::from_presorted(
                    // The allocations in the relocations (pointers stored *inside* this allocation)
                    // all get the base pointer tag.
                    .map(|&(offset, ((), alloc))| {
                        let tag = tagger(alloc);
                        (offset, (tag, alloc))
            undef_mask: self.undef_mask,
            mutability: self.mutability,
/// Raw accessors. Provide access to otherwise private bytes.
impl<Tag, Extra> Allocation<Tag, Extra> {
    pub fn len(&self) -> usize {
        self.size.bytes_usize()
    /// Looks at a slice which may describe undefined bytes or describe a relocation. This differs
    /// from `get_bytes_with_undef_and_ptr` in that it does no relocation checks (even on the
    /// edges) at all. It further ignores `AllocationExtra` callbacks.
    /// This must not be used for reads affecting the interpreter execution.
    pub fn inspect_with_undef_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
    /// Returns the undef mask.
    pub fn undef_mask(&self) -> &UndefMask {
    /// Returns the relocation list.
    pub fn relocations(&self) -> &Relocations<Tag> {
impl<'tcx> rustc_serialize::UseSpecializedDecodable for &'tcx Allocation {}
impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
    /// Just a small local helper function to avoid a bit of code repetition.
    /// Returns the range of this allocation that was meant.
    fn check_bounds(&self, offset: Size, size: Size) -> Range<usize> {
        let end = offset + size; // This does overflow checking.
        let end = usize::try_from(end.bytes()).expect("access too big for this host architecture");
            "Out-of-bounds access at offset {}, size {} in allocation of size {}",
        offset.bytes_usize()..end
    /// The last argument controls whether we error out when there are undefined
    /// or pointer bytes. You should never call this directly; call `get_bytes` or
    /// `get_bytes_with_undef_and_ptr` instead.
    /// This function also guarantees that the resulting pointer will remain stable
    /// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    fn get_bytes_internal(
        cx: &impl HasDataLayout,
        check_defined_and_ptr: bool,
    ) -> InterpResult<'tcx, &[u8]> {
        let range = self.check_bounds(ptr.offset, size);
        if check_defined_and_ptr {
            self.check_defined(ptr, size)?;
            self.check_relocations(cx, ptr, size)?;
            // We still don't want relocations on the *edges*.
            self.check_relocation_edges(cx, ptr, size)?;
        AllocationExtra::memory_read(self, ptr, size)?;
        Ok(&self.bytes[range])
    /// Checks that these bytes are initialized and not pointer bytes, and then returns them
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx, &[u8]> {
        self.get_bytes_internal(cx, ptr, size, true)
    /// It is the caller's responsibility to handle undefined and pointer bytes.
    /// However, this still checks that there are no relocations on the *edges*.
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn get_bytes_with_undef_and_ptr(
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx, &[u8]> {
        self.get_bytes_internal(cx, ptr, size, false)
    /// Just calling this already marks everything as defined and removes relocations,
    /// so be sure to actually put data there!
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    pub fn get_bytes_mut(
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx, &mut [u8]> {
        let range = self.check_bounds(ptr.offset, size);
        self.mark_definedness(ptr, size, true);
        self.clear_relocations(cx, ptr, size)?;
        AllocationExtra::memory_written(self, ptr, size)?;
        Ok(&mut self.bytes[range])
/// Reading and writing.
impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
    /// Reads bytes until a `0` is encountered. Will error if the end of the allocation is reached
    /// before a `0` is found.
    /// Most likely, you want to call `Memory::read_c_str` instead of this method.
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx, &[u8]> {
        let offset = ptr.offset.bytes_usize();
        Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
                let size_with_null = Size::from_bytes(size) + Size::from_bytes(1);
                // Go through `get_bytes` for checks and AllocationExtra hooks.
                // We read the null, so we include it in the request, but we want it removed
                // from the result, so we do subslicing.
                &self.get_bytes(cx, ptr, size_with_null)?[..size]
            // This includes the case where `offset` is out-of-bounds to begin with.
            None => throw_ub!(UnterminatedCString(ptr.erase_tag())),
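        // For example (illustrative): if the bytes at `ptr` are `b"hi\0..."`, the `position`
        // call above finds the `0` at index 2, `get_bytes` is asked for 3 bytes (including the
        // null, so the usual checks and hooks run over it), and the returned slice is `b"hi"`.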
    /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
    /// relocation. If `allow_ptr_and_undef` is `false`, also enforces that the memory in the
    /// given range contains neither relocations nor undef bytes.
        cx: &impl HasDataLayout,
        allow_ptr_and_undef: bool,
    ) -> InterpResult<'tcx> {
        // Check bounds and relocations on the edges.
        self.get_bytes_with_undef_and_ptr(cx, ptr, size)?;
        // Check undef and ptr.
        if !allow_ptr_and_undef {
            self.check_defined(ptr, size)?;
            self.check_relocations(cx, ptr, size)?;
    /// Writes `src` to the memory starting at `ptr.offset`.
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `Memory::write_bytes` instead of this method.
        cx: &impl HasDataLayout,
        src: impl IntoIterator<Item = u8>,
    ) -> InterpResult<'tcx> {
        let mut src = src.into_iter();
        let (lower, upper) = src.size_hint();
        let len = upper.expect("can only write bounded iterators");
        assert_eq!(lower, len, "can only write iterators with a precise length");
        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len))?;
        // `zip` would stop when the first iterator ends; we want to definitely
        // cover all of `bytes`.
            *dest = src.next().expect("iterator was shorter than it said it would be");
        src.next().expect_none("iterator was longer than it said it would be");
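        // Usage sketch (illustrative): this method insists on an exact-size iterator, so
        // callers typically pass something like a copied slice:
        //
        //     alloc.write_bytes(cx, ptr, data.iter().copied())?;
        //
        // where `data: &[u8]`. An iterator whose `size_hint` upper bound is `None` (e.g. an
        // unbounded `repeat`) would hit the "can only write bounded iterators" panic above.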
    /// Reads a *non-ZST* scalar.
    /// ZSTs can't be read for two reasons:
    /// * byte-order cannot work with zero-element buffers;
    /// * in order to obtain a `Pointer`, we need to check for ZSTness anyway due to integer
    ///   pointers being valid for ZSTs.
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
        // `get_bytes_with_undef_and_ptr` tests relocation edges.
        let bytes = self.get_bytes_with_undef_and_ptr(cx, ptr, size)?;
        // Undef check happens *after* we established that the alignment is correct.
        // We must not return `Ok()` for unaligned pointers!
        if self.is_defined(ptr, size).is_err() {
            // This inflates undefined bytes to the entire scalar, even if only a few
            // bytes are undefined.
            return Ok(ScalarMaybeUndef::Undef);
        // Now we do the actual reading.
        let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
        // See if we got a pointer.
        if size != cx.data_layout().pointer_size {
            // *Now*, we better make sure that the inside is free of relocations too.
            self.check_relocations(cx, ptr, size)?;
            if let Some(&(tag, alloc_id)) = self.relocations.get(&ptr.offset) {
                let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits), tag);
                return Ok(ScalarMaybeUndef::Scalar(ptr.into()));
        // We don't. Just return the bits.
        Ok(ScalarMaybeUndef::Scalar(Scalar::from_uint(bits, size)))
    /// Reads a pointer-sized scalar.
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
    pub fn read_ptr_sized(
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
        self.read_scalar(cx, ptr, cx.data_layout().pointer_size)
    /// Writes a *non-ZST* scalar.
    /// ZSTs can't be written for two reasons:
    /// * byte-order cannot work with zero-element buffers;
    /// * in order to obtain a `Pointer`, we need to check for ZSTness anyway due to integer
    ///   pointers being valid for ZSTs.
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
        cx: &impl HasDataLayout,
        val: ScalarMaybeUndef<Tag>,
    ) -> InterpResult<'tcx> {
        let val = match val {
            ScalarMaybeUndef::Scalar(scalar) => scalar,
            ScalarMaybeUndef::Undef => {
                self.mark_definedness(ptr, type_size, false);
        let bytes = match val.to_bits_or_ptr(type_size, cx) {
            Err(val) => u128::from(val.offset.bytes()),
        let endian = cx.data_layout().endian;
        let dst = self.get_bytes_mut(cx, ptr, type_size)?;
        write_target_uint(endian, dst, bytes).unwrap();
        // See if we have to also write a relocation.
        if let Scalar::Ptr(val) = val {
            self.relocations.insert(ptr.offset, (val.tag, val.alloc_id));
    /// Writes a pointer-sized scalar.
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
    pub fn write_ptr_sized(
        cx: &impl HasDataLayout,
        val: ScalarMaybeUndef<Tag>,
    ) -> InterpResult<'tcx> {
        let ptr_size = cx.data_layout().pointer_size;
        self.write_scalar(cx, ptr, val, ptr_size)
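    // Illustrative round trip (names made up, error handling elided): writing a tagged
    // pointer with `write_ptr_sized` stores its offset as the raw bytes *and* records a
    // relocation at `ptr.offset`; `read_ptr_sized` at the same offset then finds that
    // relocation and reconstructs the `Pointer` instead of returning plain bits.
    //
    //     let val = ScalarMaybeUndef::Scalar(Scalar::Ptr(some_ptr));
    //     alloc.write_ptr_sized(cx, ptr, val)?;
    //     let back = alloc.read_ptr_sized(cx, ptr)?; // ScalarMaybeUndef::Scalar(Scalar::Ptr(..))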
impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Returns all relocations overlapping with the given pointer-offset pair.
    pub fn get_relocations(
        cx: &impl HasDataLayout,
    ) -> &[(Size, (Tag, AllocId))] {
        // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
        // the beginning of this range.
        let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
        let end = ptr.offset + size; // This does overflow checking.
        self.relocations.range(Size::from_bytes(start)..end)
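        // Worked example (assuming 8-byte pointers): a relocation entry is keyed by its *first*
        // byte, so a query for `offset = 16, size = 1` must also look back as far as offset 9;
        // a relocation starting at 9 covers bytes 9..17 and thus still overlaps byte 16. Hence
        // the `saturating_sub(pointer_size - 1)` above.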
    /// Checks that there are no relocations overlapping with the given range.
    fn check_relocations(
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx> {
        if self.get_relocations(cx, ptr, size).is_empty() {
            throw_unsup!(ReadPointerAsBytes)
    /// Removes all relocations inside the given range.
    /// If there are relocations overlapping with the edges, they
    /// are removed as well *and* the bytes they cover are marked as
    /// uninitialized. This is a somewhat odd "spooky action at a distance",
    /// but it allows strictly more code to run than if we would just error
    /// immediately in that case.
    fn clear_relocations(
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx> {
        // Find the start and end of the given range and its outermost relocations.
        let (first, last) = {
            // Find all relocations overlapping the given range.
            let relocations = self.get_relocations(cx, ptr, size);
            if relocations.is_empty() {
                relocations.first().unwrap().0,
                relocations.last().unwrap().0 + cx.data_layout().pointer_size,
        let start = ptr.offset;
        let end = start + size; // `Size` addition
        // Mark parts of the outermost relocations as undefined if they partially fall outside the
            self.undef_mask.set_range(first, start, false);
            self.undef_mask.set_range(end, last, false);
        // Forget all the relocations.
        self.relocations.remove_range(first..last);
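        // Worked example (assuming 8-byte pointers): with a relocation at offset 4 and a write
        // to bytes 8..12, `first` is 4 and `last` is 12, so the relocation is dropped and the
        // bytes 4..8 it covered outside the write are marked undefined, while bytes 8..12 are
        // about to be overwritten by the caller anyway.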
    /// Errors if there are relocations overlapping with the edges of the
    /// given memory range.
    fn check_relocation_edges(
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx> {
        self.check_relocations(cx, ptr, Size::ZERO)?;
        self.check_relocations(cx, ptr.offset(size, cx)?, Size::ZERO)?;
impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Checks whether the given range is entirely defined.
    /// Returns `Ok(())` if it's defined. Otherwise returns the index of the byte
    /// at which the first undefined access begins.
    fn is_defined(&self, ptr: Pointer<Tag>, size: Size) -> Result<(), Size> {
        self.undef_mask.is_range_defined(ptr.offset, ptr.offset + size) // `Size` addition
    /// Checks that a range of bytes is defined. If not, returns the `InvalidUndefBytes`
    /// error which will report the first byte which is undefined.
    fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
        self.is_defined(ptr, size)
            .or_else(|idx| throw_ub!(InvalidUndefBytes(Some(Pointer::new(ptr.alloc_id, idx)))))
    pub fn mark_definedness(&mut self, ptr: Pointer<Tag>, size: Size, new_state: bool) {
        if size.bytes() == 0 {
        self.undef_mask.set_range(ptr.offset, ptr.offset + size, new_state);
/// Run-length encoding of the undef mask.
/// Used to copy parts of a mask multiple times to another allocation.
pub struct AllocationDefinedness {
    /// The definedness of the first range.
    /// The lengths of ranges that are run-length encoded.
    /// The definedness of the ranges alternates, starting with `initial`.
    ranges: smallvec::SmallVec<[u64; 1]>,
impl AllocationDefinedness {
    pub fn all_bytes_undef(&self) -> bool {
        // The `ranges` are run-length encoded and of alternating definedness.
        // So if `ranges.len() > 1` then the second block is a range of defined.
        !self.initial && self.ranges.len() == 1
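        // Worked example of the encoding (illustrative): for a mask whose bytes are
        // defined/undefined as `0000010010001110` (reading left to right, `1` = defined),
        // `initial` is `false` and `ranges` is `[5, 1, 2, 1, 3, 3, 1]`; `all_bytes_undef`
        // is `false` here because there is more than one run.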
/// Transferring the definedness mask to other allocations.
impl<Tag, Extra> Allocation<Tag, Extra> {
    /// Creates a run-length encoding of the undef mask.
    pub fn compress_undef_range(&self, src: Pointer<Tag>, size: Size) -> AllocationDefinedness {
        // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
        // a naive undef mask copying algorithm would repeatedly have to read the undef mask from
        // the source and write it to the destination. Even if we optimized the memory accesses,
        // we'd be doing all of this `repeat` times.
        // Therefore we precompute a compressed version of the undef mask of the source value and
        // then write it back `repeat` times without computing any more information from the source.
        // A precomputed cache for ranges of defined/undefined bits
        // 0000010010001110 will become
        // `[5, 1, 2, 1, 3, 3, 1]`,
        // where each element toggles the state.
        let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
        let initial = self.undef_mask.get(src.offset);
        let mut cur = initial;
        for i in 1..size.bytes() {
            // FIXME: optimize to bitshift the current undef block's bits and read the top bit.
            if self.undef_mask.get(src.offset + Size::from_bytes(i)) == cur {
                ranges.push(cur_len);
        ranges.push(cur_len);
        AllocationDefinedness { ranges, initial }
    /// Applies multiple instances of the run-length encoding to the undef mask.
    pub fn mark_compressed_undef_range(
        defined: &AllocationDefinedness,
        // An optimization where we can just overwrite an entire range of definedness bits if
        // they are going to be uniformly `1` or `0`.
        if defined.ranges.len() <= 1 {
            self.undef_mask.set_range_inbounds(
                dest.offset + size * repeat, // `Size` operations
        for mut j in 0..repeat {
            j += dest.offset.bytes();
            let mut cur = defined.initial;
            for range in &defined.ranges {
                self.undef_mask.set_range_inbounds(
                    Size::from_bytes(old_j),
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
pub struct Relocations<Tag = (), Id = AllocId>(SortedMap<Size, (Tag, Id)>);
impl<Tag, Id> Relocations<Tag, Id> {
    pub fn new() -> Self {
        Relocations(SortedMap::new())
    // The caller must guarantee that the given relocations are already sorted
    // by address and contain no duplicates.
    pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self {
        Relocations(SortedMap::from_presorted_elements(r))
impl<Tag> Deref for Relocations<Tag> {
    type Target = SortedMap<Size, (Tag, AllocId)>;
    fn deref(&self) -> &Self::Target {
impl<Tag> DerefMut for Relocations<Tag> {
    fn deref_mut(&mut self) -> &mut Self::Target {
/// A partial, owned list of relocations to transfer into another allocation.
pub struct AllocationRelocations<Tag> {
    relative_relocations: Vec<(Size, (Tag, AllocId))>,
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    pub fn prepare_relocation_copy(
        cx: &impl HasDataLayout,
    ) -> AllocationRelocations<Tag> {
        let relocations = self.get_relocations(cx, src, size);
        if relocations.is_empty() {
            return AllocationRelocations { relative_relocations: Vec::new() };
        let mut new_relocations = Vec::with_capacity(relocations.len() * (length as usize));
            new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                // compute offset for current repetition
                let dest_offset = dest.offset + size * i; // `Size` operations
                    // shift offsets from source allocation to destination allocation
                    (offset + dest_offset) - src.offset, // `Size` operations
        AllocationRelocations { relative_relocations: new_relocations }
    /// Applies a relocation copy.
    /// The affected range, as defined in the parameters to `prepare_relocation_copy`, is expected
    /// to be clear of relocations.
    pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Tag>) {
        self.relocations.insert_presorted(relocations.relative_relocations);
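    // Usage sketch (illustrative; in practice this is driven by the interpreter's
    // memory-copy code, and the variable names here are made up): relocation copies are
    // done in two phases, so the new entries can be prepared while the source allocation
    // is still borrowed and applied once the destination bytes are in place.
    //
    //     let copied = src_alloc.prepare_relocation_copy(cx, src_ptr, size, dest_ptr, length);
    //     // ... copy the raw bytes and the undef mask ...
    //     dest_alloc.mark_relocation_range(copied);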
////////////////////////////////////////////////////////////////////////////////
// Undefined byte tracking
////////////////////////////////////////////////////////////////////////////////
/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
/// is defined. If it is `false` the byte is undefined.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
#[derive(HashStable)]
pub struct UndefMask {
    pub const BLOCK_SIZE: u64 = 64;
    pub fn new(size: Size, state: bool) -> Self {
        let mut m = UndefMask { blocks: vec![], len: Size::ZERO };
    /// Checks whether the range `start..end` (end-exclusive) is entirely defined.
    /// Returns `Ok(())` if it's defined. Otherwise returns the index of the byte
    /// at which the first undefined access begins.
    pub fn is_range_defined(&self, start: Size, end: Size) -> Result<(), Size> {
            return Err(self.len);
        // FIXME(oli-obk): optimize this for allocations larger than a block.
        let idx = (start.bytes()..end.bytes()).map(Size::from_bytes).find(|&i| !self.get(i));
            Some(idx) => Err(idx),
    pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
            self.grow(end - len, new_state);
        self.set_range_inbounds(start, end, new_state);
    pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
        let (blocka, bita) = bit_index(start);
        let (blockb, bitb) = bit_index(end);
        if blocka == blockb {
            // First set all bits except the first `bita`,
            // then unset the last `64 - bitb` bits.
            let range = if bitb == 0 {
                (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
                self.blocks[blocka] |= range;
                self.blocks[blocka] &= !range;
        // across block boundaries
            // Set `bita..64` to `1`.
            self.blocks[blocka] |= u64::MAX << bita;
            // Set `0..bitb` to `1`.
                self.blocks[blockb] |= u64::MAX >> (64 - bitb);
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = u64::MAX;
            // Set `bita..64` to `0`.
            self.blocks[blocka] &= !(u64::MAX << bita);
            // Set `0..bitb` to `0`.
                self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = 0;
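    // Worked example for the single-block case above (illustrative): for `start = 3` and
    // `end = 7` within one block, `bita = 3` and `bitb = 7`, so the mask is
    // `(u64::MAX << 3) & (u64::MAX >> 57)`, i.e. bits 3, 4, 5, and 6, which is exactly the
    // half-open range `3..7`.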
    pub fn get(&self, i: Size) -> bool {
        let (block, bit) = bit_index(i);
        (self.blocks[block] & (1 << bit)) != 0
    pub fn set(&mut self, i: Size, new_state: bool) {
        let (block, bit) = bit_index(i);
        self.set_bit(block, bit, new_state);
    fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
            self.blocks[block] |= 1 << bit;
            self.blocks[block] &= !(1 << bit);
    pub fn grow(&mut self, amount: Size, new_state: bool) {
        if amount.bytes() == 0 {
        let unused_trailing_bits =
            u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
        if amount.bytes() > unused_trailing_bits {
            let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
                // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
                iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
        let start = self.len;
        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
fn bit_index(bits: Size) -> (usize, usize) {
    let bits = bits.bytes();
    let a = bits / UndefMask::BLOCK_SIZE;
    let b = bits % UndefMask::BLOCK_SIZE;
    (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
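// For example (illustrative): `bit_index(Size::from_bytes(70))` is `(1, 6)`, i.e. byte 70
// lives in block 1 at bit 6, since each block covers `BLOCK_SIZE = 64` bytes' worth of bits.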