//! The virtual memory representation of the MIR interpreter.

use super::{
    Pointer, InterpResult, AllocId, ScalarMaybeUndef, write_target_uint, read_target_uint, Scalar,
};

use crate::ty::layout::{Size, Align};

use rustc_data_structures::sorted_map::SortedMap;
use rustc_target::abi::HasDataLayout;
use syntax::ast::Mutability;

use std::borrow::Cow;
use std::iter;
use std::ops::{Range, Deref, DerefMut};

// NOTE: When adding new fields, make sure to adjust the `Snapshot` impl in
// `src/librustc_mir/interpret/snapshot.rs`.
pub struct Allocation<Tag = (), Extra = ()> {
    /// The actual bytes of the allocation.
    /// Note that the bytes of a pointer represent the offset of the pointer.
    bytes: Vec<u8>,
    /// Maps from byte addresses to extra data for each pointer.
    /// Only the first byte of a pointer is inserted into the map; i.e.,
    /// every entry in this map applies to `pointer_size` consecutive bytes starting
    /// at the given offset.
    relocations: Relocations<Tag>,
    /// Denotes which part of this allocation is initialized.
    undef_mask: UndefMask,
    /// The size of the allocation. Currently, must always equal `bytes.len()`.
    pub size: Size,
    /// The alignment of the allocation to detect unaligned reads.
    pub align: Align,
    /// Whether the allocation is mutable.
    /// Also used by codegen to determine if a static should be put into mutable memory,
    /// which happens for `static mut` and `static` with interior mutability.
    pub mutability: Mutability,
    /// Extra state for the machine.
    pub extra: Extra,
}

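// For example, storing a pointer `Pointer { alloc_id: A, offset: Size::from_bytes(16) }` at
// offset 8 of an allocation on a 64-bit target leaves roughly this state behind (a conceptual
// sketch, not literal syntax):
//
//     bytes[8..16]                          // the value 16 in target endianness (offset only)
//     relocations: { Size(8) => (tag, A) }  // one entry covering the 8 bytes starting at 8
//     undef_mask[8..16]                     // marked as defined
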
pub trait AllocationExtra<Tag>: ::std::fmt::Debug + Clone {
    // There is no constructor in here because the constructor's type depends
    // on `MemoryKind`, and making things sufficiently generic leads to painful
    // inference errors.

    /// Hook for performing extra checks on a memory read access.
    ///
    /// Takes read-only access to the allocation so that all memory read operations
    /// can take `&self`. Use a `RefCell` in `AllocExtra` if you need to mutate.
    fn memory_read(
        _alloc: &Allocation<Tag, Self>,
        _ptr: Pointer<Tag>,
        _size: Size,
    ) -> InterpResult<'tcx> {
        Ok(())
    }

    /// Hook for performing extra checks on a memory write access.
    fn memory_written(
        _alloc: &mut Allocation<Tag, Self>,
        _ptr: Pointer<Tag>,
        _size: Size,
    ) -> InterpResult<'tcx> {
        Ok(())
    }

    /// Hook for performing extra checks on a memory deallocation.
    /// `size` will be the size of the allocation.
    fn memory_deallocated(
        _alloc: &mut Allocation<Tag, Self>,
        _ptr: Pointer<Tag>,
        _size: Size,
    ) -> InterpResult<'tcx> {
        Ok(())
    }
}

// For `Tag = ()` and no extra state, we have a trivial implementation.
impl AllocationExtra<()> for () { }

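// Machines with richer state hook these callbacks by implementing the trait for their own
// `Extra` type. A minimal sketch (the `ReadCounter` type is hypothetical, not part of rustc;
// it only counts reads via a `Cell`, following the interior-mutability advice above):
//
//     #[derive(Debug, Clone)]
//     struct ReadCounter { reads: std::cell::Cell<u64> }
//
//     impl<Tag> AllocationExtra<Tag> for ReadCounter {
//         fn memory_read(
//             alloc: &Allocation<Tag, Self>,
//             _ptr: Pointer<Tag>,
//             _size: Size,
//         ) -> InterpResult<'tcx> {
//             alloc.extra.reads.set(alloc.extra.reads.get() + 1);
//             Ok(())
//         }
//     }
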
// The constructors are all without extra; the extra gets added by a machine hook later.
impl<Tag> Allocation<Tag> {
    /// Creates a read-only allocation initialized by the given bytes.
    pub fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, align: Align) -> Self {
        let bytes = slice.into().into_owned();
        let size = Size::from_bytes(bytes.len() as u64);
        Self {
            bytes,
            relocations: Relocations::new(),
            undef_mask: UndefMask::new(size, true),
            size,
            align,
            mutability: Mutability::Immutable,
            extra: (),
        }
    }

    pub fn from_byte_aligned_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>) -> Self {
        Allocation::from_bytes(slice, Align::from_bytes(1).unwrap())
    }

    pub fn undef(size: Size, align: Align) -> Self {
        assert_eq!(size.bytes() as usize as u64, size.bytes());
        Allocation {
            bytes: vec![0; size.bytes() as usize],
            relocations: Relocations::new(),
            undef_mask: UndefMask::new(size, false),
            size,
            align,
            mutability: Mutability::Mutable,
            extra: (),
        }
    }
}

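// Usage sketch for the constructors above: a byte literal becomes an immutable, fully defined
// allocation, while fresh memory starts out mutable and entirely undefined:
//
//     let init = Allocation::<()>::from_bytes(&b"foo"[..], Align::from_bytes(1).unwrap());
//     assert_eq!(init.len(), 3);
//
//     let fresh = Allocation::<()>::undef(Size::from_bytes(8), Align::from_bytes(8).unwrap());
//     assert!(fresh.undef_mask().is_range_defined(Size::ZERO, Size::from_bytes(8)).is_err());
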
impl Allocation<(), ()> {
    /// Adds `Tag` and `Extra` fields.
    pub fn with_tags_and_extra<T, E>(
        self,
        mut tagger: impl FnMut(AllocId) -> T,
        extra: E,
    ) -> Allocation<T, E> {
        Allocation {
            bytes: self.bytes,
            size: self.size,
            relocations: Relocations::from_presorted(
                self.relocations.iter()
                    // The allocations in the relocations (pointers stored *inside* this allocation)
                    // all get the base pointer tag.
                    .map(|&(offset, ((), alloc))| {
                        let tag = tagger(alloc);
                        (offset, (tag, alloc))
                    })
                    .collect()
            ),
            undef_mask: self.undef_mask,
            align: self.align,
            mutability: self.mutability,
            extra,
        }
    }
}

/// Raw accessors. Provide access to otherwise private bytes.
impl<Tag, Extra> Allocation<Tag, Extra> {
    pub fn len(&self) -> usize {
        self.size.bytes() as usize
    }

    /// Looks at a slice which may describe undefined bytes or describe a relocation. This differs
    /// from `get_bytes_with_undef_and_ptr` in that it does no relocation checks (even on the
    /// edges) at all. It further ignores `AllocationExtra` callbacks.
    /// This must not be used for reads affecting the interpreter execution.
    pub fn inspect_with_undef_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
        &self.bytes[range]
    }

    /// Returns the undef mask.
    pub fn undef_mask(&self) -> &UndefMask {
        &self.undef_mask
    }

    /// Returns the relocation list.
    pub fn relocations(&self) -> &Relocations<Tag> {
        &self.relocations
    }
}

impl<'tcx> rustc_serialize::UseSpecializedDecodable for &'tcx Allocation {}

impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
    /// Just a small local helper function to avoid a bit of code repetition.
    /// Returns the range of this allocation that was meant.
    fn check_bounds(
        &self,
        offset: Size,
        size: Size,
    ) -> Range<usize> {
        let end = offset + size; // This does overflow checking.
        assert_eq!(
            end.bytes() as usize as u64, end.bytes(),
            "cannot handle this access on this host architecture"
        );
        let end = end.bytes() as usize;
        assert!(
            end <= self.len(),
            "Out-of-bounds access at offset {}, size {} in allocation of size {}",
            offset.bytes(), size.bytes(), self.len()
        );
        (offset.bytes() as usize)..end
    }

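    // For example, in an allocation of size 16, `check_bounds(Size::from_bytes(12),
    // Size::from_bytes(4))` returns the range `12..16`, while asking for 5 bytes at offset 12
    // would trip the out-of-bounds assertion above.
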
    /// The last argument controls whether we error out when there are undefined
    /// or pointer bytes. You should never call this directly; call `get_bytes` or
    /// `get_bytes_with_undef_and_ptr` instead.
    ///
    /// This function also guarantees that the resulting pointer will remain stable
    /// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
    /// on that.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    fn get_bytes_internal(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
        check_defined_and_ptr: bool,
    ) -> InterpResult<'tcx, &[u8]>
    {
        let range = self.check_bounds(ptr.offset, size);

        if check_defined_and_ptr {
            self.check_defined(ptr, size)?;
            self.check_relocations(cx, ptr, size)?;
        } else {
            // We still don't want relocations on the *edges*.
            self.check_relocation_edges(cx, ptr, size)?;
        }

        AllocationExtra::memory_read(self, ptr, size)?;

        Ok(&self.bytes[range])
    }

    /// Checks that these bytes are initialized and not pointer bytes, and then returns them
    /// as a slice.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn get_bytes(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx, &[u8]>
    {
        self.get_bytes_internal(cx, ptr, size, true)
    }

    /// It is the caller's responsibility to handle undefined and pointer bytes.
    /// However, this still checks that there are no relocations on the *edges*.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn get_bytes_with_undef_and_ptr(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx, &[u8]>
    {
        self.get_bytes_internal(cx, ptr, size, false)
    }

    /// Just calling this already marks everything as defined and removes relocations,
    /// so be sure to actually put data there!
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn get_bytes_mut(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx, &mut [u8]>
    {
        let range = self.check_bounds(ptr.offset, size);

        self.mark_definedness(ptr, size, true);
        self.clear_relocations(cx, ptr, size)?;

        AllocationExtra::memory_written(self, ptr, size)?;

        Ok(&mut self.bytes[range])
    }
}

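// Taken together, these accessors make every byte-level access path maintain the allocation's
// invariants. A sketch of the interplay (assuming a `cx: &impl HasDataLayout` and a pointer
// `ptr` at offset 0 of `alloc` are in scope):
//
//     let mut alloc = Allocation::<()>::undef(Size::from_bytes(4), Align::from_bytes(4).unwrap());
//     alloc.get_bytes(cx, ptr, Size::from_bytes(4)).unwrap_err(); // still undefined
//     alloc.get_bytes_mut(cx, ptr, Size::from_bytes(4)).unwrap(); // marks the range defined
//     alloc.get_bytes(cx, ptr, Size::from_bytes(4)).unwrap();     // now succeeds
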
/// Reading and writing.
impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
    /// Reads bytes until a `0` is encountered. Will error if the end of the allocation is reached
    /// before a `0` is found.
    pub fn read_c_str(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
    ) -> InterpResult<'tcx, &[u8]>
    {
        assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
        let offset = ptr.offset.bytes() as usize;
        Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
            Some(size) => {
                let size_with_null = Size::from_bytes((size + 1) as u64);
                // Go through `get_bytes` for checks and AllocationExtra hooks.
                // We read the null, so we include it in the request, but we want it removed
                // from the result, so we do subslicing.
                &self.get_bytes(cx, ptr, size_with_null)?[..size]
            }
            // This includes the case where `offset` is out-of-bounds to begin with.
            None => throw_unsup!(UnterminatedCString(ptr.erase_tag())),
        })
    }

    /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
    /// relocation. If `allow_ptr_and_undef` is `false`, also enforces that the memory in the
    /// given range contains neither relocations nor undef bytes.
    pub fn check_bytes(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
        allow_ptr_and_undef: bool,
    ) -> InterpResult<'tcx>
    {
        // Check bounds and relocations on the edges.
        self.get_bytes_with_undef_and_ptr(cx, ptr, size)?;
        // Check undef and ptr.
        if !allow_ptr_and_undef {
            self.check_defined(ptr, size)?;
            self.check_relocations(cx, ptr, size)?;
        }
        Ok(())
    }

    /// Writes `src` to the memory starting at `ptr.offset`.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn write_bytes(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        src: &[u8],
    ) -> InterpResult<'tcx>
    {
        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(src.len() as u64))?;
        bytes.clone_from_slice(src);
        Ok(())
    }

    /// Sets `count` bytes starting at `ptr.offset` with `val`. Basically `memset`.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn write_repeat(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        val: u8,
        count: Size,
    ) -> InterpResult<'tcx>
    {
        let bytes = self.get_bytes_mut(cx, ptr, count)?;
        for b in bytes {
            *b = val;
        }
        Ok(())
    }

    /// Reads a *non-ZST* scalar.
    ///
    /// ZSTs can't be read for two reasons:
    /// * byte-order cannot work with zero-element buffers;
    /// * in order to obtain a `Pointer`, we need to check for ZSTness anyway due to integer
    ///   pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn read_scalar(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>>
    {
        // `get_bytes_with_undef_and_ptr` tests relocation edges.
        let bytes = self.get_bytes_with_undef_and_ptr(cx, ptr, size)?;
        // Undef check happens *after* we established that the alignment is correct.
        // We must not return `Ok()` for unaligned pointers!
        if self.check_defined(ptr, size).is_err() {
            // This inflates undefined bytes to the entire scalar, even if only a few
            // bytes are undefined.
            return Ok(ScalarMaybeUndef::Undef);
        }
        // Now we do the actual reading.
        let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
        // See if we got a pointer.
        if size != cx.data_layout().pointer_size {
            // *Now*, we better make sure that the inside is free of relocations too.
            self.check_relocations(cx, ptr, size)?;
        } else {
            match self.relocations.get(&ptr.offset) {
                Some(&(tag, alloc_id)) => {
                    let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag);
                    return Ok(ScalarMaybeUndef::Scalar(ptr.into()))
                }
                None => {},
            }
        }
        // We don't. Just return the bits.
        Ok(ScalarMaybeUndef::Scalar(Scalar::from_uint(bits, size)))
    }

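    // E.g. reading back the pointer from the `Allocation` example near the top of this file:
    // the bytes decode to `bits = 16`, and the relocation entry at `ptr.offset` upgrades the
    // result from a plain integer to `Scalar::Ptr(Pointer { alloc_id: A, offset: 16, tag })`.
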
    /// Reads a pointer-sized scalar.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn read_ptr_sized(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
    ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>>
    {
        self.read_scalar(cx, ptr, cx.data_layout().pointer_size)
    }

    /// Writes a *non-ZST* scalar.
    ///
    /// ZSTs can't be written for two reasons:
    /// * byte-order cannot work with zero-element buffers;
    /// * in order to obtain a `Pointer`, we need to check for ZSTness anyway due to integer
    ///   pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn write_scalar(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        val: ScalarMaybeUndef<Tag>,
        type_size: Size,
    ) -> InterpResult<'tcx>
    {
        let val = match val {
            ScalarMaybeUndef::Scalar(scalar) => scalar,
            ScalarMaybeUndef::Undef => {
                self.mark_definedness(ptr, type_size, false);
                return Ok(());
            },
        };

        let bytes = match val.to_bits_or_ptr(type_size, cx) {
            Err(val) => val.offset.bytes() as u128,
            Ok(data) => data,
        };

        let endian = cx.data_layout().endian;
        let dst = self.get_bytes_mut(cx, ptr, type_size)?;
        write_target_uint(endian, dst, bytes).unwrap();

        // See if we have to also write a relocation.
        match val {
            Scalar::Ptr(val) => {
                self.relocations.insert(
                    ptr.offset,
                    (val.tag, val.alloc_id),
                );
            }
            _ => {}
        }

        Ok(())
    }

    /// Writes a pointer-sized scalar.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn write_ptr_sized(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        val: ScalarMaybeUndef<Tag>
    ) -> InterpResult<'tcx>
    {
        let ptr_size = cx.data_layout().pointer_size;
        self.write_scalar(cx, ptr.into(), val, ptr_size)
    }
}

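// Round-trip sketch (with `cx: &impl HasDataLayout`, a destination `dest` inside `alloc`, and a
// placeholder pointer `some_ptr`): writing a pointer stores its offset in the data bytes plus a
// relocation, and `read_ptr_sized` reassembles the very same pointer:
//
//     let val = ScalarMaybeUndef::Scalar(Scalar::Ptr(some_ptr));
//     alloc.write_ptr_sized(cx, dest, val)?;
//     assert_eq!(alloc.read_ptr_sized(cx, dest)?, val);
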
impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Returns all relocations overlapping with the given pointer-offset pair.
    pub fn get_relocations(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> &[(Size, (Tag, AllocId))] {
        // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
        // the beginning of this range.
        let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
        let end = ptr.offset + size; // This does overflow checking.
        self.relocations.range(Size::from_bytes(start)..end)
    }

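    // Worked example for the back-off: with 8-byte pointers, a query for offsets `40..48` must
    // also see a relocation stored at offset 33, because that pointer covers bytes `33..41` and
    // thus overlaps the queried range; hence the search starts at `40 - 7 = 33`.
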
    /// Checks that there are no relocations overlapping with the given range.
    fn check_relocations(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx> {
        if self.get_relocations(cx, ptr, size).is_empty() {
            Ok(())
        } else {
            throw_unsup!(ReadPointerAsBytes)
        }
    }

    /// Removes all relocations inside the given range.
    /// If there are relocations overlapping with the edges, they
    /// are removed as well *and* the bytes they cover are marked as
    /// uninitialized. This is a somewhat odd "spooky action at a distance",
    /// but it allows strictly more code to run than if we would just error
    /// immediately in that case.
    fn clear_relocations(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx> {
        // Find the start and end of the given range and its outermost relocations.
        let (first, last) = {
            // Find all relocations overlapping the given range.
            let relocations = self.get_relocations(cx, ptr, size);
            if relocations.is_empty() {
                return Ok(());
            }

            (relocations.first().unwrap().0,
             relocations.last().unwrap().0 + cx.data_layout().pointer_size)
        };
        let start = ptr.offset;
        let end = start + size;

        // Mark parts of the outermost relocations as undefined if they partially fall outside the
        // given range.
        if first < start {
            self.undef_mask.set_range(first, start, false);
        }
        if last > end {
            self.undef_mask.set_range(end, last, false);
        }

        // Forget all the relocations.
        self.relocations.remove_range(first..last);

        Ok(())
    }

    /// Errors if there are relocations overlapping with the edges of the
    /// given memory range.
    fn check_relocation_edges(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx> {
        self.check_relocations(cx, ptr, Size::ZERO)?;
        self.check_relocations(cx, ptr.offset(size, cx)?, Size::ZERO)?;
        Ok(())
    }
}

impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
    /// Checks that a range of bytes is defined. If not, returns the `ReadUndefBytes`
    /// error which will report the first byte which is undefined.
    fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
        self.undef_mask.is_range_defined(
            ptr.offset,
            ptr.offset + size,
        ).or_else(|idx| throw_unsup!(ReadUndefBytes(idx)))
    }

    pub fn mark_definedness(
        &mut self,
        ptr: Pointer<Tag>,
        size: Size,
        new_state: bool,
    ) {
        if size.bytes() == 0 {
            return;
        }
        self.undef_mask.set_range(
            ptr.offset,
            ptr.offset + size,
            new_state,
        );
    }
}

/// Run-length encoding of the undef mask.
/// Used to copy parts of a mask multiple times to another allocation.
pub struct AllocationDefinedness {
    /// The definedness of the first range.
    initial: bool,
    /// The lengths of ranges that are run-length encoded.
    /// The definedness of the ranges alternates, starting with `initial`.
    ranges: smallvec::SmallVec::<[u64; 1]>,
}

/// Transferring the definedness mask to other allocations.
impl<Tag, Extra> Allocation<Tag, Extra> {
    /// Creates a run-length encoding of the undef mask.
    pub fn compress_undef_range(
        &self,
        src: Pointer<Tag>,
        size: Size,
    ) -> AllocationDefinedness {
        // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
        // a naive undef mask copying algorithm would repeatedly have to read the undef mask from
        // the source and write it to the destination. Even if we optimized the memory accesses,
        // we'd be doing all of this `repeat` times.
        // Therefore we precompute a compressed version of the undef mask of the source value and
        // then write it back `repeat` times without computing any more information from the source.

        // A precomputed cache for ranges of defined/undefined bits:
        // 0000010010001110 will become
        // `[5, 1, 2, 1, 3, 3, 1]`,
        // where each element toggles the state.

        let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
        let initial = self.undef_mask.get(src.offset);
        let mut cur_len = 1;
        let mut cur = initial;

        for i in 1..size.bytes() {
            // FIXME: optimize to bitshift the current undef block's bits and read the top bit.
            if self.undef_mask.get(src.offset + Size::from_bytes(i)) == cur {
                cur_len += 1;
            } else {
                ranges.push(cur_len);
                cur_len = 1;
                cur = !cur;
            }
        }

        ranges.push(cur_len);

        AllocationDefinedness { ranges, initial }
    }

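    // Decoding goes the other way: starting from `initial`, each entry of `ranges` emits that
    // many bytes and then flips the state. For the mask `0000010010001110` above
    // (`initial = false`), `[5, 1, 2, 1, 3, 3, 1]` expands back to 5 undefined, 1 defined,
    // 2 undefined, 1 defined, 3 undefined, 3 defined, and 1 undefined byte.
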
    /// Applies multiple instances of the run-length encoding to the undef mask.
    pub fn mark_compressed_undef_range(
        &mut self,
        defined: &AllocationDefinedness,
        dest: Pointer<Tag>,
        size: Size,
        repeat: u64,
    ) {
        // An optimization where we can just overwrite an entire range of definedness bits if
        // they are going to be uniformly `1` or `0`.
        if defined.ranges.len() <= 1 {
            self.undef_mask.set_range_inbounds(
                dest.offset,
                dest.offset + size * repeat,
                defined.initial,
            );
            return;
        }

        for mut j in 0..repeat {
            j *= size.bytes();
            j += dest.offset.bytes();
            let mut cur = defined.initial;
            for range in &defined.ranges {
                let old_j = j;
                j += range;
                self.undef_mask.set_range_inbounds(
                    Size::from_bytes(old_j),
                    Size::from_bytes(j),
                    cur,
                );
                cur = !cur;
            }
        }
    }
}

#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
pub struct Relocations<Tag = (), Id = AllocId>(SortedMap<Size, (Tag, Id)>);

impl<Tag, Id> Relocations<Tag, Id> {
    pub fn new() -> Self {
        Relocations(SortedMap::new())
    }

    // The caller must guarantee that the given relocations are already sorted
    // by address and contain no duplicates.
    pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self {
        Relocations(SortedMap::from_presorted_elements(r))
    }
}

impl<Tag> Deref for Relocations<Tag> {
    type Target = SortedMap<Size, (Tag, AllocId)>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<Tag> DerefMut for Relocations<Tag> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

/// A partial, owned list of relocations to transfer into another allocation.
pub struct AllocationRelocations<Tag> {
    relative_relocations: Vec<(Size, (Tag, AllocId))>,
}

impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    pub fn prepare_relocation_copy(
        &self,
        cx: &impl HasDataLayout,
        src: Pointer<Tag>,
        size: Size,
        dest: Pointer<Tag>,
        length: u64,
    ) -> AllocationRelocations<Tag> {
        let relocations = self.get_relocations(cx, src, size);
        if relocations.is_empty() {
            return AllocationRelocations { relative_relocations: Vec::new() };
        }

        let mut new_relocations = Vec::with_capacity(relocations.len() * (length as usize));

        for i in 0..length {
            new_relocations.extend(
                relocations
                .iter()
                .map(|&(offset, reloc)| {
                    // Compute the offset for the current repetition.
                    let dest_offset = dest.offset + (i * size);
                    (
                        // Shift offsets from the source allocation to the destination allocation.
                        offset + dest_offset - src.offset,
                        reloc,
                    )
                })
            );
        }

        AllocationRelocations {
            relative_relocations: new_relocations,
        }
    }

    /// Applies a relocation copy.
    /// The affected range, as defined in the parameters to `prepare_relocation_copy`, is expected
    /// to be clear of relocations.
    pub fn mark_relocation_range(
        &mut self,
        relocations: AllocationRelocations<Tag>,
    ) {
        self.relocations.insert_presorted(relocations.relative_relocations);
    }
}

////////////////////////////////////////////////////////////////////////////////
// Undefined byte tracking
////////////////////////////////////////////////////////////////////////////////

type Block = u64;

/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
/// is defined. If it is `false`, the byte is undefined.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
pub struct UndefMask {
    blocks: Vec<Block>,
    len: Size,
}

impl_stable_hash_for!(struct mir::interpret::UndefMask{blocks, len});

impl UndefMask {
    pub const BLOCK_SIZE: u64 = 64;

    pub fn new(size: Size, state: bool) -> Self {
        let mut m = UndefMask {
            blocks: vec![],
            len: Size::ZERO,
        };
        m.grow(size, state);
        m
    }

    /// Checks whether the range `start..end` (end-exclusive) is entirely defined.
    ///
    /// Returns `Ok(())` if it's defined. Otherwise returns the index of the byte
    /// at which the first undefined access begins.
    pub fn is_range_defined(&self, start: Size, end: Size) -> Result<(), Size> {
        if end > self.len {
            return Err(self.len);
        }

        // FIXME(oli-obk): optimize this for allocations larger than a block.
        let idx = (start.bytes()..end.bytes())
            .map(|i| Size::from_bytes(i))
            .find(|&i| !self.get(i));

        match idx {
            Some(idx) => Err(idx),
            None => Ok(()),
        }
    }

    pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
        let len = self.len;
        if end > len {
            self.grow(end - len, new_state);
        }
        self.set_range_inbounds(start, end, new_state);
    }

    pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
        let (blocka, bita) = bit_index(start);
        let (blockb, bitb) = bit_index(end);
        if blocka == blockb {
            // First set all bits except the first `bita`,
            // then unset the last `64 - bitb` bits.
            let range = if bitb == 0 {
                u64::max_value() << bita
            } else {
                (u64::max_value() << bita) & (u64::max_value() >> (64 - bitb))
            };
            if new_state {
                self.blocks[blocka] |= range;
            } else {
                self.blocks[blocka] &= !range;
            }
            return;
        }
        // Across block boundaries:
        if new_state {
            // Set `bita..64` to `1`.
            self.blocks[blocka] |= u64::max_value() << bita;
            // Set `0..bitb` to `1`.
            if bitb != 0 {
                self.blocks[blockb] |= u64::max_value() >> (64 - bitb);
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1) .. blockb {
                self.blocks[block] = u64::max_value();
            }
        } else {
            // Set `bita..64` to `0`.
            self.blocks[blocka] &= !(u64::max_value() << bita);
            // Set `0..bitb` to `0`.
            if bitb != 0 {
                self.blocks[blockb] &= !(u64::max_value() >> (64 - bitb));
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1) .. blockb {
                self.blocks[block] = 0;
            }
        }
    }

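    // Worked example for the single-block case: setting bytes `3..7` computes
    // `(u64::max_value() << 3) & (u64::max_value() >> (64 - 7))`, i.e. a mask with bits 3, 4, 5,
    // and 6 set, which is then OR-ed in (mark defined) or AND-ed out (mark undefined) in one step.
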
    pub fn get(&self, i: Size) -> bool {
        let (block, bit) = bit_index(i);
        (self.blocks[block] & (1 << bit)) != 0
    }

    pub fn set(&mut self, i: Size, new_state: bool) {
        let (block, bit) = bit_index(i);
        self.set_bit(block, bit, new_state);
    }

    fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
        if new_state {
            self.blocks[block] |= 1 << bit;
        } else {
            self.blocks[block] &= !(1 << bit);
        }
    }

    pub fn grow(&mut self, amount: Size, new_state: bool) {
        if amount.bytes() == 0 {
            return;
        }
        let unused_trailing_bits = self.blocks.len() as u64 * Self::BLOCK_SIZE - self.len.bytes();
        if amount.bytes() > unused_trailing_bits {
            let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
            assert_eq!(additional_blocks as usize as u64, additional_blocks);
            self.blocks.extend(
                // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
                iter::repeat(0).take(additional_blocks as usize),
            );
        }
        let start = self.len;
        self.len += amount;
        self.set_range_inbounds(start, start + amount, new_state);
    }
}

fn bit_index(bits: Size) -> (usize, usize) {
    let bits = bits.bytes();
    let a = bits / UndefMask::BLOCK_SIZE;
    let b = bits % UndefMask::BLOCK_SIZE;
    assert_eq!(a as usize as u64, a);
    assert_eq!(b as usize as u64, b);
    (a as usize, b as usize)
}

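// E.g. byte 70 lives in block `70 / 64 = 1` at bit `70 % 64 = 6`, so
// `bit_index(Size::from_bytes(70))` returns `(1, 6)`.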