use crate::vec::{Idx, IndexVec};
use arrayvec::ArrayVec;
use std::fmt;
use std::iter;
use std::marker::PhantomData;
use std::mem;
use std::ops::{BitAnd, BitAndAssign, BitOrAssign, Bound, Not, Range, RangeBounds, Shl};
use std::rc::Rc;
use std::slice;

use rustc_macros::{Decodable, Encodable};

use Chunk::*;

pub type Word = u64;
const WORD_BYTES: usize = mem::size_of::<Word>();
const WORD_BITS: usize = WORD_BYTES * 8;
// The choice of chunk size has some trade-offs.
//
// A big chunk size tends to favour cases where many large `ChunkedBitSet`s are
// present, because they require fewer `Chunk`s, reducing the number of
// allocations and reducing peak memory usage. Also, fewer chunk operations are
// required, though more of them might be `Mixed`.
//
// A small chunk size tends to favour cases where many small `ChunkedBitSet`s
// are present, because less space is wasted at the end of the final chunk (if
// any).
const CHUNK_WORDS: usize = 32;
const CHUNK_BITS: usize = CHUNK_WORDS * WORD_BITS; // 2048 bits

/// ChunkSize is small to keep `Chunk` small. The static assertion ensures it's
/// big enough to hold the bit count of a chunk.
type ChunkSize = u16;
const _: () = assert!(CHUNK_BITS <= ChunkSize::MAX as usize);
pub trait BitRelations<Rhs> {
    fn union(&mut self, other: &Rhs) -> bool;
    fn subtract(&mut self, other: &Rhs) -> bool;
    fn intersect(&mut self, other: &Rhs) -> bool;
}
fn inclusive_start_end<T: Idx>(
    range: impl RangeBounds<T>,
    domain: usize,
) -> Option<(usize, usize)> {
    // Both start and end are inclusive.
    let start = match range.start_bound().cloned() {
        Bound::Included(start) => start.index(),
        Bound::Excluded(start) => start.index() + 1,
        Bound::Unbounded => 0,
    };
    let end = match range.end_bound().cloned() {
        Bound::Included(end) => end.index(),
        Bound::Excluded(end) => end.index().checked_sub(1)?,
        Bound::Unbounded => domain - 1,
    };
    assert!(end < domain);
    if start > end {
        return None;
    }
    Some((start, end))
}
macro_rules! bit_relations_inherent_impls {
    () => {
        /// Sets `self = self | other` and returns `true` if `self` changed
        /// (i.e., if new bits were added).
        pub fn union<Rhs>(&mut self, other: &Rhs) -> bool
        where
            Self: BitRelations<Rhs>,
        {
            <Self as BitRelations<Rhs>>::union(self, other)
        }

        /// Sets `self = self - other` and returns `true` if `self` changed
        /// (i.e., if any bits were removed).
        pub fn subtract<Rhs>(&mut self, other: &Rhs) -> bool
        where
            Self: BitRelations<Rhs>,
        {
            <Self as BitRelations<Rhs>>::subtract(self, other)
        }

        /// Sets `self = self & other` and returns `true` if `self` changed
        /// (i.e., if any bits were removed).
        pub fn intersect<Rhs>(&mut self, other: &Rhs) -> bool
        where
            Self: BitRelations<Rhs>,
        {
            <Self as BitRelations<Rhs>>::intersect(self, other)
        }
    };
}
/// A fixed-size bitset type with a dense representation.
///
/// NOTE: Use [`GrowableBitSet`] if you need support for resizing after creation.
///
/// `T` is an index type, typically a newtyped `usize` wrapper, but it can also
/// just be `usize`.
///
/// All operations that involve an element will panic if the element is equal
/// to or greater than the domain size. All operations that involve two bitsets
/// will panic if the bitsets have differing domain sizes.
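///
/// # Examples
///
/// A minimal usage sketch (illustrative only, so it is not run as a doctest):
///
/// ```ignore (illustrative)
/// let mut set: BitSet<usize> = BitSet::new_empty(128);
/// assert!(set.insert(3));  // newly inserted, so the set changed
/// assert!(!set.insert(3)); // already present, so no change
/// assert!(set.contains(3));
/// assert!(set.remove(3));
/// assert!(set.is_empty());
/// ```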
#[derive(Eq, PartialEq, Hash, Decodable, Encodable)]
pub struct BitSet<T> {
    domain_size: usize,
    words: Vec<Word>,
    marker: PhantomData<T>,
}
impl<T> BitSet<T> {
    /// Gets the domain size.
    pub fn domain_size(&self) -> usize {
        self.domain_size
    }
}
impl<T: Idx> BitSet<T> {
    /// Creates a new, empty bitset with a given `domain_size`.
    pub fn new_empty(domain_size: usize) -> BitSet<T> {
        let num_words = num_words(domain_size);
        BitSet { domain_size, words: vec![0; num_words], marker: PhantomData }
    }

    /// Creates a new, filled bitset with a given `domain_size`.
    pub fn new_filled(domain_size: usize) -> BitSet<T> {
        let num_words = num_words(domain_size);
        let mut result = BitSet { domain_size, words: vec![!0; num_words], marker: PhantomData };
        result.clear_excess_bits();
        result
    }
    /// Clear all elements.
    pub fn clear(&mut self) {
        self.words.fill(0);
    }

    /// Clear excess bits in the final word.
    fn clear_excess_bits(&mut self) {
        clear_excess_bits_in_final_word(self.domain_size, &mut self.words);
    }

    /// Count the number of set bits in the set.
    pub fn count(&self) -> usize {
        self.words.iter().map(|e| e.count_ones() as usize).sum()
    }
160 pub fn contains(&self, elem: T) -> bool {
161 assert!(elem.index() < self.domain_size);
162 let (word_index, mask) = word_index_and_mask(elem);
163 (self.words[word_index] & mask) != 0
166 /// Is `self` is a (non-strict) superset of `other`?
168 pub fn superset(&self, other: &BitSet<T>) -> bool {
169 assert_eq!(self.domain_size, other.domain_size);
170 self.words.iter().zip(&other.words).all(|(a, b)| (a & b) == *b)
173 /// Is the set empty?
175 pub fn is_empty(&self) -> bool {
176 self.words.iter().all(|a| *a == 0)
    /// Insert `elem`. Returns whether the set has changed.
    pub fn insert(&mut self, elem: T) -> bool {
        assert!(elem.index() < self.domain_size);
        let (word_index, mask) = word_index_and_mask(elem);
        let word_ref = &mut self.words[word_index];
        let word = *word_ref;
        let new_word = word | mask;
        *word_ref = new_word;
        new_word != word
    }

    pub fn insert_range(&mut self, elems: impl RangeBounds<T>) {
        let Some((start, end)) = inclusive_start_end(elems, self.domain_size) else {
            return;
        };

        let (start_word_index, start_mask) = word_index_and_mask(start);
        let (end_word_index, end_mask) = word_index_and_mask(end);
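
        // For example, inserting `3..=10` into an empty set with one 64-bit
        // word sets bits 3 through 10, leaving the word equal to 0x7f8.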
        // Set all words in between start and end (exclusive of both).
        for word_index in (start_word_index + 1)..end_word_index {
            self.words[word_index] = !0;
        }

        if start_word_index != end_word_index {
            // Start and end are in different words, so we handle each in turn.
            //
            // We set all leading bits. This includes the start_mask bit.
            self.words[start_word_index] |= !(start_mask - 1);
            // And all trailing bits (i.e. from 0..=end) in the end word,
            // including the end.
            self.words[end_word_index] |= end_mask | (end_mask - 1);
        } else {
            self.words[start_word_index] |= end_mask | (end_mask - start_mask);
        }
    }
    /// Sets all bits to true.
    pub fn insert_all(&mut self) {
        self.words.fill(!0);
        self.clear_excess_bits();
    }
    /// Returns `true` if the set has changed.
    pub fn remove(&mut self, elem: T) -> bool {
        assert!(elem.index() < self.domain_size);
        let (word_index, mask) = word_index_and_mask(elem);
        let word_ref = &mut self.words[word_index];
        let word = *word_ref;
        let new_word = word & !mask;
        *word_ref = new_word;
        new_word != word
    }

    /// Gets a slice of the underlying words.
    pub fn words(&self) -> &[Word] {
        &self.words
    }
    /// Iterates over the indices of set bits in a sorted order.
    pub fn iter(&self) -> BitIter<'_, T> {
        BitIter::new(&self.words)
    }

    /// Duplicates the set as a hybrid set.
    pub fn to_hybrid(&self) -> HybridBitSet<T> {
        // Note: we currently don't bother trying to make a Sparse set.
        HybridBitSet::Dense(self.to_owned())
    }
    /// Sets `self = self | other`. In contrast to `union`, returns `true` if
    /// `self` contains at least one bit that is not in `other` (i.e. `other`
    /// is not a superset of `self`).
    ///
    /// This is an optimization for union of a hybrid bitset.
    fn reverse_union_sparse(&mut self, sparse: &SparseBitSet<T>) -> bool {
        assert!(sparse.domain_size == self.domain_size);
        self.clear_excess_bits();

        let mut not_already = false;
        // Index of the current word not yet merged.
        let mut current_index = 0;
        // Mask of bits that came from the sparse set in the current word.
        let mut new_bit_mask = 0;
        for (word_index, mask) in sparse.iter().map(|x| word_index_and_mask(*x)) {
            // Next bit is in a word not inspected yet.
            if word_index > current_index {
                self.words[current_index] |= new_bit_mask;
                // Were there any bits in the old word that did not occur in the sparse set?
                not_already |= (self.words[current_index] ^ new_bit_mask) != 0;
                // Check all words we skipped for any set bit.
                not_already |= self.words[current_index + 1..word_index].iter().any(|&x| x != 0);
                // Update the index of the current word.
                current_index = word_index;
                // Reset the bit mask; no bits have been merged yet.
                new_bit_mask = 0;
            }
            // Add the bit and mark it as coming from the sparse set.
            new_bit_mask |= mask;
        }
        self.words[current_index] |= new_bit_mask;
        // Any bits in the last inspected word that were not in the sparse set?
        not_already |= (self.words[current_index] ^ new_bit_mask) != 0;
        // Any bits in the tail? Note `clear_excess_bits` before.
        not_already |= self.words[current_index + 1..].iter().any(|&x| x != 0);
        not_already
    }
    fn last_set_in(&self, range: impl RangeBounds<T>) -> Option<T> {
        let (start, end) = inclusive_start_end(range, self.domain_size)?;
        let (start_word_index, _) = word_index_and_mask(start);
        let (end_word_index, end_mask) = word_index_and_mask(end);

        let end_word = self.words[end_word_index] & (end_mask | (end_mask - 1));
        if end_word != 0 {
            let pos = max_bit(end_word) + WORD_BITS * end_word_index;
            if start <= pos {
                return Some(T::new(pos));
            }
        }

        // We exclude end_word_index from the range here, because we don't want
        // to limit ourselves to *just* the last word: the bits set in it may be
        // after `end`, so it may not work out.
        if let Some(offset) =
            self.words[start_word_index..end_word_index].iter().rposition(|&w| w != 0)
        {
            let word_idx = start_word_index + offset;
            let start_word = self.words[word_idx];
            let pos = max_bit(start_word) + WORD_BITS * word_idx;
            if start <= pos {
                return Some(T::new(pos));
            }
        }

        None
    }

    bit_relations_inherent_impls! {}
}
impl<T: Idx> BitRelations<BitSet<T>> for BitSet<T> {
    fn union(&mut self, other: &BitSet<T>) -> bool {
        assert_eq!(self.domain_size, other.domain_size);
        bitwise(&mut self.words, &other.words, |a, b| a | b)
    }

    fn subtract(&mut self, other: &BitSet<T>) -> bool {
        assert_eq!(self.domain_size, other.domain_size);
        bitwise(&mut self.words, &other.words, |a, b| a & !b)
    }

    fn intersect(&mut self, other: &BitSet<T>) -> bool {
        assert_eq!(self.domain_size, other.domain_size);
        bitwise(&mut self.words, &other.words, |a, b| a & b)
    }
}
/// A fixed-size bitset type with a partially dense, partially sparse
/// representation. The bitset is broken into chunks, and chunks that are all
/// zeros or all ones are represented and handled very efficiently.
///
/// This type is especially efficient for sets that typically have a large
/// `domain_size` with significant stretches of all zeros or all ones, and also
/// some stretches with lots of 0s and 1s mixed in a way that causes trouble
/// for `IntervalSet`.
///
/// `T` is an index type, typically a newtyped `usize` wrapper, but it can also
/// just be `usize`.
///
/// All operations that involve an element will panic if the element is equal
/// to or greater than the domain size. All operations that involve two bitsets
/// will panic if the bitsets have differing domain sizes.
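///
/// # Examples
///
/// A minimal usage sketch (illustrative only, so it is not run as a doctest):
///
/// ```ignore (illustrative)
/// // A domain of 10,000 bits spans five 2048-bit chunks.
/// let mut set: ChunkedBitSet<usize> = ChunkedBitSet::new_empty(10_000);
/// assert!(set.insert(9_999)); // only the final chunk becomes `Mixed`
/// assert_eq!(set.count(), 1);
/// assert!(set.contains(9_999));
/// ```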
#[derive(Debug, PartialEq, Eq)]
pub struct ChunkedBitSet<T> {
    domain_size: usize,

    /// The chunks. Each one contains exactly CHUNK_BITS values, except the
    /// last one which contains 1..=CHUNK_BITS values.
    chunks: Box<[Chunk]>,

    marker: PhantomData<T>,
}
// Note: the chunk domain size is duplicated in each variant. This is a bit
// inconvenient, but it allows the type size to be smaller than if we had an
// outer struct containing a chunk domain size plus the `Chunk`, because the
// compiler can place the chunk domain size after the tag.
#[derive(Clone, Debug, PartialEq, Eq)]
enum Chunk {
    /// A chunk that is all zeros; we don't represent the zeros explicitly.
    Zeros(ChunkSize),

    /// A chunk that is all ones; we don't represent the ones explicitly.
    Ones(ChunkSize),

    /// A chunk that has a mix of zeros and ones, which are represented
    /// explicitly and densely. It never has all zeros or all ones.
    ///
    /// If this is the final chunk there may be excess, unused words. This
    /// turns out to be both simpler and have better performance than
    /// allocating the minimum number of words, largely because we avoid having
    /// to store the length, which would make this type larger. These excess
    /// words are always zero, as are any excess bits in the final in-use word.
    ///
    /// The second field is the count of 1s set in the chunk, and must satisfy
    /// `0 < count < chunk_domain_size`.
    ///
    /// The words are within an `Rc` because it's surprisingly common to
    /// duplicate an entire chunk, e.g. in `ChunkedBitSet::clone_from()`, or
    /// when a `Mixed` chunk is union'd into a `Zeros` chunk. When we do need
    /// to modify a chunk we use `Rc::make_mut`.
    Mixed(ChunkSize, ChunkSize, Rc<[Word; CHUNK_WORDS]>),
}
// This type is used a lot. Make sure it doesn't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
crate::static_assert_size!(Chunk, 16);

impl<T> ChunkedBitSet<T> {
    pub fn domain_size(&self) -> usize {
        self.domain_size
    }

    #[cfg(test)]
    fn assert_valid(&self) {
        if self.domain_size == 0 {
            assert!(self.chunks.is_empty());
            return;
        }

        assert!((self.chunks.len() - 1) * CHUNK_BITS <= self.domain_size);
        assert!(self.chunks.len() * CHUNK_BITS >= self.domain_size);
        for chunk in self.chunks.iter() {
            chunk.assert_valid();
        }
    }
}
impl<T: Idx> ChunkedBitSet<T> {
    /// Creates a new bitset with a given `domain_size` and chunk kind.
    fn new(domain_size: usize, is_empty: bool) -> Self {
        let chunks = if domain_size == 0 {
            Box::new([])
        } else {
            // All the chunks have a chunk_domain_size of `CHUNK_BITS` except
            // the final one.
            let final_chunk_domain_size = {
                let n = domain_size % CHUNK_BITS;
                if n == 0 { CHUNK_BITS } else { n }
            };
            let mut chunks =
                vec![Chunk::new(CHUNK_BITS, is_empty); num_chunks(domain_size)].into_boxed_slice();
            *chunks.last_mut().unwrap() = Chunk::new(final_chunk_domain_size, is_empty);
            chunks
        };
        ChunkedBitSet { domain_size, chunks, marker: PhantomData }
    }
    /// Creates a new, empty bitset with a given `domain_size`.
    pub fn new_empty(domain_size: usize) -> Self {
        ChunkedBitSet::new(domain_size, /* is_empty */ true)
    }

    /// Creates a new, filled bitset with a given `domain_size`.
    pub fn new_filled(domain_size: usize) -> Self {
        ChunkedBitSet::new(domain_size, /* is_empty */ false)
    }

    #[cfg(test)]
    fn chunks(&self) -> &[Chunk] {
        &self.chunks
    }

    /// Count the number of bits in the set.
    pub fn count(&self) -> usize {
        self.chunks.iter().map(|chunk| chunk.count()).sum()
    }
    /// Returns `true` if `self` contains `elem`.
    pub fn contains(&self, elem: T) -> bool {
        assert!(elem.index() < self.domain_size);
        let chunk = &self.chunks[chunk_index(elem)];
        match chunk {
            Zeros(_) => false,
            Ones(_) => true,
            Mixed(_, _, words) => {
                let (word_index, mask) = chunk_word_index_and_mask(elem);
                (words[word_index] & mask) != 0
            }
        }
    }

    pub fn iter(&self) -> ChunkedBitIter<'_, T> {
        ChunkedBitIter::new(self)
    }
    /// Insert `elem`. Returns whether the set has changed.
    pub fn insert(&mut self, elem: T) -> bool {
        assert!(elem.index() < self.domain_size);
        let chunk_index = chunk_index(elem);
        let chunk = &mut self.chunks[chunk_index];
        match *chunk {
            Zeros(chunk_domain_size) => {
                if chunk_domain_size > 1 {
                    // We take some effort to avoid copying the words.
                    let words = Rc::<[Word; CHUNK_WORDS]>::new_zeroed();
                    // SAFETY: `words` can safely be all zeroes.
                    let mut words = unsafe { words.assume_init() };
                    let words_ref = Rc::get_mut(&mut words).unwrap();

                    let (word_index, mask) = chunk_word_index_and_mask(elem);
                    words_ref[word_index] |= mask;
                    *chunk = Mixed(chunk_domain_size, 1, words);
                } else {
                    *chunk = Ones(chunk_domain_size);
                }
                true
            }
            Ones(_) => false,
            Mixed(chunk_domain_size, ref mut count, ref mut words) => {
                // We skip all the work if the bit is already set.
                let (word_index, mask) = chunk_word_index_and_mask(elem);
                if (words[word_index] & mask) == 0 {
                    *count += 1;
                    if *count < chunk_domain_size {
                        let words = Rc::make_mut(words);
                        words[word_index] |= mask;
                    } else {
                        *chunk = Ones(chunk_domain_size);
                    }
                    true
                } else {
                    false
                }
            }
        }
    }
    /// Sets all bits to true.
    pub fn insert_all(&mut self) {
        for chunk in self.chunks.iter_mut() {
            *chunk = match *chunk {
                Zeros(chunk_domain_size)
                | Ones(chunk_domain_size)
                | Mixed(chunk_domain_size, ..) => Ones(chunk_domain_size),
            }
        }
    }
    /// Returns `true` if the set has changed.
    pub fn remove(&mut self, elem: T) -> bool {
        assert!(elem.index() < self.domain_size);
        let chunk_index = chunk_index(elem);
        let chunk = &mut self.chunks[chunk_index];
        match *chunk {
            Zeros(_) => false,
            Ones(chunk_domain_size) => {
                if chunk_domain_size > 1 {
                    // We take some effort to avoid copying the words.
                    let words = Rc::<[Word; CHUNK_WORDS]>::new_zeroed();
                    // SAFETY: `words` can safely be all zeroes.
                    let mut words = unsafe { words.assume_init() };
                    let words_ref = Rc::get_mut(&mut words).unwrap();

                    // Set only the bits in use.
                    let num_words = num_words(chunk_domain_size as usize);
                    words_ref[..num_words].fill(!0);
                    clear_excess_bits_in_final_word(
                        chunk_domain_size as usize,
                        &mut words_ref[..num_words],
                    );
                    let (word_index, mask) = chunk_word_index_and_mask(elem);
                    words_ref[word_index] &= !mask;
                    *chunk = Mixed(chunk_domain_size, chunk_domain_size - 1, words);
                } else {
                    *chunk = Zeros(chunk_domain_size);
                }
                true
            }
            Mixed(chunk_domain_size, ref mut count, ref mut words) => {
                // We skip all the work if the bit is already clear.
                let (word_index, mask) = chunk_word_index_and_mask(elem);
                if (words[word_index] & mask) != 0 {
                    *count -= 1;
                    if *count > 0 {
                        let words = Rc::make_mut(words);
                        words[word_index] &= !mask;
                    } else {
                        *chunk = Zeros(chunk_domain_size);
                    }
                    true
                } else {
                    false
                }
            }
        }
    }
    bit_relations_inherent_impls! {}
}
impl<T: Idx> BitRelations<ChunkedBitSet<T>> for ChunkedBitSet<T> {
    fn union(&mut self, other: &ChunkedBitSet<T>) -> bool {
        assert_eq!(self.domain_size, other.domain_size);
        debug_assert_eq!(self.chunks.len(), other.chunks.len());

        let mut changed = false;
        for (mut self_chunk, other_chunk) in self.chunks.iter_mut().zip(other.chunks.iter()) {
            match (&mut self_chunk, &other_chunk) {
                (_, Zeros(_)) | (Ones(_), _) => {}
                (Zeros(self_chunk_domain_size), Ones(other_chunk_domain_size))
                | (Mixed(self_chunk_domain_size, ..), Ones(other_chunk_domain_size))
                | (Zeros(self_chunk_domain_size), Mixed(other_chunk_domain_size, ..)) => {
                    // `other_chunk` fully overwrites `self_chunk`
                    debug_assert_eq!(self_chunk_domain_size, other_chunk_domain_size);
                    *self_chunk = other_chunk.clone();
                    changed = true;
                }
                (
                    Mixed(
                        self_chunk_domain_size,
                        ref mut self_chunk_count,
                        ref mut self_chunk_words,
                    ),
                    Mixed(_other_chunk_domain_size, _other_chunk_count, other_chunk_words),
                ) => {
                    // First check if the operation would change
                    // `self_chunk.words`. If not, we can avoid allocating some
                    // words, and this happens often enough that it's a
                    // performance win. Also, we only need to operate on the
                    // in-use words, hence the slicing.
                    let op = |a, b| a | b;
                    let num_words = num_words(*self_chunk_domain_size as usize);
                    if bitwise_changes(
                        &self_chunk_words[0..num_words],
                        &other_chunk_words[0..num_words],
                        op,
                    ) {
                        let self_chunk_words = Rc::make_mut(self_chunk_words);
                        let has_changed = bitwise(
                            &mut self_chunk_words[0..num_words],
                            &other_chunk_words[0..num_words],
                            op,
                        );
                        debug_assert!(has_changed);
                        *self_chunk_count = self_chunk_words[0..num_words]
                            .iter()
                            .map(|w| w.count_ones() as ChunkSize)
                            .sum();
                        if *self_chunk_count == *self_chunk_domain_size {
                            *self_chunk = Ones(*self_chunk_domain_size);
                        }
                        changed = true;
                    }
                }
            }
        }
        changed
    }
    fn subtract(&mut self, _other: &ChunkedBitSet<T>) -> bool {
        unimplemented!("implement if/when necessary");
    }

    fn intersect(&mut self, _other: &ChunkedBitSet<T>) -> bool {
        unimplemented!("implement if/when necessary");
    }
}
impl<T: Idx> BitRelations<HybridBitSet<T>> for ChunkedBitSet<T> {
    fn union(&mut self, other: &HybridBitSet<T>) -> bool {
        // FIXME: This is slow if `other` is dense, but it hasn't been a problem
        // in practice so far.
        // If a faster implementation of this operation is required, consider
        // reopening https://github.com/rust-lang/rust/pull/94625
        assert_eq!(self.domain_size, other.domain_size());
        sequential_update(|elem| self.insert(elem), other.iter())
    }

    fn subtract(&mut self, other: &HybridBitSet<T>) -> bool {
        // FIXME: This is slow if `other` is dense, but it hasn't been a problem
        // in practice so far.
        // If a faster implementation of this operation is required, consider
        // reopening https://github.com/rust-lang/rust/pull/94625
        assert_eq!(self.domain_size, other.domain_size());
        sequential_update(|elem| self.remove(elem), other.iter())
    }

    fn intersect(&mut self, _other: &HybridBitSet<T>) -> bool {
        unimplemented!("implement if/when necessary");
    }
}
impl<T> Clone for ChunkedBitSet<T> {
    fn clone(&self) -> Self {
        ChunkedBitSet {
            domain_size: self.domain_size,
            chunks: self.chunks.clone(),
            marker: PhantomData,
        }
    }

    /// WARNING: this implementation of clone_from will panic if the two
    /// bitsets have different domain sizes. This constraint is not inherent to
    /// `clone_from`, but it works with the existing call sites and allows a
    /// faster implementation, which is important because this function is hot.
    fn clone_from(&mut self, from: &Self) {
        assert_eq!(self.domain_size, from.domain_size);
        debug_assert_eq!(self.chunks.len(), from.chunks.len());

        self.chunks.clone_from(&from.chunks)
    }
}
pub struct ChunkedBitIter<'a, T: Idx> {
    index: usize,
    bitset: &'a ChunkedBitSet<T>,
}

impl<'a, T: Idx> ChunkedBitIter<'a, T> {
    fn new(bitset: &'a ChunkedBitSet<T>) -> ChunkedBitIter<'a, T> {
        ChunkedBitIter { index: 0, bitset }
    }
}

impl<'a, T: Idx> Iterator for ChunkedBitIter<'a, T> {
    type Item = T;
    fn next(&mut self) -> Option<T> {
        while self.index < self.bitset.domain_size() {
            let elem = T::new(self.index);
            let chunk = &self.bitset.chunks[chunk_index(elem)];
            match chunk {
                Zeros(chunk_domain_size) => {
                    self.index += *chunk_domain_size as usize;
                }
                Ones(_chunk_domain_size) => {
                    self.index += 1;
                    return Some(elem);
                }
                Mixed(_chunk_domain_size, _, words) => loop {
                    let elem = T::new(self.index);
                    self.index += 1;
                    let (word_index, mask) = chunk_word_index_and_mask(elem);
                    if (words[word_index] & mask) != 0 {
                        return Some(elem);
                    }
                    if self.index % CHUNK_BITS == 0 {
                        break;
                    }
                },
            }
        }
        None
    }
}
impl Chunk {
    #[cfg(test)]
    fn assert_valid(&self) {
        match *self {
            Zeros(chunk_domain_size) | Ones(chunk_domain_size) => {
                assert!(chunk_domain_size as usize <= CHUNK_BITS);
            }
            Mixed(chunk_domain_size, count, ref words) => {
                assert!(chunk_domain_size as usize <= CHUNK_BITS);
                assert!(0 < count && count < chunk_domain_size);

                // Check the number of set bits matches `count`.
                assert_eq!(
                    words.iter().map(|w| w.count_ones() as ChunkSize).sum::<ChunkSize>(),
                    count
                );

                // Check the not-in-use words are all zeroed.
                let num_words = num_words(chunk_domain_size as usize);
                if num_words < CHUNK_WORDS {
                    assert_eq!(
                        words[num_words..]
                            .iter()
                            .map(|w| w.count_ones() as ChunkSize)
                            .sum::<ChunkSize>(),
                        0
                    );
                }
            }
        }
    }
    fn new(chunk_domain_size: usize, is_empty: bool) -> Self {
        debug_assert!(chunk_domain_size <= CHUNK_BITS);
        let chunk_domain_size = chunk_domain_size as ChunkSize;
        if is_empty { Zeros(chunk_domain_size) } else { Ones(chunk_domain_size) }
    }

    /// Count the number of 1s in the chunk.
    fn count(&self) -> usize {
        match *self {
            Zeros(_) => 0,
            Ones(chunk_domain_size) => chunk_domain_size as usize,
            Mixed(_, count, _) => count as usize,
        }
    }
}
// Applies a function to mutate a bitset, and returns true if any
// of the applications return true.
fn sequential_update<T: Idx>(
    mut self_update: impl FnMut(T) -> bool,
    it: impl Iterator<Item = T>,
) -> bool {
    let mut changed = false;
    for elem in it {
        changed |= self_update(elem);
    }
    changed
}
// Optimization of intersection for SparseBitSet that's generic
// over how membership in the other set is tested.
fn sparse_intersect<T: Idx>(
    set: &mut SparseBitSet<T>,
    other_contains: impl Fn(&T) -> bool,
) -> bool {
    let size = set.elems.len();
    set.elems.retain(|elem| other_contains(elem));
    set.elems.len() != size
}
// Optimization of dense/sparse intersection. The resulting set is
// guaranteed to be at most the size of the sparse set, and hence can be
// represented as a sparse set. Therefore the sparse set is copied and filtered,
// then returned as the new set.
fn dense_sparse_intersect<T: Idx>(
    dense: &BitSet<T>,
    sparse: &SparseBitSet<T>,
) -> (SparseBitSet<T>, bool) {
    let mut sparse_copy = sparse.clone();
    sparse_intersect(&mut sparse_copy, |el| dense.contains(*el));
    let n = sparse_copy.len();
    (sparse_copy, n != dense.count())
}
impl<T: Idx> BitRelations<BitSet<T>> for HybridBitSet<T> {
    fn union(&mut self, other: &BitSet<T>) -> bool {
        assert_eq!(self.domain_size(), other.domain_size);
        match self {
            HybridBitSet::Sparse(sparse) => {
                // `self` is sparse and `other` is dense. To
                // merge them, we have two available strategies:
                // * Densify `self` then merge other
                // * Clone other then integrate bits from `self`
                // The second strategy requires a dedicated method
                // since the usual `union` returns the wrong
                // result. In the dedicated case the computation
                // is slightly faster if the bits of the sparse
                // bitset map to only few words of the dense
                // representation, i.e. indices are near each
                // other.
                //
                // Benchmarking seems to suggest that the second
                // option is worth it.
                let mut new_dense = other.clone();
                let changed = new_dense.reverse_union_sparse(sparse);
                *self = HybridBitSet::Dense(new_dense);
                changed
            }

            HybridBitSet::Dense(dense) => dense.union(other),
        }
    }
    fn subtract(&mut self, other: &BitSet<T>) -> bool {
        assert_eq!(self.domain_size(), other.domain_size);
        match self {
            HybridBitSet::Sparse(sparse) => {
                sequential_update(|elem| sparse.remove(elem), other.iter())
            }
            HybridBitSet::Dense(dense) => dense.subtract(other),
        }
    }

    fn intersect(&mut self, other: &BitSet<T>) -> bool {
        assert_eq!(self.domain_size(), other.domain_size);
        match self {
            HybridBitSet::Sparse(sparse) => sparse_intersect(sparse, |elem| other.contains(*elem)),
            HybridBitSet::Dense(dense) => dense.intersect(other),
        }
    }
}
impl<T: Idx> BitRelations<HybridBitSet<T>> for BitSet<T> {
    fn union(&mut self, other: &HybridBitSet<T>) -> bool {
        assert_eq!(self.domain_size, other.domain_size());
        match other {
            HybridBitSet::Sparse(sparse) => {
                sequential_update(|elem| self.insert(elem), sparse.iter().cloned())
            }
            HybridBitSet::Dense(dense) => self.union(dense),
        }
    }

    fn subtract(&mut self, other: &HybridBitSet<T>) -> bool {
        assert_eq!(self.domain_size, other.domain_size());
        match other {
            HybridBitSet::Sparse(sparse) => {
                sequential_update(|elem| self.remove(elem), sparse.iter().cloned())
            }
            HybridBitSet::Dense(dense) => self.subtract(dense),
        }
    }

    fn intersect(&mut self, other: &HybridBitSet<T>) -> bool {
        assert_eq!(self.domain_size, other.domain_size());
        match other {
            HybridBitSet::Sparse(sparse) => {
                let (updated, changed) = dense_sparse_intersect(self, sparse);

                // We can't directly assign the SparseBitSet to the BitSet, and
                // doing `*self = updated.to_dense()` would cause a drop / reallocation. Instead,
                // the BitSet is cleared and `updated` is copied into `self`.
                self.clear();
                for elem in updated.iter() {
                    self.insert(*elem);
                }
                changed
            }
            HybridBitSet::Dense(dense) => self.intersect(dense),
        }
    }
}
impl<T: Idx> BitRelations<HybridBitSet<T>> for HybridBitSet<T> {
    fn union(&mut self, other: &HybridBitSet<T>) -> bool {
        assert_eq!(self.domain_size(), other.domain_size());
        match self {
            HybridBitSet::Sparse(_) => {
                match other {
                    HybridBitSet::Sparse(other_sparse) => {
                        // Both sets are sparse. Add the elements in
                        // `other_sparse` to `self` one at a time. This
                        // may or may not cause `self` to be densified.
                        let mut changed = false;
                        for elem in other_sparse.iter() {
                            changed |= self.insert(*elem);
                        }
                        changed
                    }

                    HybridBitSet::Dense(other_dense) => self.union(other_dense),
                }
            }

            HybridBitSet::Dense(self_dense) => self_dense.union(other),
        }
    }

    fn subtract(&mut self, other: &HybridBitSet<T>) -> bool {
        assert_eq!(self.domain_size(), other.domain_size());
        match self {
            HybridBitSet::Sparse(self_sparse) => {
                sequential_update(|elem| self_sparse.remove(elem), other.iter())
            }
            HybridBitSet::Dense(self_dense) => self_dense.subtract(other),
        }
    }

    fn intersect(&mut self, other: &HybridBitSet<T>) -> bool {
        assert_eq!(self.domain_size(), other.domain_size());
        match self {
            HybridBitSet::Sparse(self_sparse) => {
                sparse_intersect(self_sparse, |elem| other.contains(*elem))
            }
            HybridBitSet::Dense(self_dense) => match other {
                HybridBitSet::Sparse(other_sparse) => {
                    let (updated, changed) = dense_sparse_intersect(self_dense, other_sparse);
                    *self = HybridBitSet::Sparse(updated);
                    changed
                }
                HybridBitSet::Dense(other_dense) => self_dense.intersect(other_dense),
            },
        }
    }
}
impl<T> Clone for BitSet<T> {
    fn clone(&self) -> Self {
        BitSet { domain_size: self.domain_size, words: self.words.clone(), marker: PhantomData }
    }

    fn clone_from(&mut self, from: &Self) {
        if self.domain_size != from.domain_size {
            // Note: resize to the source's *word* count, not its domain size,
            // so the `copy_from_slice` below sees equal lengths.
            self.words.resize(from.words.len(), 0);
            self.domain_size = from.domain_size;
        }

        self.words.copy_from_slice(&from.words);
    }
}
impl<T: Idx> fmt::Debug for BitSet<T> {
    fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
        w.debug_list().entries(self.iter()).finish()
    }
}
impl<T: Idx> ToString for BitSet<T> {
    fn to_string(&self) -> String {
        let mut result = String::new();
        let mut sep = '[';

        // Note: this is a little endian printout of bytes.
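        // For example, assuming the byte separator `-` and word separator `|`
        // used below, a 16-bit set containing {0, 8} prints as "[01-01]".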
        // i tracks how many bits we have printed so far.
        let mut i = 0;
        for word in &self.words {
            let mut word = *word;
            for _ in 0..WORD_BYTES {
                // for each byte in `word`:
                let remain = self.domain_size - i;
                // If less than a byte remains, then mask just that many bits.
                let mask = if remain <= 8 { (1 << remain) - 1 } else { 0xFF };
                assert!(mask <= 0xFF);
                let byte = word & mask;

                result.push_str(&format!("{}{:02x}", sep, byte));

                if remain <= 8 {
                    break;
                }
                word >>= 8;
                i += 8;
                sep = '-';
            }
            sep = '|';
        }
        result.push(']');

        result
    }
}
pub struct BitIter<'a, T: Idx> {
    /// A copy of the current word, but with any already-visited bits cleared.
    /// (This lets us use `trailing_zeros()` to find the next set bit.) When it
    /// is reduced to 0, we move onto the next word.
    word: Word,

    /// The offset (measured in bits) of the current word.
    offset: usize,

    /// Underlying iterator over the words.
    iter: slice::Iter<'a, Word>,

    marker: PhantomData<T>,
}
impl<'a, T: Idx> BitIter<'a, T> {
    fn new(words: &'a [Word]) -> BitIter<'a, T> {
        // We initialize `word` and `offset` to degenerate values. On the first
        // call to `next()` we will fall through to getting the first word from
        // `iter`, which sets `word` to the first word (if there is one) and
        // `offset` to 0. Doing it this way saves us from having to maintain
        // additional state about whether we have started.
        BitIter {
            word: 0,
            offset: usize::MAX - (WORD_BITS - 1),
            iter: words.iter(),
            marker: PhantomData,
        }
    }
}
impl<'a, T: Idx> Iterator for BitIter<'a, T> {
    type Item = T;
    fn next(&mut self) -> Option<T> {
        loop {
            if self.word != 0 {
                // Get the position of the next set bit in the current word,
                // then clear the bit.
                let bit_pos = self.word.trailing_zeros() as usize;
                let bit = 1 << bit_pos;
                self.word ^= bit;
                return Some(T::new(bit_pos + self.offset));
            }

            // Move onto the next word. `wrapping_add()` is needed to handle
            // the degenerate initial value given to `offset` in `new()`.
            let word = self.iter.next()?;
            self.word = *word;
            self.offset = self.offset.wrapping_add(WORD_BITS);
        }
    }
}
fn bitwise<Op>(out_vec: &mut [Word], in_vec: &[Word], op: Op) -> bool
where
    Op: Fn(Word, Word) -> Word,
{
    assert_eq!(out_vec.len(), in_vec.len());
    let mut changed = 0;
    for (out_elem, in_elem) in iter::zip(out_vec, in_vec) {
        let old_val = *out_elem;
        let new_val = op(old_val, *in_elem);
        *out_elem = new_val;
        // This is essentially equivalent to a != with changed being a bool, but
        // in practice this code gets auto-vectorized by the compiler for most
        // operators. Using != here causes us to generate quite poor code as the
        // compiler tries to go back to a boolean on each loop iteration.
        changed |= old_val ^ new_val;
    }
    changed != 0
}
/// Does this bitwise operation change `out_vec`?
fn bitwise_changes<Op>(out_vec: &[Word], in_vec: &[Word], op: Op) -> bool
where
    Op: Fn(Word, Word) -> Word,
{
    assert_eq!(out_vec.len(), in_vec.len());
    for (out_elem, in_elem) in iter::zip(out_vec, in_vec) {
        let old_val = *out_elem;
        let new_val = op(old_val, *in_elem);
        if old_val != new_val {
            return true;
        }
    }
    false
}
const SPARSE_MAX: usize = 8;

/// A fixed-size bitset type with a sparse representation and a maximum of
/// `SPARSE_MAX` elements. The elements are stored as a sorted `ArrayVec` with
/// no duplicates.
///
/// This type is used by `HybridBitSet`; do not use directly.
#[derive(Clone, Debug)]
pub struct SparseBitSet<T> {
    domain_size: usize,
    elems: ArrayVec<T, SPARSE_MAX>,
}
impl<T: Idx> SparseBitSet<T> {
    fn new_empty(domain_size: usize) -> Self {
        SparseBitSet { domain_size, elems: ArrayVec::new() }
    }

    fn len(&self) -> usize {
        self.elems.len()
    }

    fn is_empty(&self) -> bool {
        self.elems.len() == 0
    }

    fn contains(&self, elem: T) -> bool {
        assert!(elem.index() < self.domain_size);
        self.elems.contains(&elem)
    }

    fn insert(&mut self, elem: T) -> bool {
        assert!(elem.index() < self.domain_size);
        let changed = if let Some(i) = self.elems.iter().position(|&e| e.index() >= elem.index()) {
            if self.elems[i] == elem {
                // `elem` is already in the set.
                false
            } else {
                // `elem` is smaller than one or more existing elements.
                self.elems.insert(i, elem);
                true
            }
        } else {
            // `elem` is larger than all existing elements.
            self.elems.push(elem);
            true
        };
        assert!(self.len() <= SPARSE_MAX);
        changed
    }

    fn remove(&mut self, elem: T) -> bool {
        assert!(elem.index() < self.domain_size);
        if let Some(i) = self.elems.iter().position(|&e| e == elem) {
            self.elems.remove(i);
            true
        } else {
            false
        }
    }

    fn to_dense(&self) -> BitSet<T> {
        let mut dense = BitSet::new_empty(self.domain_size);
        for elem in self.elems.iter() {
            dense.insert(*elem);
        }
        dense
    }

    fn iter(&self) -> slice::Iter<'_, T> {
        self.elems.iter()
    }

    bit_relations_inherent_impls! {}
}
impl<T: Idx + Ord> SparseBitSet<T> {
    fn last_set_in(&self, range: impl RangeBounds<T>) -> Option<T> {
        let mut last_leq = None;
        for e in self.iter() {
            if range.contains(e) {
                last_leq = Some(*e);
            }
        }
        last_leq
    }
}
/// A fixed-size bitset type with a hybrid representation: sparse when there
/// are up to `SPARSE_MAX` elements in the set, but dense when there are more
/// than `SPARSE_MAX`.
///
/// This type is especially efficient for sets that typically have a small
/// number of elements, but a large `domain_size`, and are cleared frequently.
///
/// `T` is an index type, typically a newtyped `usize` wrapper, but it can also
/// just be `usize`.
///
/// All operations that involve an element will panic if the element is equal
/// to or greater than the domain size. All operations that involve two bitsets
/// will panic if the bitsets have differing domain sizes.
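///
/// # Examples
///
/// A minimal usage sketch (illustrative only, so it is not run as a doctest):
///
/// ```ignore (illustrative)
/// let mut set: HybridBitSet<usize> = HybridBitSet::new_empty(1_000_000);
/// // The set stays sparse while it holds at most `SPARSE_MAX` elements...
/// for i in 0..SPARSE_MAX {
///     set.insert(i);
/// }
/// // ...and is densified by the next distinct insertion.
/// set.insert(SPARSE_MAX);
/// assert!(matches!(set, HybridBitSet::Dense(_)));
/// ```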
pub enum HybridBitSet<T> {
    Sparse(SparseBitSet<T>),
    Dense(BitSet<T>),
}
impl<T: Idx> fmt::Debug for HybridBitSet<T> {
    fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Sparse(b) => b.fmt(w),
            Self::Dense(b) => b.fmt(w),
        }
    }
}

impl<T: Idx> HybridBitSet<T> {
    pub fn new_empty(domain_size: usize) -> Self {
        HybridBitSet::Sparse(SparseBitSet::new_empty(domain_size))
    }

    pub fn domain_size(&self) -> usize {
        match self {
            HybridBitSet::Sparse(sparse) => sparse.domain_size,
            HybridBitSet::Dense(dense) => dense.domain_size,
        }
    }

    pub fn clear(&mut self) {
        let domain_size = self.domain_size();
        *self = HybridBitSet::new_empty(domain_size);
    }

    pub fn contains(&self, elem: T) -> bool {
        match self {
            HybridBitSet::Sparse(sparse) => sparse.contains(elem),
            HybridBitSet::Dense(dense) => dense.contains(elem),
        }
    }

    pub fn superset(&self, other: &HybridBitSet<T>) -> bool {
        match (self, other) {
            (HybridBitSet::Dense(self_dense), HybridBitSet::Dense(other_dense)) => {
                self_dense.superset(other_dense)
            }
            _ => {
                assert!(self.domain_size() == other.domain_size());
                other.iter().all(|elem| self.contains(elem))
            }
        }
    }

    pub fn is_empty(&self) -> bool {
        match self {
            HybridBitSet::Sparse(sparse) => sparse.is_empty(),
            HybridBitSet::Dense(dense) => dense.is_empty(),
        }
    }
    /// Returns the previous element present in the bitset from `elem`,
    /// inclusive of `elem`. That is, returns `Some(elem)` if `elem` is in the
    /// bitset.
    pub fn last_set_in(&self, range: impl RangeBounds<T>) -> Option<T>
    where
        T: Ord,
    {
        match self {
            HybridBitSet::Sparse(sparse) => sparse.last_set_in(range),
            HybridBitSet::Dense(dense) => dense.last_set_in(range),
        }
    }
    pub fn insert(&mut self, elem: T) -> bool {
        // No need to check `elem` against `self.domain_size` here because all
        // the match cases check it, one way or another.
        match self {
            HybridBitSet::Sparse(sparse) if sparse.len() < SPARSE_MAX => {
                // The set is sparse and has space for `elem`.
                sparse.insert(elem)
            }
            HybridBitSet::Sparse(sparse) if sparse.contains(elem) => {
                // The set is sparse and does not have space for `elem`, but
                // that doesn't matter because `elem` is already present.
                false
            }
            HybridBitSet::Sparse(sparse) => {
                // The set is sparse and full. Convert to a dense set.
                let mut dense = sparse.to_dense();
                let changed = dense.insert(elem);
                *self = HybridBitSet::Dense(dense);
                changed
            }
            HybridBitSet::Dense(dense) => dense.insert(elem),
        }
    }
    pub fn insert_range(&mut self, elems: impl RangeBounds<T>) {
        // No need to check `elem` against `self.domain_size` here because all
        // the match cases check it, one way or another.
        let start = match elems.start_bound().cloned() {
            Bound::Included(start) => start.index(),
            Bound::Excluded(start) => start.index() + 1,
            Bound::Unbounded => 0,
        };
        // `end` is exclusive here, so `Unbounded` means `domain_size`, one
        // past the final element.
        let end = match elems.end_bound().cloned() {
            Bound::Included(end) => end.index() + 1,
            Bound::Excluded(end) => end.index(),
            Bound::Unbounded => self.domain_size(),
        };
        let Some(len) = end.checked_sub(start) else { return };
        match self {
            HybridBitSet::Sparse(sparse) if sparse.len() + len < SPARSE_MAX => {
                // The set is sparse and has space for `elems`.
                for elem in start..end {
                    sparse.insert(T::new(elem));
                }
            }
            HybridBitSet::Sparse(sparse) => {
                // The set is sparse and full. Convert to a dense set.
                let mut dense = sparse.to_dense();
                dense.insert_range(elems);
                *self = HybridBitSet::Dense(dense);
            }
            HybridBitSet::Dense(dense) => dense.insert_range(elems),
        }
    }
    pub fn insert_all(&mut self) {
        let domain_size = self.domain_size();
        match self {
            HybridBitSet::Sparse(_) => {
                *self = HybridBitSet::Dense(BitSet::new_filled(domain_size));
            }
            HybridBitSet::Dense(dense) => dense.insert_all(),
        }
    }

    pub fn remove(&mut self, elem: T) -> bool {
        // Note: we currently don't bother going from Dense back to Sparse.
        match self {
            HybridBitSet::Sparse(sparse) => sparse.remove(elem),
            HybridBitSet::Dense(dense) => dense.remove(elem),
        }
    }

    /// Converts to a dense set, consuming itself in the process.
    pub fn to_dense(self) -> BitSet<T> {
        match self {
            HybridBitSet::Sparse(sparse) => sparse.to_dense(),
            HybridBitSet::Dense(dense) => dense,
        }
    }

    pub fn iter(&self) -> HybridIter<'_, T> {
        match self {
            HybridBitSet::Sparse(sparse) => HybridIter::Sparse(sparse.iter()),
            HybridBitSet::Dense(dense) => HybridIter::Dense(dense.iter()),
        }
    }

    bit_relations_inherent_impls! {}
}
pub enum HybridIter<'a, T: Idx> {
    Sparse(slice::Iter<'a, T>),
    Dense(BitIter<'a, T>),
}

impl<'a, T: Idx> Iterator for HybridIter<'a, T> {
    type Item = T;

    fn next(&mut self) -> Option<T> {
        match self {
            HybridIter::Sparse(sparse) => sparse.next().copied(),
            HybridIter::Dense(dense) => dense.next(),
        }
    }
}
/// A resizable bitset type with a dense representation.
///
/// `T` is an index type, typically a newtyped `usize` wrapper, but it can also
/// just be `usize`.
///
/// All operations that involve an element will panic if the element is equal
/// to or greater than the domain size.
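///
/// # Examples
///
/// A minimal usage sketch (illustrative only, so it is not run as a doctest):
///
/// ```ignore (illustrative)
/// let mut set: GrowableBitSet<usize> = GrowableBitSet::new_empty();
/// set.insert(42); // the domain grows on demand
/// assert!(set.contains(42));
/// assert!(!set.contains(1_000)); // out-of-range queries return false
/// ```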
#[derive(Clone, Debug, PartialEq)]
pub struct GrowableBitSet<T: Idx> {
    bit_set: BitSet<T>,
}
impl<T: Idx> Default for GrowableBitSet<T> {
    fn default() -> Self {
        GrowableBitSet::new_empty()
    }
}

impl<T: Idx> GrowableBitSet<T> {
    /// Ensure that the set can hold at least `min_domain_size` elements.
    pub fn ensure(&mut self, min_domain_size: usize) {
        if self.bit_set.domain_size < min_domain_size {
            self.bit_set.domain_size = min_domain_size;
        }

        let min_num_words = num_words(min_domain_size);
        if self.bit_set.words.len() < min_num_words {
            self.bit_set.words.resize(min_num_words, 0)
        }
    }

    pub fn new_empty() -> GrowableBitSet<T> {
        GrowableBitSet { bit_set: BitSet::new_empty(0) }
    }

    pub fn with_capacity(capacity: usize) -> GrowableBitSet<T> {
        GrowableBitSet { bit_set: BitSet::new_empty(capacity) }
    }

    /// Returns `true` if the set has changed.
    pub fn insert(&mut self, elem: T) -> bool {
        self.ensure(elem.index() + 1);
        self.bit_set.insert(elem)
    }

    /// Returns `true` if the set has changed.
    pub fn remove(&mut self, elem: T) -> bool {
        self.ensure(elem.index() + 1);
        self.bit_set.remove(elem)
    }

    pub fn is_empty(&self) -> bool {
        self.bit_set.is_empty()
    }

    pub fn contains(&self, elem: T) -> bool {
        let (word_index, mask) = word_index_and_mask(elem);
        self.bit_set.words.get(word_index).map_or(false, |word| (word & mask) != 0)
    }
}
/// A fixed-size 2D bit matrix type with a dense representation.
///
/// `R` and `C` are index types used to identify rows and columns respectively;
/// typically newtyped `usize` wrappers, but they can also just be `usize`.
///
/// All operations that involve a row and/or column index will panic if the
/// index exceeds the relevant bound.
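///
/// # Examples
///
/// A minimal usage sketch (illustrative only, so it is not run as a doctest):
///
/// ```ignore (illustrative)
/// // A 3-row, 10-column matrix, e.g. a reachability relation.
/// let mut matrix: BitMatrix<usize, usize> = BitMatrix::new(3, 10);
/// matrix.insert(0, 7);
/// matrix.union_rows(0, 1); // row 1 gains everything in row 0
/// assert!(matrix.contains(1, 7));
/// ```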
#[derive(Clone, Eq, PartialEq, Hash, Decodable, Encodable)]
pub struct BitMatrix<R: Idx, C: Idx> {
    num_rows: usize,
    num_columns: usize,
    words: Vec<Word>,
    marker: PhantomData<(R, C)>,
}
impl<R: Idx, C: Idx> BitMatrix<R, C> {
    /// Creates a new `rows x columns` matrix, initially empty.
    pub fn new(num_rows: usize, num_columns: usize) -> BitMatrix<R, C> {
        // For every element, we need one bit for every other
        // element. Round up to an even number of words.
        let words_per_row = num_words(num_columns);
        BitMatrix {
            num_rows,
            num_columns,
            words: vec![0; num_rows * words_per_row],
            marker: PhantomData,
        }
    }

    /// Creates a new matrix, with `row` used as the value for every row.
    pub fn from_row_n(row: &BitSet<C>, num_rows: usize) -> BitMatrix<R, C> {
        let num_columns = row.domain_size();
        let words_per_row = num_words(num_columns);
        assert_eq!(words_per_row, row.words().len());
        BitMatrix {
            num_rows,
            num_columns,
            words: iter::repeat(row.words()).take(num_rows).flatten().cloned().collect(),
            marker: PhantomData,
        }
    }
    pub fn rows(&self) -> impl Iterator<Item = R> {
        (0..self.num_rows).map(R::new)
    }

    /// The range of bits for a given row.
    fn range(&self, row: R) -> (usize, usize) {
        let words_per_row = num_words(self.num_columns);
        let start = row.index() * words_per_row;
        (start, start + words_per_row)
    }

    /// Sets the cell at `(row, column)` to true. Put another way, insert
    /// `column` to the bitset for `row`.
    ///
    /// Returns `true` if this changed the matrix.
    pub fn insert(&mut self, row: R, column: C) -> bool {
        assert!(row.index() < self.num_rows && column.index() < self.num_columns);
        let (start, _) = self.range(row);
        let (word_index, mask) = word_index_and_mask(column);
        let words = &mut self.words[..];
        let word = words[start + word_index];
        let new_word = word | mask;
        words[start + word_index] = new_word;
        word != new_word
    }

    /// Do the bits from `row` contain `column`? Put another way, is
    /// the matrix cell at `(row, column)` true? Put yet another way,
    /// if the matrix represents (transitive) reachability, can
    /// `row` reach `column`?
    pub fn contains(&self, row: R, column: C) -> bool {
        assert!(row.index() < self.num_rows && column.index() < self.num_columns);
        let (start, _) = self.range(row);
        let (word_index, mask) = word_index_and_mask(column);
        (self.words[start + word_index] & mask) != 0
    }
    /// Returns those indices that are true in rows `row1` and `row2`. This
    /// is an *O*(*n*) operation where *n* is the number of elements
    /// (somewhat independent from the actual size of the
    /// intersection, in particular).
    pub fn intersect_rows(&self, row1: R, row2: R) -> Vec<C> {
        assert!(row1.index() < self.num_rows && row2.index() < self.num_rows);
        let (row1_start, row1_end) = self.range(row1);
        let (row2_start, row2_end) = self.range(row2);
        let mut result = Vec::with_capacity(self.num_columns);
        for (base, (i, j)) in (row1_start..row1_end).zip(row2_start..row2_end).enumerate() {
            let mut v = self.words[i] & self.words[j];
            for bit in 0..WORD_BITS {
                if v == 0 {
                    break;
                }
                if v & 1 != 0 {
                    result.push(C::new(base * WORD_BITS + bit));
                }
                v >>= 1;
            }
        }
        result
    }
    /// Adds the bits from row `read` to the bits from row `write`, and
    /// returns `true` if anything changed.
    ///
    /// This is used when computing transitive reachability: if you have an
    /// edge `write -> read`, then `write` can reach everything that `read`
    /// can (and potentially more).
    pub fn union_rows(&mut self, read: R, write: R) -> bool {
        assert!(read.index() < self.num_rows && write.index() < self.num_rows);
        let (read_start, read_end) = self.range(read);
        let (write_start, write_end) = self.range(write);
        let words = &mut self.words[..];
        let mut changed = false;
        for (read_index, write_index) in iter::zip(read_start..read_end, write_start..write_end) {
            let word = words[write_index];
            let new_word = word | words[read_index];
            words[write_index] = new_word;
            changed |= word != new_word;
        }
        changed
    }
    /// Adds the bits from `with` to the bits from row `write`, and
    /// returns `true` if anything changed.
    pub fn union_row_with(&mut self, with: &BitSet<C>, write: R) -> bool {
        assert!(write.index() < self.num_rows);
        assert_eq!(with.domain_size(), self.num_columns);
        let (write_start, write_end) = self.range(write);
        let mut changed = false;
        for (read_index, write_index) in iter::zip(0..with.words().len(), write_start..write_end) {
            let word = self.words[write_index];
            let new_word = word | with.words()[read_index];
            self.words[write_index] = new_word;
            changed |= word != new_word;
        }
        changed
    }
    /// Sets every cell in `row` to true.
    pub fn insert_all_into_row(&mut self, row: R) {
        assert!(row.index() < self.num_rows);
        let (start, end) = self.range(row);
        let words = &mut self.words[..];
        for index in start..end {
            words[index] = !0;
        }
        clear_excess_bits_in_final_word(self.num_columns, &mut self.words[..end]);
    }
    /// Gets a slice of the underlying words.
    pub fn words(&self) -> &[Word] {
        &self.words
    }

    /// Iterates through all the columns set to true in a given row of
    /// the matrix.
    pub fn iter(&self, row: R) -> BitIter<'_, C> {
        assert!(row.index() < self.num_rows);
        let (start, end) = self.range(row);
        BitIter::new(&self.words[start..end])
    }

    /// Returns the number of elements in `row`.
    pub fn count(&self, row: R) -> usize {
        let (start, end) = self.range(row);
        self.words[start..end].iter().map(|e| e.count_ones() as usize).sum()
    }
}
impl<R: Idx, C: Idx> fmt::Debug for BitMatrix<R, C> {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        /// Forces its contents to print in regular mode instead of alternate mode.
        struct OneLinePrinter<T>(T);
        impl<T: fmt::Debug> fmt::Debug for OneLinePrinter<T> {
            fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
                write!(fmt, "{:?}", self.0)
            }
        }

        write!(fmt, "BitMatrix({}x{}) ", self.num_rows, self.num_columns)?;
        let items = self.rows().flat_map(|r| self.iter(r).map(move |c| (r, c)));
        fmt.debug_set().entries(items.map(OneLinePrinter)).finish()
    }
}
/// A fixed-column-size, variable-row-size 2D bit matrix with a moderately
/// sparse representation.
///
/// Initially, every row has no explicit representation. If any bit within a
/// row is set, the entire row is instantiated as `Some(<HybridBitSet>)`.
/// Furthermore, any previously uninstantiated rows prior to it will be
/// instantiated as `None`. Those prior rows may themselves become fully
/// instantiated later on if any of their bits are set.
///
/// `R` and `C` are index types used to identify rows and columns respectively;
/// typically newtyped `usize` wrappers, but they can also just be `usize`.
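///
/// # Examples
///
/// A minimal usage sketch (illustrative only, so it is not run as a doctest):
///
/// ```ignore (illustrative)
/// let mut matrix: SparseBitMatrix<usize, usize> = SparseBitMatrix::new(100);
/// // Rows are only materialized once a bit is set in them.
/// assert!(matrix.row(5).is_none());
/// matrix.insert(5, 42);
/// assert!(matrix.contains(5, 42));
/// assert!(matrix.row(5).is_some());
/// ```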
#[derive(Clone, Debug)]
pub struct SparseBitMatrix<R, C>
where
    R: Idx,
    C: Idx,
{
    num_columns: usize,
    rows: IndexVec<R, Option<HybridBitSet<C>>>,
}
impl<R: Idx, C: Idx> SparseBitMatrix<R, C> {
    /// Creates a new empty sparse bit matrix with no rows or columns.
    pub fn new(num_columns: usize) -> Self {
        Self { num_columns, rows: IndexVec::new() }
    }

    fn ensure_row(&mut self, row: R) -> &mut HybridBitSet<C> {
        // Instantiate any missing rows up to and including row `row` with an empty HybridBitSet.
        // Then replace row `row` with a full HybridBitSet if necessary.
        self.rows.get_or_insert_with(row, || HybridBitSet::new_empty(self.num_columns))
    }

    /// Sets the cell at `(row, column)` to true. Put another way, insert
    /// `column` to the bitset for `row`.
    ///
    /// Returns `true` if this changed the matrix.
    pub fn insert(&mut self, row: R, column: C) -> bool {
        self.ensure_row(row).insert(column)
    }

    /// Sets the cell at `(row, column)` to false. Put another way, delete
    /// `column` from the bitset for `row`. Has no effect if `row` does not
    /// exist.
    ///
    /// Returns `true` if this changed the matrix.
    pub fn remove(&mut self, row: R, column: C) -> bool {
        match self.rows.get_mut(row) {
            Some(Some(row)) => row.remove(column),
            _ => false,
        }
    }
    /// Sets all columns at `row` to false. Has no effect if `row` does
    /// not exist.
    pub fn clear(&mut self, row: R) {
        if let Some(Some(row)) = self.rows.get_mut(row) {
            row.clear();
        }
    }
    /// Do the bits from `row` contain `column`? Put another way, is
    /// the matrix cell at `(row, column)` true? Put yet another way,
    /// if the matrix represents (transitive) reachability, can
    /// `row` reach `column`?
    pub fn contains(&self, row: R, column: C) -> bool {
        self.row(row).map_or(false, |r| r.contains(column))
    }

    /// Adds the bits from row `read` to the bits from row `write`, and
    /// returns `true` if anything changed.
    ///
    /// This is used when computing transitive reachability: if you have an
    /// edge `write -> read`, then `write` can reach everything that `read`
    /// can (and potentially more).
    pub fn union_rows(&mut self, read: R, write: R) -> bool {
        if read == write || self.row(read).is_none() {
            return false;
        }

        self.ensure_row(write);
        if let (Some(read_row), Some(write_row)) = self.rows.pick2_mut(read, write) {
            write_row.union(read_row)
        } else {
            unreachable!()
        }
    }
    /// Insert all bits in the given row.
    pub fn insert_all_into_row(&mut self, row: R) {
        self.ensure_row(row).insert_all();
    }

    pub fn rows(&self) -> impl Iterator<Item = R> {
        self.rows.indices()
    }

    /// Iterates through all the columns set to true in a given row of
    /// the matrix.
    pub fn iter<'a>(&'a self, row: R) -> impl Iterator<Item = C> + 'a {
        self.row(row).into_iter().flat_map(|r| r.iter())
    }

    pub fn row(&self, row: R) -> Option<&HybridBitSet<C>> {
        self.rows.get(row)?.as_ref()
    }
    /// Intersects `row` with `set`. `set` can be either `BitSet` or
    /// `HybridBitSet`. Has no effect if `row` does not exist.
    ///
    /// Returns true if the row was changed.
    pub fn intersect_row<Set>(&mut self, row: R, set: &Set) -> bool
    where
        HybridBitSet<C>: BitRelations<Set>,
    {
        match self.rows.get_mut(row) {
            Some(Some(row)) => row.intersect(set),
            _ => false,
        }
    }

    /// Subtracts `set` from `row`. `set` can be either `BitSet` or
    /// `HybridBitSet`. Has no effect if `row` does not exist.
    ///
    /// Returns true if the row was changed.
    pub fn subtract_row<Set>(&mut self, row: R, set: &Set) -> bool
    where
        HybridBitSet<C>: BitRelations<Set>,
    {
        match self.rows.get_mut(row) {
            Some(Some(row)) => row.subtract(set),
            _ => false,
        }
    }

    /// Unions `row` with `set`. `set` can be either `BitSet` or
    /// `HybridBitSet`.
    ///
    /// Returns true if the row was changed.
    pub fn union_row<Set>(&mut self, row: R, set: &Set) -> bool
    where
        HybridBitSet<C>: BitRelations<Set>,
    {
        self.ensure_row(row).union(set)
    }
}
fn num_words<T: Idx>(domain_size: T) -> usize {
    (domain_size.index() + WORD_BITS - 1) / WORD_BITS
}

fn num_chunks<T: Idx>(domain_size: T) -> usize {
    assert!(domain_size.index() > 0);
    (domain_size.index() + CHUNK_BITS - 1) / CHUNK_BITS
}

fn word_index_and_mask<T: Idx>(elem: T) -> (usize, Word) {
    let elem = elem.index();
    let word_index = elem / WORD_BITS;
    let mask = 1 << (elem % WORD_BITS);
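    // For example, with 64-bit `Word`s, element 70 lands in word 70 / 64 == 1,
    // with in-word mask 1 << (70 % 64) == 1 << 6.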
    (word_index, mask)
}

fn chunk_index<T: Idx>(elem: T) -> usize {
    elem.index() / CHUNK_BITS
}

fn chunk_word_index_and_mask<T: Idx>(elem: T) -> (usize, Word) {
    let chunk_elem = elem.index() % CHUNK_BITS;
    word_index_and_mask(chunk_elem)
}

fn clear_excess_bits_in_final_word(domain_size: usize, words: &mut [Word]) {
    let num_bits_in_final_word = domain_size % WORD_BITS;
    if num_bits_in_final_word > 0 {
        let mask = (1 << num_bits_in_final_word) - 1;
        words[words.len() - 1] &= mask;
    }
}
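// For example, `max_bit(0b0100)` is 2: a 64-bit word with value 4 has 61
// leading zeros, and 64 - 1 - 61 == 2. Note that `word` must be non-zero,
// since `max_bit(0)` would underflow.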
fn max_bit(word: Word) -> usize {
    WORD_BITS - 1 - word.leading_zeros() as usize
}
/// Integral type used to represent the bit set.
pub trait FiniteBitSetTy:
    BitAnd<Output = Self>
    + BitAndAssign
    + BitOrAssign
    + Copy
    + Not<Output = Self>
    + PartialEq
    + Shl<u32, Output = Self>
    + Sized
{
    /// Size of the domain representable by this type, e.g. 64 for `u64`.
    const DOMAIN_SIZE: u32;

    /// Value which represents the `FiniteBitSet` having every bit set.
    const FILLED: Self;
    /// Value which represents the `FiniteBitSet` having no bits set.
    const EMPTY: Self;

    /// Value for one as the integral type.
    const ONE: Self;
    /// Value for zero as the integral type.
    const ZERO: Self;

    /// Perform a checked left shift on the integral type.
    fn checked_shl(self, rhs: u32) -> Option<Self>;
    /// Perform a checked right shift on the integral type.
    fn checked_shr(self, rhs: u32) -> Option<Self>;
}
impl FiniteBitSetTy for u32 {
    const DOMAIN_SIZE: u32 = 32;

    const FILLED: Self = Self::MAX;
    const EMPTY: Self = Self::MIN;

    const ONE: Self = 1u32;
    const ZERO: Self = 0u32;

    fn checked_shl(self, rhs: u32) -> Option<Self> {
        self.checked_shl(rhs)
    }

    fn checked_shr(self, rhs: u32) -> Option<Self> {
        self.checked_shr(rhs)
    }
}

impl std::fmt::Debug for FiniteBitSet<u32> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:032b}", self.0)
    }
}
impl FiniteBitSetTy for u64 {
    const DOMAIN_SIZE: u32 = 64;

    const FILLED: Self = Self::MAX;
    const EMPTY: Self = Self::MIN;

    const ONE: Self = 1u64;
    const ZERO: Self = 0u64;

    fn checked_shl(self, rhs: u32) -> Option<Self> {
        self.checked_shl(rhs)
    }

    fn checked_shr(self, rhs: u32) -> Option<Self> {
        self.checked_shr(rhs)
    }
}

impl std::fmt::Debug for FiniteBitSet<u64> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:064b}", self.0)
    }
}
impl FiniteBitSetTy for u128 {
    const DOMAIN_SIZE: u32 = 128;

    const FILLED: Self = Self::MAX;
    const EMPTY: Self = Self::MIN;

    const ONE: Self = 1u128;
    const ZERO: Self = 0u128;

    fn checked_shl(self, rhs: u32) -> Option<Self> {
        self.checked_shl(rhs)
    }

    fn checked_shr(self, rhs: u32) -> Option<Self> {
        self.checked_shr(rhs)
    }
}

impl std::fmt::Debug for FiniteBitSet<u128> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:0128b}", self.0)
    }
}
/// A fixed-sized bitset type represented by an integer type. Indices outside
/// the range representable by `T` are considered set.
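///
/// # Examples
///
/// A minimal usage sketch (illustrative only, so it is not run as a doctest):
///
/// ```ignore (illustrative)
/// let mut set = FiniteBitSet::<u32>::new_empty();
/// set.set(3);
/// assert_eq!(set.contains(3), Some(true));
/// assert_eq!(set.contains(4), Some(false));
/// assert_eq!(set.contains(40), None); // outside the `u32` domain
/// ```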
#[derive(Copy, Clone, Eq, PartialEq, Decodable, Encodable)]
pub struct FiniteBitSet<T: FiniteBitSetTy>(pub T);
impl<T: FiniteBitSetTy> FiniteBitSet<T> {
    /// Creates a new, empty bitset.
    pub fn new_empty() -> Self {
        Self(T::EMPTY)
    }

    /// Sets the `index`th bit.
    pub fn set(&mut self, index: u32) {
        self.0 |= T::ONE.checked_shl(index).unwrap_or(T::ZERO);
    }

    /// Unsets the `index`th bit.
    pub fn clear(&mut self, index: u32) {
        self.0 &= !T::ONE.checked_shl(index).unwrap_or(T::ZERO);
    }

    /// Sets the `i`th to `j`th bits.
    pub fn set_range(&mut self, range: Range<u32>) {
        let bits = T::FILLED
            .checked_shl(range.end - range.start)
            .map_or(T::FILLED, |result| !result)
            .checked_shl(range.start)
            .unwrap_or(T::ZERO);
        self.0 |= bits;
    }
    /// Is the set empty?
    pub fn is_empty(&self) -> bool {
        self.0 == T::EMPTY
    }

    /// Returns `true` if `index` is within the domain of the bitset.
    pub fn within_domain(&self, index: u32) -> bool {
        index < T::DOMAIN_SIZE
    }
    /// Returns whether the `index`th bit is set, or `None` if `index` is
    /// outside the domain.
    pub fn contains(&self, index: u32) -> Option<bool> {
        self.within_domain(index)
            .then(|| ((self.0.checked_shr(index).unwrap_or(T::ONE)) & T::ONE) == T::ONE)
    }
}

impl<T: FiniteBitSetTy> Default for FiniteBitSet<T> {
    fn default() -> Self {
        Self::new_empty()
    }
}