+#[derive(Clone, Copy)]
+struct RWU {
+    reader: LiveNode,
+    writer: LiveNode,
+    used: bool,
+}
+
+/// Conceptually, this is like a `Vec<RWU>`. But the number of `RWU`s can get
+/// very large, so it uses a more compact representation that takes advantage
+/// of the fact that when the number of `RWU`s is large, most of them have an
+/// invalid reader and an invalid writer.
+struct RWUTable {
+    /// Each entry in `packed_rwus` is either INV_INV_FALSE, INV_INV_TRUE, or
+    /// an index into `unpacked_rwus`. In the common cases, this compacts the
+    /// 65 bits of data into 32; in the uncommon cases, it expands the 65 bits
+    /// into 96.
+    ///
+    /// More compact representations are possible -- e.g. use only 2 bits per
+    /// packed `RWU` and make the secondary table a HashMap that maps from
+    /// indices to `RWU`s -- but this one strikes a good balance between size
+    /// and speed.
+    packed_rwus: Vec<u32>,
+    unpacked_rwus: Vec<RWU>,
+}
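+
+// An illustrative size estimate (not in the original comment, and assuming a
+// 32-bit `LiveNode`): with 1,000,000 RWUs of which 1,000 are unpacked, this
+// layout needs ~4 MB for `packed_rwus` plus ~12 KB for `unpacked_rwus`,
+// versus ~12 MB for a plain `Vec<RWU>` (each `RWU` is 12 bytes after padding).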
+
+// A constant representing `RWU { reader: invalid_node(), writer: invalid_node(), used: false }`.
+const INV_INV_FALSE: u32 = u32::MAX;
+
+// A constant representing `RWU { reader: invalid_node(), writer: invalid_node(), used: true }`.
+const INV_INV_TRUE: u32 = u32::MAX - 1;
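+
+// Note: any index stored in `packed_rwus` must stay below INV_INV_TRUE, so
+// this scheme implicitly assumes `unpacked_rwus` never reaches
+// `u32::MAX - 1` entries; `assign_unpacked` below does not check for this.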
+
+impl RWUTable {
+    fn new(num_rwus: usize) -> RWUTable {
+        Self {
+            packed_rwus: vec![INV_INV_FALSE; num_rwus],
+            unpacked_rwus: vec![],
+        }
+    }
+
+    fn get(&self, idx: usize) -> RWU {
+        let packed_rwu = self.packed_rwus[idx];
+        match packed_rwu {
+            INV_INV_FALSE => RWU { reader: invalid_node(), writer: invalid_node(), used: false },
+            INV_INV_TRUE => RWU { reader: invalid_node(), writer: invalid_node(), used: true },
+            _ => self.unpacked_rwus[packed_rwu as usize],
+        }
+    }
+
+    fn get_reader(&self, idx: usize) -> LiveNode {
+        let packed_rwu = self.packed_rwus[idx];
+        match packed_rwu {
+            INV_INV_FALSE | INV_INV_TRUE => invalid_node(),
+            _ => self.unpacked_rwus[packed_rwu as usize].reader,
+        }
+    }
+
+    fn get_writer(&self, idx: usize) -> LiveNode {
+        let packed_rwu = self.packed_rwus[idx];
+        match packed_rwu {
+            INV_INV_FALSE | INV_INV_TRUE => invalid_node(),
+            _ => self.unpacked_rwus[packed_rwu as usize].writer,
+        }
+    }
+
+    fn get_used(&self, idx: usize) -> bool {
+        let packed_rwu = self.packed_rwus[idx];
+        match packed_rwu {
+            INV_INV_FALSE => false,
+            INV_INV_TRUE => true,
+            _ => self.unpacked_rwus[packed_rwu as usize].used,
+        }
+    }
+
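+    // After `copy_packed`, both indices may point at the same entry in
+    // `unpacked_rwus`. That sharing is safe here because unpacked entries are
+    // never mutated in place; `assign_unpacked` always pushes a fresh entry.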
+    #[inline]
+    fn copy_packed(&mut self, dst_idx: usize, src_idx: usize) {
+        self.packed_rwus[dst_idx] = self.packed_rwus[src_idx];
+    }
+
+    fn assign_unpacked(&mut self, idx: usize, rwu: RWU) {
+        if rwu.reader == invalid_node() && rwu.writer == invalid_node() {
+            // When we overwrite an indexing entry in `self.packed_rwus` with
+            // `INV_INV_{TRUE,FALSE}` we don't remove the corresponding entry
+            // from `self.unpacked_rwus`; it's not worth the effort, and we
+            // can't have entries shifting around anyway.
+            self.packed_rwus[idx] = if rwu.used {
+                INV_INV_TRUE
+            } else {
+                INV_INV_FALSE
+            };
+        } else {
+            // Add a new RWU to `unpacked_rwus` and make `packed_rwus[idx]`
+            // point to it.
+            self.packed_rwus[idx] = self.unpacked_rwus.len() as u32;
+            self.unpacked_rwus.push(rwu);
+        }
+    }
+
+    fn assign_inv_inv(&mut self, idx: usize) {
+        self.packed_rwus[idx] = if self.get_used(idx) {
+            INV_INV_TRUE
+        } else {
+            INV_INV_FALSE
+        };
+    }
+}
+
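+// A minimal sanity-check sketch, not part of the change above. It only
+// exercises the packed (invalid reader/writer) paths, since constructing a
+// valid `LiveNode` is not shown in this hunk; it assumes `LiveNode`,
+// `invalid_node()`, and `PartialEq` on `LiveNode` are defined elsewhere in
+// this file, as the code above already relies on them.
+#[cfg(test)]
+mod rwu_table_sketch_tests {
+    use super::*;
+
+    #[test]
+    fn packed_round_trip() {
+        let mut table = RWUTable::new(2);
+
+        // Fresh entries are packed as INV_INV_FALSE.
+        assert_eq!(table.packed_rwus[0], INV_INV_FALSE);
+        assert!(!table.get_used(0));
+        assert!(table.get_reader(0) == invalid_node());
+
+        // An RWU with invalid reader and writer stays packed, as
+        // INV_INV_TRUE when `used` is set.
+        table.assign_unpacked(0, RWU {
+            reader: invalid_node(),
+            writer: invalid_node(),
+            used: true,
+        });
+        assert_eq!(table.packed_rwus[0], INV_INV_TRUE);
+        assert!(table.get_used(0));
+
+        // `copy_packed` copies the sentinel value directly.
+        table.copy_packed(1, 0);
+        assert!(table.get_used(1));
+
+        // `assign_inv_inv` preserves the `used` bit.
+        table.assign_inv_inv(1);
+        assert_eq!(table.packed_rwus[1], INV_INV_TRUE);
+    }
+}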