use std::hash::{Hasher, Hash};
use std::mem;
use std::borrow::Borrow;
use std::collections::hash_map::RawEntryMut;
use smallvec::SmallVec;
use crate::fx::{FxHasher, FxHashMap};
use crate::sync::{Lock, LockGuard};

#[derive(Clone, Default)]
#[cfg_attr(parallel_compiler, repr(align(64)))]
struct CacheAligned<T>(T);

#[cfg(parallel_compiler)]
// 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
// but this should be tested on higher core count CPUs. How the `Sharded` type gets used
// may also affect the ideal number of shards.
const SHARD_BITS: usize = 5;

#[cfg(not(parallel_compiler))]
const SHARD_BITS: usize = 0;

pub const SHARDS: usize = 1 << SHARD_BITS;
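// With `SHARD_BITS` as above, this is 32 shards for the parallel compiler and a
// single shard otherwise.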

/// An array of cache-line aligned inner locked structures with convenience methods.
pub struct Sharded<T> {
    shards: [CacheAligned<Lock<T>>; SHARDS],
}

impl<T: Default> Default for Sharded<T> {
    fn default() -> Self {
        Self::new(|| T::default())
    }
}

impl<T> Sharded<T> {
    pub fn new(mut value: impl FnMut() -> T) -> Self {
        // Create a vector of the values we want
        let mut values: SmallVec<[_; SHARDS]> = (0..SHARDS).map(|_| {
            CacheAligned(Lock::new(value()))
        }).collect();

        // Create an uninitialized array
        let mut shards: mem::MaybeUninit<[CacheAligned<Lock<T>>; SHARDS]> =
            mem::MaybeUninit::uninit();

        unsafe {
            // Copy the values into our array
            let first = shards.as_mut_ptr() as *mut CacheAligned<Lock<T>>;
            values.as_ptr().copy_to_nonoverlapping(first, SHARDS);

            // Ignore the content of the vector so the values are not dropped twice
            values.set_len(0);

            Sharded {
                shards: shards.assume_init(),
            }
        }
    }

    /// The shard is selected by hashing `val` with `FxHasher`.
    pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> {
        self.get_shard_by_hash(make_hash(val))
    }

    /// Get a shard with a pre-computed hash value. If `get_shard_by_value` is
    /// ever used in combination with `get_shard_by_hash` on a single `Sharded`
    /// instance, then `hash` must be computed with `FxHasher`. Otherwise,
    /// `hash` can be computed with any hasher, so long as that hasher is used
    /// consistently for each `Sharded` instance.
    pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> {
        let hash_len = mem::size_of::<usize>();
        // Ignore the top 7 bits as hashbrown uses these and get the next SHARD_BITS highest bits.
        // hashbrown also uses the lowest bits, so we can't use those
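        // Worked example: on a 64-bit target with SHARD_BITS = 5 this computes
        // `(hash >> 52) % 32`, i.e. the shard index comes from bits 52..57 of the
        // hash, just below the top 7 bits left to hashbrown.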
        let bits = (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize;
        let i = bits % SHARDS;
        &self.shards[i].0
    }

    pub fn lock_shards(&self) -> Vec<LockGuard<'_, T>> {
        (0..SHARDS).map(|i| self.shards[i].0.lock()).collect()
    }

    pub fn try_lock_shards(&self) -> Option<Vec<LockGuard<'_, T>>> {
        (0..SHARDS).map(|i| self.shards[i].0.try_lock()).collect()
    }
}
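
// A minimal usage sketch, assuming the `Lock` type from `crate::sync` behaves like a
// standard mutex: the same key always selects the same shard, and `lock_shards` lets
// us walk every shard.
#[cfg(test)]
mod sharded_usage_sketch {
    use super::*;

    #[test]
    fn same_key_selects_same_shard() {
        let sharded: Sharded<Vec<u32>> = Sharded::new(|| Vec::new());
        for i in 0..100u32 {
            // `get_shard_by_value` hashes the key with `FxHasher`, so repeated calls
            // with the same key return the same shard.
            sharded.get_shard_by_value(&i).lock().push(i);
        }
        // Locking every shard in turn lets us count all inserted values.
        let total: usize = sharded.lock_shards().iter().map(|shard| shard.len()).sum();
        assert_eq!(total, 100);
    }
}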

pub type ShardedHashMap<K, V> = Sharded<FxHashMap<K, V>>;

impl<K: Eq, V> ShardedHashMap<K, V> {
    pub fn len(&self) -> usize {
        self.lock_shards().iter().map(|shard| shard.len()).sum()
    }
}

impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
    pub fn intern_ref<Q: ?Sized>(&self, value: &Q, make: impl FnOnce() -> K) -> K
        where K: Borrow<Q>,
              Q: Hash + Eq
    {
        let hash = make_hash(value);
        let mut shard = self.get_shard_by_hash(hash).lock();
        let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, value);

        match entry {
            RawEntryMut::Occupied(e) => *e.key(),
            RawEntryMut::Vacant(e) => {
                let v = make();
                e.insert_hashed_nocheck(hash, v, ());
                v
            }
        }
    }

    pub fn intern<Q>(&self, value: Q, make: impl FnOnce(Q) -> K) -> K
        where K: Borrow<Q>,
              Q: Hash + Eq
    {
        let hash = make_hash(&value);
        let mut shard = self.get_shard_by_hash(hash).lock();
        let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, &value);

        match entry {
            RawEntryMut::Occupied(e) => *e.key(),
            RawEntryMut::Vacant(e) => {
                let v = make(value);
                e.insert_hashed_nocheck(hash, v, ());
                v
            }
        }
    }
}
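
// A minimal sketch of the interning helpers, assuming `&'static str` as the key type:
// keys must be `Copy`, and the borrowed lookup type `Q` is `str` here. The second
// lookup hits the occupied entry, so its `make` closure is never invoked.
#[cfg(test)]
mod interning_sketch {
    use super::*;

    #[test]
    fn intern_ref_reuses_the_first_key() {
        let map: ShardedHashMap<&'static str, ()> = ShardedHashMap::default();
        let first = map.intern_ref("query", || "query");
        let second = map.intern_ref("query", || unreachable!("already interned"));
        assert_eq!(first, second);
        assert_eq!(map.len(), 1);
    }
}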

fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
    let mut state = FxHasher::default();
    val.hash(&mut state);
    state.finish()
}