1 use crate::dep_graph::DepNodeIndex;
3 use rustc_arena::TypedArena;
4 use rustc_data_structures::fx::FxHashMap;
5 use rustc_data_structures::sharded;
6 #[cfg(parallel_compiler)]
7 use rustc_data_structures::sharded::Sharded;
8 #[cfg(not(parallel_compiler))]
9 use rustc_data_structures::sync::Lock;
10 use rustc_data_structures::sync::WorkerLocal;
11 use rustc_index::vec::{Idx, IndexVec};
12 use std::default::Default;
15 use std::marker::PhantomData;
// Selects which cache implementation a query uses for a given key/value pair.
// Implementations below expose `Cache` and `ArenaCache` associated types
// (see the `DefaultCacheSelector` / `VecCacheSelector` impls).
// NOTE(review): trait body is truncated in this excerpt.
17 pub trait CacheSelector<'tcx, V> {
// Minimal storage interface shared by all query caches; `Value`/`Stored`
// associated types are referenced by `store_nocache` but their declarations
// are not visible in this excerpt.
24 pub trait QueryStorage {
28 /// Store a value without putting it in the cache.
29 /// This is meant to be used with cycle errors.
30 fn store_nocache(&self, value: Self::Value) -> Self::Stored;
// Full cache interface layered on top of `QueryStorage`: keyed lookup,
// completion (recording a computed value), and iteration over all entries.
33 pub trait QueryCache: QueryStorage + Sized {
34 type Key: Hash + Eq + Clone + Debug;
36 /// Checks if the query is already computed and in the cache.
37 /// It returns the shard index and a lock guard to the shard,
38 /// which will be used if the query is not in the cache and we need
// NOTE(review): `lookup`'s signature is truncated here; only its `OnHit`
// bound and the hit-callback contract are visible.
43 // `on_hit` can be called while holding a lock to the query state shard.
47 OnHit: FnOnce(&Self::Stored, DepNodeIndex) -> R;
// Record a computed value for `key`, tagged with the dep-graph node that
// produced it, and return the stored form handed back to the caller.
49 fn complete(&self, key: Self::Key, value: Self::Value, index: DepNodeIndex) -> Self::Stored;
// Visit every cached (key, value, dep-node) triple.
51 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex));
// Zero-sized selector that picks the hash-map-backed caches for key type `K`;
// `PhantomData` carries `K` without storing one.
54 pub struct DefaultCacheSelector<K>(PhantomData<K>);
// Hash-map keys get `DefaultCache` (values stored inline) or `ArenaCache`
// (values arena-allocated, returned as `&'tcx V`).
56 impl<'tcx, K: Eq + Hash, V: 'tcx> CacheSelector<'tcx, V> for DefaultCacheSelector<K> {
57 type Cache = DefaultCache<K, V>
60 type ArenaCache = ArenaCache<'tcx, K, V>;
// Hash-map cache storing `(value, dep-node)` pairs inline. Under the
// parallel compiler the map is sharded to reduce lock contention; otherwise
// a single `Lock` guards one map.
63 pub struct DefaultCache<K, V> {
64 #[cfg(parallel_compiler)]
65 cache: Sharded<FxHashMap<K, (V, DepNodeIndex)>>,
66 #[cfg(not(parallel_compiler))]
67 cache: Lock<FxHashMap<K, (V, DepNodeIndex)>>,
// Manual `Default` impl: derived `Default` would require `K: Default` and
// `V: Default`, which the map itself does not need.
70 impl<K, V> Default for DefaultCache<K, V> {
71 fn default() -> Self {
72 DefaultCache { cache: Default::default() }
// Inline storage: there is no arena, so `store_nocache` has nowhere separate
// to put the value.
76 impl<K: Eq + Hash, V: Clone + Debug> QueryStorage for DefaultCache<K, V> {
81 fn store_nocache(&self, value: Self::Value) -> Self::Stored {
82 // We have no dedicated storage
// NOTE(review): the body's return expression is truncated in this excerpt;
// presumably the value is returned as-is.
87 impl<K, V> QueryCache for DefaultCache<K, V>
89 K: Eq + Hash + Clone + Debug,
// Probe the cache for `key`; on a hit, invoke `on_hit` with the cached value
// and its dep-node index while the shard lock is held.
95 fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
97 OnHit: FnOnce(&V, DepNodeIndex) -> R,
// Hash once up front so shard selection and the map probe share the hash.
99 let key_hash = sharded::make_hash(key);
100 #[cfg(parallel_compiler)]
101 let lock = self.cache.get_shard_by_hash(key_hash).lock();
102 #[cfg(not(parallel_compiler))]
103 let lock = self.cache.lock();
// Raw-entry lookup reuses `key_hash` instead of re-hashing the key.
104 let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
106 if let Some((_, value)) = result {
107 let hit_result = on_hit(&value.0, value.1);
// Insert a freshly computed value; clones it so the caller keeps an
// owned copy (this cache's `Stored` form) while the map owns another.
115 fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
116 #[cfg(parallel_compiler)]
117 let mut lock = self.cache.get_shard_by_value(&key).lock();
118 #[cfg(not(parallel_compiler))]
119 let mut lock = self.cache.lock();
120 lock.insert(key, (value.clone(), index));
// Walk every entry, locking all shards (parallel) or the single map.
124 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
125 #[cfg(parallel_compiler)]
127 let shards = self.cache.lock_shards();
128 for shard in shards.iter() {
129 for (k, v) in shard.iter() {
134 #[cfg(not(parallel_compiler))]
136 let map = self.cache.lock();
137 for (k, v) in map.iter() {
// Like `DefaultCache`, but values are allocated in a per-worker arena and the
// map stores `&'tcx` references into it, so lookups hand out stable
// references instead of cloning the value.
144 pub struct ArenaCache<'tcx, K, V> {
145 arena: WorkerLocal<TypedArena<(V, DepNodeIndex)>>,
146 #[cfg(parallel_compiler)]
147 cache: Sharded<FxHashMap<K, &'tcx (V, DepNodeIndex)>>,
148 #[cfg(not(parallel_compiler))]
149 cache: Lock<FxHashMap<K, &'tcx (V, DepNodeIndex)>>,
// Each worker thread gets its own `TypedArena` via `WorkerLocal`; the map
// starts empty.
152 impl<'tcx, K, V> Default for ArenaCache<'tcx, K, V> {
153 fn default() -> Self {
154 ArenaCache { arena: WorkerLocal::new(|_| TypedArena::default()), cache: Default::default() }
158 impl<'tcx, K: Eq + Hash, V: Debug + 'tcx> QueryStorage for ArenaCache<'tcx, K, V> {
// Callers receive a reference into the arena rather than an owned value.
160 type Stored = &'tcx V;
// Allocate the value in the arena (tagged INVALID since no dep-node exists
// for a cycle-error value) without recording it in the lookup map.
163 fn store_nocache(&self, value: Self::Value) -> Self::Stored {
164 let value = self.arena.alloc((value, DepNodeIndex::INVALID));
// SAFETY(review): extends the arena-borrow to 'tcx by casting through a raw
// pointer. Sound only if the arena is never dropped or moved before 'tcx
// ends — presumably guaranteed by how `WorkerLocal`/the cache is owned;
// confirm against the cache's owner.
165 let value = unsafe { &*(&value.0 as *const _) };
170 impl<'tcx, K, V: 'tcx> QueryCache for ArenaCache<'tcx, K, V>
172 K: Eq + Hash + Clone + Debug,
// Same probe strategy as `DefaultCache::lookup`, but the map stores
// `&'tcx (V, DepNodeIndex)`, so the hit callback sees `&&'tcx V`.
178 fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
180 OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R,
// Hash once; reused for shard selection and the raw-entry probe.
182 let key_hash = sharded::make_hash(key);
183 #[cfg(parallel_compiler)]
184 let lock = self.cache.get_shard_by_hash(key_hash).lock();
185 #[cfg(not(parallel_compiler))]
186 let lock = self.cache.lock();
187 let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
189 if let Some((_, value)) = result {
190 let hit_result = on_hit(&&value.0, value.1);
// Allocate the value in the arena, then store the (lifetime-extended)
// reference in the map; no clone of `V` is needed.
198 fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
199 let value = self.arena.alloc((value, index));
// SAFETY(review): same 'tcx lifetime extension as `store_nocache` above —
// relies on the arena outliving 'tcx users; confirm at the owner.
200 let value = unsafe { &*(value as *const _) };
201 #[cfg(parallel_compiler)]
202 let mut lock = self.cache.get_shard_by_value(&key).lock();
203 #[cfg(not(parallel_compiler))]
204 let mut lock = self.cache.lock();
205 lock.insert(key, value);
// Visit all entries across shards (or the single locked map).
209 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
210 #[cfg(parallel_compiler)]
212 let shards = self.cache.lock_shards();
213 for shard in shards.iter() {
214 for (k, v) in shard.iter() {
219 #[cfg(not(parallel_compiler))]
221 let map = self.cache.lock();
222 for (k, v) in map.iter() {
// Zero-sized selector for queries whose keys are dense indices (`K: Idx`),
// which allows vector-backed caches instead of hash maps.
229 pub struct VecCacheSelector<K>(PhantomData<K>);
// Index keys get `VecCache` (inline values) or `VecArenaCache`
// (arena-allocated values returned as `&'tcx V`).
231 impl<'tcx, K: Idx, V: 'tcx> CacheSelector<'tcx, V> for VecCacheSelector<K> {
232 type Cache = VecCache<K, V>
235 type ArenaCache = VecArenaCache<'tcx, K, V>;
// Dense-key cache: an `IndexVec` slot per key, `None` meaning "not cached".
// Sharded under the parallel compiler, single-locked otherwise.
238 pub struct VecCache<K: Idx, V> {
239 #[cfg(parallel_compiler)]
240 cache: Sharded<IndexVec<K, Option<(V, DepNodeIndex)>>>,
241 #[cfg(not(parallel_compiler))]
242 cache: Lock<IndexVec<K, Option<(V, DepNodeIndex)>>>,
// Manual `Default` to avoid requiring `V: Default`; starts with an empty vec.
245 impl<K: Idx, V> Default for VecCache<K, V> {
246 fn default() -> Self {
247 VecCache { cache: Default::default() }
// Inline storage, mirroring `DefaultCache`: no arena to place the value in.
251 impl<K: Eq + Idx, V: Clone + Debug> QueryStorage for VecCache<K, V> {
256 fn store_nocache(&self, value: Self::Value) -> Self::Stored {
257 // We have no dedicated storage
// NOTE(review): return expression truncated in this excerpt; presumably the
// value is returned unchanged.
262 impl<K, V> QueryCache for VecCache<K, V>
264 K: Eq + Idx + Clone + Debug,
// Dense keys need no hashing: the index itself selects the shard and the
// vec slot is a direct `get`.
270 fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
272 OnHit: FnOnce(&V, DepNodeIndex) -> R,
274 #[cfg(parallel_compiler)]
// Shard chosen from the raw index value rather than a computed hash.
275 let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
276 #[cfg(not(parallel_compiler))]
277 let lock = self.cache.lock();
// Outer `Option`: index in bounds; inner `Option`: slot populated.
278 if let Some(Some(value)) = lock.get(*key) {
279 let hit_result = on_hit(&value.0, value.1);
// Record a computed value; clones it so the caller keeps an owned copy.
287 fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
288 #[cfg(parallel_compiler)]
289 let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
290 #[cfg(not(parallel_compiler))]
291 let mut lock = self.cache.lock();
292 lock.insert(key, (value.clone(), index));
// Iterate populated slots; `iter_enumerated` yields (index, slot) pairs.
296 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
297 #[cfg(parallel_compiler)]
299 let shards = self.cache.lock_shards();
300 for shard in shards.iter() {
301 for (k, v) in shard.iter_enumerated() {
308 #[cfg(not(parallel_compiler))]
310 let map = self.cache.lock();
311 for (k, v) in map.iter_enumerated() {
// Dense-key variant of `ArenaCache`: slots hold `&'tcx` references into a
// per-worker arena instead of inline values.
320 pub struct VecArenaCache<'tcx, K: Idx, V> {
321 arena: WorkerLocal<TypedArena<(V, DepNodeIndex)>>,
322 #[cfg(parallel_compiler)]
323 cache: Sharded<IndexVec<K, Option<&'tcx (V, DepNodeIndex)>>>,
324 #[cfg(not(parallel_compiler))]
325 cache: Lock<IndexVec<K, Option<&'tcx (V, DepNodeIndex)>>>,
// One `TypedArena` per worker thread; the index-vec cache starts empty.
328 impl<'tcx, K: Idx, V> Default for VecArenaCache<'tcx, K, V> {
329 fn default() -> Self {
331 arena: WorkerLocal::new(|_| TypedArena::default()),
332 cache: Default::default(),
337 impl<'tcx, K: Eq + Idx, V: Debug + 'tcx> QueryStorage for VecArenaCache<'tcx, K, V> {
// Callers receive a reference into the arena rather than an owned value.
339 type Stored = &'tcx V;
// Arena-allocate without caching (cycle-error path); dep-node is INVALID
// because no real dep-graph node produced this value.
342 fn store_nocache(&self, value: Self::Value) -> Self::Stored {
343 let value = self.arena.alloc((value, DepNodeIndex::INVALID));
// SAFETY(review): lifetime-extends the arena borrow to 'tcx, same pattern
// as `ArenaCache::store_nocache`; sound only while the arena outlives every
// 'tcx user — confirm at the cache's owner.
344 let value = unsafe { &*(&value.0 as *const _) };
349 impl<'tcx, K, V: 'tcx> QueryCache for VecArenaCache<'tcx, K, V>
351 K: Eq + Idx + Clone + Debug,
// Dense-key lookup: index selects the shard directly (no hashing); the hit
// callback sees `&&'tcx V` because slots store arena references.
357 fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
359 OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R,
361 #[cfg(parallel_compiler)]
362 let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
363 #[cfg(not(parallel_compiler))]
364 let lock = self.cache.lock();
// Outer `Option`: index in bounds; inner `Option`: slot populated.
365 if let Some(Some(value)) = lock.get(*key) {
366 let hit_result = on_hit(&&value.0, value.1);
// Arena-allocate the value, lifetime-extend the reference, and store it in
// the slot for `key`; no clone of `V` is needed.
374 fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
375 let value = self.arena.alloc((value, index));
// SAFETY(review): same 'tcx extension as `store_nocache` above — relies on
// the arena outliving 'tcx users; confirm at the owner.
376 let value = unsafe { &*(value as *const _) };
377 #[cfg(parallel_compiler)]
378 let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
379 #[cfg(not(parallel_compiler))]
380 let mut lock = self.cache.lock();
381 lock.insert(key, value);
// Iterate populated slots; `iter_enumerated` yields (index, slot) pairs.
385 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
386 #[cfg(parallel_compiler)]
388 let shards = self.cache.lock_shards();
389 for shard in shards.iter() {
390 for (k, v) in shard.iter_enumerated() {
397 #[cfg(not(parallel_compiler))]
399 let map = self.cache.lock();
400 for (k, v) in map.iter_enumerated() {