use crate::dep_graph::DepNodeIndex;

use rustc_arena::TypedArena;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sharded;
#[cfg(parallel_compiler)]
use rustc_data_structures::sharded::Sharded;
#[cfg(not(parallel_compiler))]
use rustc_data_structures::sync::Lock;
use rustc_data_structures::sync::WorkerLocal;
use rustc_index::vec::{Idx, IndexVec};

use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
/// Selects the concrete cache implementation for a query, based on its key
/// type: a plain in-memory cache (`Cache`) and an arena-backed variant
/// (`ArenaCache`) that hands out `&'tcx` references instead of clones.
pub trait CacheSelector<'tcx, V> {
    // NOTE(review): the `where V: Clone` bound is reconstructed from the
    // `V: Clone + Debug` bounds on the cache impls below — confirm against
    // the upstream file.
    type Cache
    where
        V: Clone;
    type ArenaCache;
}
/// Storage backing for query results: defines how a computed value is held
/// (`Value`) and what form callers receive (`Stored`).
pub trait QueryStorage {
    /// The type produced by the query provider.
    type Value: Debug;
    /// The form handed back to callers — e.g. a clone of the value, or an
    /// arena reference for arena-backed caches.
    type Stored: Clone + Debug;

    /// Store a value without putting it in the cache.
    /// This is meant to be used with cycle errors.
    fn store_nocache(&self, value: Self::Value) -> Self::Stored;
}
32 pub trait QueryCache: QueryStorage + Sized {
33 type Key: Hash + Eq + Clone + Debug;
35 /// Checks if the query is already computed and in the cache.
36 /// It returns the shard index and a lock guard to the shard,
37 /// which will be used if the query is not in the cache and we need
42 // `on_hit` can be called while holding a lock to the query state shard.
46 OnHit: FnOnce(&Self::Stored, DepNodeIndex) -> R;
48 fn complete(&self, key: Self::Key, value: Self::Value, index: DepNodeIndex) -> Self::Stored;
50 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex));
/// Zero-sized selector mapping a hashable key type `K` to the hash-map-based
/// caches below. `PhantomData` records `K` without storing one.
pub struct DefaultCacheSelector<K>(PhantomData<K>);
55 impl<'tcx, K: Eq + Hash, V: 'tcx> CacheSelector<'tcx, V> for DefaultCacheSelector<K> {
56 type Cache = DefaultCache<K, V>
59 type ArenaCache = ArenaCache<'tcx, K, V>;
62 pub struct DefaultCache<K, V> {
63 #[cfg(parallel_compiler)]
64 cache: Sharded<FxHashMap<K, (V, DepNodeIndex)>>,
65 #[cfg(not(parallel_compiler))]
66 cache: Lock<FxHashMap<K, (V, DepNodeIndex)>>,
69 impl<K, V> Default for DefaultCache<K, V> {
70 fn default() -> Self {
71 DefaultCache { cache: Default::default() }
75 impl<K: Eq + Hash, V: Clone + Debug> QueryStorage for DefaultCache<K, V> {
80 fn store_nocache(&self, value: Self::Value) -> Self::Stored {
81 // We have no dedicated storage
86 impl<K, V> QueryCache for DefaultCache<K, V>
88 K: Eq + Hash + Clone + Debug,
94 fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
96 OnHit: FnOnce(&V, DepNodeIndex) -> R,
98 let key_hash = sharded::make_hash(key);
99 #[cfg(parallel_compiler)]
100 let lock = self.cache.get_shard_by_hash(key_hash).lock();
101 #[cfg(not(parallel_compiler))]
102 let lock = self.cache.lock();
103 let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
105 if let Some((_, value)) = result {
106 let hit_result = on_hit(&value.0, value.1);
114 fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
115 #[cfg(parallel_compiler)]
116 let mut lock = self.cache.get_shard_by_value(&key).lock();
117 #[cfg(not(parallel_compiler))]
118 let mut lock = self.cache.lock();
119 // We may be overwriting another value. This is all right, since the dep-graph
120 // will check that the fingerprint matches.
121 lock.insert(key, (value.clone(), index));
125 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
126 #[cfg(parallel_compiler)]
128 let shards = self.cache.lock_shards();
129 for shard in shards.iter() {
130 for (k, v) in shard.iter() {
135 #[cfg(not(parallel_compiler))]
137 let map = self.cache.lock();
138 for (k, v) in map.iter() {
145 pub struct ArenaCache<'tcx, K, V> {
146 arena: WorkerLocal<TypedArena<(V, DepNodeIndex)>>,
147 #[cfg(parallel_compiler)]
148 cache: Sharded<FxHashMap<K, &'tcx (V, DepNodeIndex)>>,
149 #[cfg(not(parallel_compiler))]
150 cache: Lock<FxHashMap<K, &'tcx (V, DepNodeIndex)>>,
153 impl<'tcx, K, V> Default for ArenaCache<'tcx, K, V> {
154 fn default() -> Self {
155 ArenaCache { arena: WorkerLocal::new(|_| TypedArena::default()), cache: Default::default() }
159 impl<'tcx, K: Eq + Hash, V: Debug + 'tcx> QueryStorage for ArenaCache<'tcx, K, V> {
161 type Stored = &'tcx V;
164 fn store_nocache(&self, value: Self::Value) -> Self::Stored {
165 let value = self.arena.alloc((value, DepNodeIndex::INVALID));
166 let value = unsafe { &*(&value.0 as *const _) };
171 impl<'tcx, K, V: 'tcx> QueryCache for ArenaCache<'tcx, K, V>
173 K: Eq + Hash + Clone + Debug,
179 fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
181 OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R,
183 let key_hash = sharded::make_hash(key);
184 #[cfg(parallel_compiler)]
185 let lock = self.cache.get_shard_by_hash(key_hash).lock();
186 #[cfg(not(parallel_compiler))]
187 let lock = self.cache.lock();
188 let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
190 if let Some((_, value)) = result {
191 let hit_result = on_hit(&&value.0, value.1);
199 fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
200 let value = self.arena.alloc((value, index));
201 let value = unsafe { &*(value as *const _) };
202 #[cfg(parallel_compiler)]
203 let mut lock = self.cache.get_shard_by_value(&key).lock();
204 #[cfg(not(parallel_compiler))]
205 let mut lock = self.cache.lock();
206 // We may be overwriting another value. This is all right, since the dep-graph
207 // will check that the fingerprint matches.
208 lock.insert(key, value);
212 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
213 #[cfg(parallel_compiler)]
215 let shards = self.cache.lock_shards();
216 for shard in shards.iter() {
217 for (k, v) in shard.iter() {
222 #[cfg(not(parallel_compiler))]
224 let map = self.cache.lock();
225 for (k, v) in map.iter() {
/// Zero-sized selector mapping a dense-index key type `K: Idx` to the
/// `IndexVec`-based caches below.
pub struct VecCacheSelector<K>(PhantomData<K>);
234 impl<'tcx, K: Idx, V: 'tcx> CacheSelector<'tcx, V> for VecCacheSelector<K> {
235 type Cache = VecCache<K, V>
238 type ArenaCache = VecArenaCache<'tcx, K, V>;
241 pub struct VecCache<K: Idx, V> {
242 #[cfg(parallel_compiler)]
243 cache: Sharded<IndexVec<K, Option<(V, DepNodeIndex)>>>,
244 #[cfg(not(parallel_compiler))]
245 cache: Lock<IndexVec<K, Option<(V, DepNodeIndex)>>>,
248 impl<K: Idx, V> Default for VecCache<K, V> {
249 fn default() -> Self {
250 VecCache { cache: Default::default() }
254 impl<K: Eq + Idx, V: Clone + Debug> QueryStorage for VecCache<K, V> {
259 fn store_nocache(&self, value: Self::Value) -> Self::Stored {
260 // We have no dedicated storage
265 impl<K, V> QueryCache for VecCache<K, V>
267 K: Eq + Idx + Clone + Debug,
273 fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
275 OnHit: FnOnce(&V, DepNodeIndex) -> R,
277 #[cfg(parallel_compiler)]
278 let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
279 #[cfg(not(parallel_compiler))]
280 let lock = self.cache.lock();
281 if let Some(Some(value)) = lock.get(*key) {
282 let hit_result = on_hit(&value.0, value.1);
290 fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
291 #[cfg(parallel_compiler)]
292 let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
293 #[cfg(not(parallel_compiler))]
294 let mut lock = self.cache.lock();
295 lock.insert(key, (value.clone(), index));
299 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
300 #[cfg(parallel_compiler)]
302 let shards = self.cache.lock_shards();
303 for shard in shards.iter() {
304 for (k, v) in shard.iter_enumerated() {
311 #[cfg(not(parallel_compiler))]
313 let map = self.cache.lock();
314 for (k, v) in map.iter_enumerated() {
323 pub struct VecArenaCache<'tcx, K: Idx, V> {
324 arena: WorkerLocal<TypedArena<(V, DepNodeIndex)>>,
325 #[cfg(parallel_compiler)]
326 cache: Sharded<IndexVec<K, Option<&'tcx (V, DepNodeIndex)>>>,
327 #[cfg(not(parallel_compiler))]
328 cache: Lock<IndexVec<K, Option<&'tcx (V, DepNodeIndex)>>>,
331 impl<'tcx, K: Idx, V> Default for VecArenaCache<'tcx, K, V> {
332 fn default() -> Self {
334 arena: WorkerLocal::new(|_| TypedArena::default()),
335 cache: Default::default(),
340 impl<'tcx, K: Eq + Idx, V: Debug + 'tcx> QueryStorage for VecArenaCache<'tcx, K, V> {
342 type Stored = &'tcx V;
345 fn store_nocache(&self, value: Self::Value) -> Self::Stored {
346 let value = self.arena.alloc((value, DepNodeIndex::INVALID));
347 let value = unsafe { &*(&value.0 as *const _) };
352 impl<'tcx, K, V: 'tcx> QueryCache for VecArenaCache<'tcx, K, V>
354 K: Eq + Idx + Clone + Debug,
360 fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
362 OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R,
364 #[cfg(parallel_compiler)]
365 let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
366 #[cfg(not(parallel_compiler))]
367 let lock = self.cache.lock();
368 if let Some(Some(value)) = lock.get(*key) {
369 let hit_result = on_hit(&&value.0, value.1);
377 fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
378 let value = self.arena.alloc((value, index));
379 let value = unsafe { &*(value as *const _) };
380 #[cfg(parallel_compiler)]
381 let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
382 #[cfg(not(parallel_compiler))]
383 let mut lock = self.cache.lock();
384 lock.insert(key, value);
388 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
389 #[cfg(parallel_compiler)]
391 let shards = self.cache.lock_shards();
392 for shard in shards.iter() {
393 for (k, v) in shard.iter_enumerated() {
400 #[cfg(not(parallel_compiler))]
402 let map = self.cache.lock();
403 for (k, v) in map.iter_enumerated() {