1 use crate::dep_graph::DepNodeIndex;
3 use rustc_arena::TypedArena;
4 use rustc_data_structures::fx::FxHashMap;
5 use rustc_data_structures::sharded;
6 #[cfg(parallel_compiler)]
7 use rustc_data_structures::sharded::Sharded;
8 #[cfg(not(parallel_compiler))]
9 use rustc_data_structures::sync::Lock;
10 use rustc_data_structures::sync::WorkerLocal;
11 use rustc_index::vec::{Idx, IndexVec};
12 use std::default::Default;
15 use std::marker::PhantomData;
// Maps a query key type to the concrete cache type used for it. Implementors
// below provide a `Cache` and an `ArenaCache` associated type.
// NOTE(review): the trait body is not visible in this chunk — lines are
// missing from the extracted source; do not infer the full item list from here.
17 pub trait CacheSelector<'tcx, V> {
// Storage interface for computed query values. From the visible signature,
// implementors convert an owned `Self::Value` into the form handed back to
// callers (`Self::Stored`); the associated type declarations themselves are
// in lines missing from this chunk.
24 pub trait QueryStorage {
28 /// Store a value without putting it in the cache.
29 /// This is meant to be used with cycle errors.
30 fn store_nocache(&self, value: Self::Value) -> Self::Stored;
// Full query-cache interface: lookup, insertion (`complete`), and iteration.
// Builds on `QueryStorage` for the value-storage policy.
// NOTE(review): several lines of this trait (the `lookup` signature header and
// part of its doc comment) are missing from this chunk.
33 pub trait QueryCache: QueryStorage + Sized {
34 type Key: Hash + Eq + Clone + Debug;
36 /// Checks if the query is already computed and in the cache.
37 /// It returns the shard index and a lock guard to the shard,
38 /// which will be used if the query is not in the cache and we need
43 // `on_hit` can be called while holding a lock to the query state shard.
47 OnHit: FnOnce(&Self::Stored, DepNodeIndex) -> R;
// Record a finished query: associate `key` with `value` and the dep-graph
// node `index`, returning the stored form of the value.
49 fn complete(&self, key: Self::Key, value: Self::Value, index: DepNodeIndex) -> Self::Stored;
// Visit every cached (key, value, dep-node) triple.
51 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex));
// Zero-sized selector marking "use the hash-map-backed caches" for key type K.
// `PhantomData` carries the key type without storing one.
54 pub struct DefaultCacheSelector<K>(PhantomData<K>);
// Wires `DefaultCacheSelector` to its two cache flavors: a plain hash-map
// cache and an arena-backed one (values allocated for 'tcx).
// NOTE(review): line(s) between the two associated types are missing here,
// including whatever terminates the `type Cache = ...` item.
56 impl<'tcx, K: Eq + Hash, V: 'tcx> CacheSelector<'tcx, V> for DefaultCacheSelector<K> {
57 type Cache = DefaultCache<K, V>
60 type ArenaCache = ArenaCache<'tcx, K, V>;
// Hash-map cache from query key to (value, dep-graph node). Under the
// parallel compiler the map is sharded (presumably to reduce lock
// contention — the `Sharded` internals are in another crate); otherwise a
// single `Lock` guards one map.
63 pub struct DefaultCache<K, V> {
64 #[cfg(parallel_compiler)]
65 cache: Sharded<FxHashMap<K, (V, DepNodeIndex)>>,
66 #[cfg(not(parallel_compiler))]
67 cache: Lock<FxHashMap<K, (V, DepNodeIndex)>>,
// An empty cache; delegates to the inner map/shard container's `Default`.
70 impl<K, V> Default for DefaultCache<K, V> {
71 fn default() -> Self {
72 DefaultCache { cache: Default::default() }
// Storage policy for the plain hash-map cache: values live inside the map
// itself, so `store_nocache` has nothing to allocate — per the original
// comment, there is no dedicated storage. (The associated-type declarations
// and the function body are in lines missing from this chunk.)
76 impl<K: Eq + Hash, V: Clone + Debug> QueryStorage for DefaultCache<K, V> {
81 fn store_nocache(&self, value: Self::Value) -> Self::Stored {
82 // We have no dedicated storage
// `QueryCache` for the hash-map cache. The same hash is reused for shard
// selection and for the raw-entry lookup so the key is hashed only once.
// NOTE(review): the `where` clause, associated types, and several closing
// lines are missing from this chunk.
87 impl<K, V> QueryCache for DefaultCache<K, V>
89 K: Eq + Hash + Clone + Debug,
95 fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
97 OnHit: FnOnce(&V, DepNodeIndex) -> R,
// Hash once; the same hash picks the shard (parallel) and drives the
// `from_key_hashed_nocheck` lookup below.
99 let key_hash = sharded::make_hash(key);
100 #[cfg(parallel_compiler)]
101 let lock = self.cache.get_shard_by_hash(key_hash).lock();
102 #[cfg(not(parallel_compiler))]
103 let lock = self.cache.lock();
// Raw-entry API: look up with the precomputed hash, avoiding re-hashing.
104 let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
106 if let Some((_, value)) = result {
// Cache hit: hand (value, dep-node-index) to the caller's callback.
107 let hit_result = on_hit(&value.0, value.1);
115 fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
116 #[cfg(parallel_compiler)]
117 let mut lock = self.cache.get_shard_by_value(&key).lock();
118 #[cfg(not(parallel_compiler))]
119 let mut lock = self.cache.lock();
120 // We may be overwriting another value. This is all right, since the dep-graph
121 // will check that the fingerprint matches.
122 lock.insert(key, (value.clone(), index));
126 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
127 #[cfg(parallel_compiler)]
// Parallel path: lock every shard, then walk each shard's map.
129 let shards = self.cache.lock_shards();
130 for shard in shards.iter() {
131 for (k, v) in shard.iter() {
136 #[cfg(not(parallel_compiler))]
138 let map = self.cache.lock();
139 for (k, v) in map.iter() {
// Like `DefaultCache`, but (value, dep-node) pairs are allocated in a
// per-worker `TypedArena` and the map stores `'tcx` references into it,
// so callers can receive `&'tcx V` instead of cloning values out.
146 pub struct ArenaCache<'tcx, K, V> {
147 arena: WorkerLocal<TypedArena<(V, DepNodeIndex)>>,
148 #[cfg(parallel_compiler)]
149 cache: Sharded<FxHashMap<K, &'tcx (V, DepNodeIndex)>>,
150 #[cfg(not(parallel_compiler))]
151 cache: Lock<FxHashMap<K, &'tcx (V, DepNodeIndex)>>,
// Empty cache with a fresh arena per worker thread.
154 impl<'tcx, K, V> Default for ArenaCache<'tcx, K, V> {
155 fn default() -> Self {
156 ArenaCache { arena: WorkerLocal::new(|_| TypedArena::default()), cache: Default::default() }
// Arena-backed storage: even uncached values (cycle errors) are arena-
// allocated so a `&'tcx V` can be returned; the dep-node slot is filled
// with `INVALID` since no graph node exists for them.
160 impl<'tcx, K: Eq + Hash, V: Debug + 'tcx> QueryStorage for ArenaCache<'tcx, K, V> {
162 type Stored = &'tcx V;
165 fn store_nocache(&self, value: Self::Value) -> Self::Stored {
166 let value = self.arena.alloc((value, DepNodeIndex::INVALID));
// Cast launders the arena borrow into `&'tcx V`. NOTE(review): soundness
// relies on the arena never freeing allocations for the life of 'tcx —
// not verifiable from this chunk; a `// SAFETY:` comment stating that
// invariant would be appropriate here.
167 let value = unsafe { &*(&value.0 as *const _) };
// `QueryCache` for the arena cache: identical locking/lookup scheme to
// `DefaultCache`, but `complete` allocates the pair in the arena and the
// map stores a reference instead of an owned value (no clone on insert).
// NOTE(review): `where` clause, associated types, and closing lines are
// missing from this chunk.
172 impl<'tcx, K, V: 'tcx> QueryCache for ArenaCache<'tcx, K, V>
174 K: Eq + Hash + Clone + Debug,
180 fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
182 OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R,
// Hash once; reused for shard selection and the raw-entry lookup.
184 let key_hash = sharded::make_hash(key);
185 #[cfg(parallel_compiler)]
186 let lock = self.cache.get_shard_by_hash(key_hash).lock();
187 #[cfg(not(parallel_compiler))]
188 let lock = self.cache.lock();
189 let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
191 if let Some((_, value)) = result {
// `value` is `&'tcx (V, DepNodeIndex)`; hand `&&'tcx V` to the callback.
192 let hit_result = on_hit(&&value.0, value.1);
200 fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
// Allocate in the arena first, then launder the borrow to `'tcx`.
// NOTE(review): same unsafe lifetime-extension pattern as
// `store_nocache`; deserves an explicit SAFETY comment.
201 let value = self.arena.alloc((value, index));
202 let value = unsafe { &*(value as *const _) };
203 #[cfg(parallel_compiler)]
204 let mut lock = self.cache.get_shard_by_value(&key).lock();
205 #[cfg(not(parallel_compiler))]
206 let mut lock = self.cache.lock();
207 // We may be overwriting another value. This is all right, since the dep-graph
208 // will check that the fingerprint matches.
209 lock.insert(key, value);
213 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
214 #[cfg(parallel_compiler)]
// Parallel path: lock all shards, then walk each shard's map.
216 let shards = self.cache.lock_shards();
217 for shard in shards.iter() {
218 for (k, v) in shard.iter() {
223 #[cfg(not(parallel_compiler))]
225 let map = self.cache.lock();
226 for (k, v) in map.iter() {
// Zero-sized selector for dense, index-keyed caches (keys implement `Idx`).
233 pub struct VecCacheSelector<K>(PhantomData<K>);
// Wires `VecCacheSelector` to the `IndexVec`-backed cache flavors.
// NOTE(review): the line terminating `type Cache = ...` is missing here.
235 impl<'tcx, K: Idx, V: 'tcx> CacheSelector<'tcx, V> for VecCacheSelector<K> {
236 type Cache = VecCache<K, V>
239 type ArenaCache = VecArenaCache<'tcx, K, V>;
// Dense cache for index-like keys: an `IndexVec` slot per key, `None` when
// that key has not been computed. Sharded/locked like `DefaultCache`.
242 pub struct VecCache<K: Idx, V> {
243 #[cfg(parallel_compiler)]
244 cache: Sharded<IndexVec<K, Option<(V, DepNodeIndex)>>>,
245 #[cfg(not(parallel_compiler))]
246 cache: Lock<IndexVec<K, Option<(V, DepNodeIndex)>>>,
// An empty index-vec cache.
249 impl<K: Idx, V> Default for VecCache<K, V> {
250 fn default() -> Self {
251 VecCache { cache: Default::default() }
// Same storage policy as `DefaultCache`: values live in the vec itself, so
// `store_nocache` needs no dedicated storage. (Associated types and the
// function body are in lines missing from this chunk.)
255 impl<K: Eq + Idx, V: Clone + Debug> QueryStorage for VecCache<K, V> {
260 fn store_nocache(&self, value: Self::Value) -> Self::Stored {
261 // We have no dedicated storage
// `QueryCache` for the dense cache. The key's integer index (not a hash)
// selects the shard; lookup is a bounds-checked `get` on the `IndexVec`.
// NOTE(review): `where` clause, associated types, and closing lines are
// missing from this chunk.
266 impl<K, V> QueryCache for VecCache<K, V>
268 K: Eq + Idx + Clone + Debug,
274 fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
276 OnHit: FnOnce(&V, DepNodeIndex) -> R,
278 #[cfg(parallel_compiler)]
// Shard by the raw index value rather than hashing the key.
279 let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
280 #[cfg(not(parallel_compiler))]
281 let lock = self.cache.lock();
// Hit only when the slot exists *and* is populated (`Some(Some(..))`).
282 if let Some(Some(value)) = lock.get(*key) {
283 let hit_result = on_hit(&value.0, value.1);
291 fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
292 #[cfg(parallel_compiler)]
293 let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
294 #[cfg(not(parallel_compiler))]
295 let mut lock = self.cache.lock();
// `IndexVec::insert` fills the slot at `key` (growing as needed —
// presumed from the IndexVec API; confirm against rustc_index).
296 lock.insert(key, (value.clone(), index));
300 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
301 #[cfg(parallel_compiler)]
// Enumerated iteration recovers the key from each slot's index.
303 let shards = self.cache.lock_shards();
304 for shard in shards.iter() {
305 for (k, v) in shard.iter_enumerated() {
312 #[cfg(not(parallel_compiler))]
314 let map = self.cache.lock();
315 for (k, v) in map.iter_enumerated() {
// Dense cache whose (value, dep-node) pairs live in a per-worker arena;
// each `IndexVec` slot holds an optional `'tcx` reference into it.
// Combines `VecCache`'s index-keyed layout with `ArenaCache`'s storage.
324 pub struct VecArenaCache<'tcx, K: Idx, V> {
325 arena: WorkerLocal<TypedArena<(V, DepNodeIndex)>>,
326 #[cfg(parallel_compiler)]
327 cache: Sharded<IndexVec<K, Option<&'tcx (V, DepNodeIndex)>>>,
328 #[cfg(not(parallel_compiler))]
329 cache: Lock<IndexVec<K, Option<&'tcx (V, DepNodeIndex)>>>,
// Empty cache with a fresh arena per worker thread.
332 impl<'tcx, K: Idx, V> Default for VecArenaCache<'tcx, K, V> {
333 fn default() -> Self {
335 arena: WorkerLocal::new(|_| TypedArena::default()),
336 cache: Default::default(),
// Arena-backed storage, mirroring `ArenaCache::store_nocache`: uncached
// (cycle-error) values are arena-allocated with an `INVALID` dep-node so a
// `&'tcx V` can still be handed out.
341 impl<'tcx, K: Eq + Idx, V: Debug + 'tcx> QueryStorage for VecArenaCache<'tcx, K, V> {
343 type Stored = &'tcx V;
346 fn store_nocache(&self, value: Self::Value) -> Self::Stored {
347 let value = self.arena.alloc((value, DepNodeIndex::INVALID));
// Launders the arena borrow to `'tcx`. NOTE(review): same unsafe
// lifetime extension as `ArenaCache`; relies on the arena outliving
// 'tcx — not verifiable from this chunk; deserves a SAFETY comment.
348 let value = unsafe { &*(&value.0 as *const _) };
// `QueryCache` for the arena-backed dense cache: index-based sharding like
// `VecCache`, arena allocation on `complete` like `ArenaCache`.
// NOTE(review): `where` clause, associated types, and closing lines are
// missing from this chunk.
353 impl<'tcx, K, V: 'tcx> QueryCache for VecArenaCache<'tcx, K, V>
355 K: Eq + Idx + Clone + Debug,
361 fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
363 OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R,
365 #[cfg(parallel_compiler)]
// Shard by the raw index value rather than hashing the key.
366 let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
367 #[cfg(not(parallel_compiler))]
368 let lock = self.cache.lock();
// Hit only when the slot exists and is populated.
369 if let Some(Some(value)) = lock.get(*key) {
370 let hit_result = on_hit(&&value.0, value.1);
378 fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
// Arena-allocate, then launder the borrow to `'tcx` (see the NOTE on
// the unsafe pattern in `store_nocache` above — applies here too).
379 let value = self.arena.alloc((value, index));
380 let value = unsafe { &*(value as *const _) };
381 #[cfg(parallel_compiler)]
382 let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
383 #[cfg(not(parallel_compiler))]
384 let mut lock = self.cache.lock();
385 lock.insert(key, value);
389 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
390 #[cfg(parallel_compiler)]
// Enumerated iteration recovers the key from each slot's index.
392 let shards = self.cache.lock_shards();
393 for shard in shards.iter() {
394 for (k, v) in shard.iter_enumerated() {
401 #[cfg(not(parallel_compiler))]
403 let map = self.cache.lock();
404 for (k, v) in map.iter_enumerated() {