1 use crate::dep_graph::DepNodeIndex;
3 use rustc_arena::TypedArena;
4 use rustc_data_structures::fx::FxHashMap;
5 use rustc_data_structures::sharded;
6 #[cfg(parallel_compiler)]
7 use rustc_data_structures::sharded::Sharded;
8 #[cfg(not(parallel_compiler))]
9 use rustc_data_structures::sync::Lock;
10 use rustc_data_structures::sync::WorkerLocal;
11 use rustc_index::vec::{Idx, IndexVec};
14 use std::marker::PhantomData;
// Chooses the concrete cache type(s) used to store results of value type `V`.
// NOTE(review): the trait body is truncated in this excerpt; the impls below
// suggest it declares `type Cache` and `type ArenaCache` — confirm in the full file.
16 pub trait CacheSelector<'tcx, V> {
// Backing storage for query results. Implementors define how a computed
// `Self::Value` is retained and what handle (`Self::Stored`) callers receive.
// NOTE(review): the `Value`/`Stored` associated-type declarations are not
// visible in this excerpt but are referenced by `store_nocache` below.
23 pub trait QueryStorage {
27 /// Store a value without putting it in the cache.
28 /// This is meant to be used with cycle errors.
29 fn store_nocache(&self, value: Self::Value) -> Self::Stored;
// A keyed cache of query results, layered on top of `QueryStorage`.
// Each entry pairs a value with the `DepNodeIndex` it was computed under.
32 pub trait QueryCache: QueryStorage + Sized {
33 type Key: Hash + Eq + Clone + Debug;
35 /// Checks if the query is already computed and in the cache.
36 /// It returns the shard index and a lock guard to the shard,
37 /// which will be used if the query is not in the cache and we need
// NOTE(review): the doc comment above looks stale — the signature returns
// `Option<(Self::Stored, DepNodeIndex)>`, not a shard index/lock guard.
// Consider rewording it in the full file.
39 fn lookup(&self, key: &Self::Key) -> Option<(Self::Stored, DepNodeIndex)>;
// Record a freshly computed value under `key`; returns the stored handle.
41 fn complete(&self, key: Self::Key, value: Self::Value, index: DepNodeIndex) -> Self::Stored;
// Visit every cached (key, value, dep-node) triple, e.g. for cache dumps.
43 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex));
// Zero-sized selector for the hash-map-based caches below; `PhantomData`
// carries the key type `K` without storing anything.
46 pub struct DefaultCacheSelector<K>(PhantomData<K>);
// Hash-map-keyed queries use `DefaultCache` for by-value storage and
// `ArenaCache` when results must live in an arena for `'tcx` references.
48 impl<'tcx, K: Eq + Hash, V: 'tcx> CacheSelector<'tcx, V> for DefaultCacheSelector<K> {
49 type Cache = DefaultCache<K, V>
52 type ArenaCache = ArenaCache<'tcx, K, V>;
// Hash-map cache storing `(value, dep_node_index)` pairs by value.
// Sharded across locks when the parallel compiler is enabled to reduce
// contention; a single `Lock`-wrapped map otherwise.
55 pub struct DefaultCache<K, V> {
56 #[cfg(parallel_compiler)]
57 cache: Sharded<FxHashMap<K, (V, DepNodeIndex)>>,
58 #[cfg(not(parallel_compiler))]
59 cache: Lock<FxHashMap<K, (V, DepNodeIndex)>>,
// Start with an empty map (or empty shards, under the parallel compiler).
62 impl<K, V> Default for DefaultCache<K, V> {
63 fn default() -> Self {
64 DefaultCache { cache: Default::default() }
// `V: Copy` lets this cache hand values back by value; there is no
// separate storage, so `store_nocache` has nothing to record.
68 impl<K: Eq + Hash, V: Copy + Debug> QueryStorage for DefaultCache<K, V> {
73 fn store_nocache(&self, value: Self::Value) -> Self::Stored {
74 // We have no dedicated storage
// NOTE(review): body truncated here — presumably just returns `value`;
// confirm against the full file.
// Cache operations for the by-value hash-map cache.
79 impl<K, V> QueryCache for DefaultCache<K, V>
81 K: Eq + Hash + Clone + Debug,
// Look up `key`: hash once, use the hash both to pick the shard (parallel
// compiler) and for the raw-entry lookup, avoiding a second hashing pass.
87 fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
88 let key_hash = sharded::make_hash(key);
89 #[cfg(parallel_compiler)]
90 let lock = self.cache.get_shard_by_hash(key_hash).lock();
91 #[cfg(not(parallel_compiler))]
92 let lock = self.cache.lock();
93 let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
// Copy the `(V, DepNodeIndex)` pair out (`V: Copy` on the storage impl).
95 if let Some((_, value)) = result { Some(*value) } else { None }
// Insert a freshly computed value for `key`.
99 fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
100 #[cfg(parallel_compiler)]
101 let mut lock = self.cache.get_shard_by_value(&key).lock();
102 #[cfg(not(parallel_compiler))]
103 let mut lock = self.cache.lock();
104 // We may be overwriting another value. This is all right, since the dep-graph
105 // will check that the fingerprint matches.
106 lock.insert(key, (value.clone(), index));
// Visit every entry; locks all shards at once under the parallel compiler.
// NOTE(review): the per-entry call to `f` is truncated from this excerpt.
110 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
111 #[cfg(parallel_compiler)]
113 let shards = self.cache.lock_shards();
114 for shard in shards.iter() {
115 for (k, v) in shard.iter() {
120 #[cfg(not(parallel_compiler))]
122 let map = self.cache.lock();
123 for (k, v) in map.iter() {
// Like `DefaultCache`, but values are allocated in a per-worker arena and
// the map stores `&'tcx` references to them, so `Stored` can be a borrow
// that outlives any individual lock guard.
130 pub struct ArenaCache<'tcx, K, V> {
131 arena: WorkerLocal<TypedArena<(V, DepNodeIndex)>>,
132 #[cfg(parallel_compiler)]
133 cache: Sharded<FxHashMap<K, &'tcx (V, DepNodeIndex)>>,
134 #[cfg(not(parallel_compiler))]
135 cache: Lock<FxHashMap<K, &'tcx (V, DepNodeIndex)>>,
// Fresh arena per worker thread, empty map(s).
138 impl<'tcx, K, V> Default for ArenaCache<'tcx, K, V> {
139 fn default() -> Self {
140 ArenaCache { arena: WorkerLocal::new(|_| TypedArena::default()), cache: Default::default() }
// Arena-backed storage: `Stored` is a `'tcx` borrow of the arena slot.
144 impl<'tcx, K: Eq + Hash, V: Debug + 'tcx> QueryStorage for ArenaCache<'tcx, K, V> {
146 type Stored = &'tcx V;
// Allocate outside the cache (used for cycle errors); `INVALID` marks the
// entry as not belonging to any dep node.
149 fn store_nocache(&self, value: Self::Value) -> Self::Stored {
150 let value = self.arena.alloc((value, DepNodeIndex::INVALID));
// SAFETY-relevant: the raw-pointer round-trip extends the borrow to `'tcx`.
// Sound only if the arena is never dropped before `'tcx` ends — presumably
// guaranteed by how the query system owns these caches; confirm in full file.
151 let value = unsafe { &*(&value.0 as *const _) };
// Cache operations for the arena-backed hash-map cache.
156 impl<'tcx, K, V: 'tcx> QueryCache for ArenaCache<'tcx, K, V>
158 K: Eq + Hash + Clone + Debug,
// Look up `key`; the stored reference lets us return `(&'tcx V, index)`
// without copying the value. Hash computed once and reused (see below).
164 fn lookup(&self, key: &K) -> Option<(&'tcx V, DepNodeIndex)> {
165 let key_hash = sharded::make_hash(key);
166 #[cfg(parallel_compiler)]
167 let lock = self.cache.get_shard_by_hash(key_hash).lock();
168 #[cfg(not(parallel_compiler))]
169 let lock = self.cache.lock();
170 let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
// `value` is the `&'tcx (V, DepNodeIndex)` stored in the map.
172 if let Some((_, value)) = result { Some((&value.0, value.1)) } else { None }
// Allocate the pair in the arena, extend its borrow to `'tcx` (same
// soundness argument as `store_nocache`), then publish it in the map.
176 fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
177 let value = self.arena.alloc((value, index));
178 let value = unsafe { &*(value as *const _) };
179 #[cfg(parallel_compiler)]
180 let mut lock = self.cache.get_shard_by_value(&key).lock();
181 #[cfg(not(parallel_compiler))]
182 let mut lock = self.cache.lock();
183 // We may be overwriting another value. This is all right, since the dep-graph
184 // will check that the fingerprint matches.
185 lock.insert(key, value);
// Visit every entry; mirrors `DefaultCache::iter`.
// NOTE(review): the per-entry call to `f` is truncated from this excerpt.
189 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
190 #[cfg(parallel_compiler)]
192 let shards = self.cache.lock_shards();
193 for shard in shards.iter() {
194 for (k, v) in shard.iter() {
199 #[cfg(not(parallel_compiler))]
201 let map = self.cache.lock();
202 for (k, v) in map.iter() {
// Zero-sized selector for the index-vector caches below, used when the key
// is a dense index type (`K: Idx`).
209 pub struct VecCacheSelector<K>(PhantomData<K>);
// Index-keyed queries use `VecCache` (by value) or `VecArenaCache`
// (arena-allocated, `&'tcx` results).
211 impl<'tcx, K: Idx, V: 'tcx> CacheSelector<'tcx, V> for VecCacheSelector<K> {
212 type Cache = VecCache<K, V>
215 type ArenaCache = VecArenaCache<'tcx, K, V>;
// Dense cache indexed directly by `K: Idx`: an `IndexVec` slot per key,
// `None` meaning "not yet computed". Sharded under the parallel compiler.
218 pub struct VecCache<K: Idx, V> {
219 #[cfg(parallel_compiler)]
220 cache: Sharded<IndexVec<K, Option<(V, DepNodeIndex)>>>,
221 #[cfg(not(parallel_compiler))]
222 cache: Lock<IndexVec<K, Option<(V, DepNodeIndex)>>>,
// Start with an empty index vector (or empty shards).
225 impl<K: Idx, V> Default for VecCache<K, V> {
226 fn default() -> Self {
227 VecCache { cache: Default::default() }
// `V: Copy` — values are returned by value; nothing to store outside the cache.
231 impl<K: Eq + Idx, V: Copy + Debug> QueryStorage for VecCache<K, V> {
236 fn store_nocache(&self, value: Self::Value) -> Self::Stored {
237 // We have no dedicated storage
// NOTE(review): body truncated here — presumably just returns `value`;
// confirm against the full file.
// Cache operations for the dense index-vector cache.
242 impl<K, V> QueryCache for VecCache<K, V>
244 K: Eq + Idx + Clone + Debug,
// Look up `key` by direct indexing; the key's integer index doubles as the
// shard hash under the parallel compiler (no hashing needed).
250 fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
251 #[cfg(parallel_compiler)]
252 let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
253 #[cfg(not(parallel_compiler))]
254 let lock = self.cache.lock();
// `get` is checked: an out-of-range or unfilled slot yields `None`.
255 if let Some(Some(value)) = lock.get(*key) { Some(*value) } else { None }
// Insert a freshly computed value at the key's slot (growing as needed).
259 fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
260 #[cfg(parallel_compiler)]
261 let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
262 #[cfg(not(parallel_compiler))]
263 let mut lock = self.cache.lock();
264 lock.insert(key, (value.clone(), index));
// Visit every filled slot; `iter_enumerated` yields (index, Option<entry>).
// NOTE(review): the per-entry call to `f` is truncated from this excerpt.
268 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
269 #[cfg(parallel_compiler)]
271 let shards = self.cache.lock_shards();
272 for shard in shards.iter() {
273 for (k, v) in shard.iter_enumerated() {
280 #[cfg(not(parallel_compiler))]
282 let map = self.cache.lock();
283 for (k, v) in map.iter_enumerated() {
// Dense index-keyed cache whose values live in a per-worker arena; each
// slot holds an optional `&'tcx` reference into that arena.
292 pub struct VecArenaCache<'tcx, K: Idx, V> {
293 arena: WorkerLocal<TypedArena<(V, DepNodeIndex)>>,
294 #[cfg(parallel_compiler)]
295 cache: Sharded<IndexVec<K, Option<&'tcx (V, DepNodeIndex)>>>,
296 #[cfg(not(parallel_compiler))]
297 cache: Lock<IndexVec<K, Option<&'tcx (V, DepNodeIndex)>>>,
// Fresh arena per worker thread, empty index vector(s).
300 impl<'tcx, K: Idx, V> Default for VecArenaCache<'tcx, K, V> {
301 fn default() -> Self {
303 arena: WorkerLocal::new(|_| TypedArena::default()),
304 cache: Default::default(),
// Arena-backed storage: `Stored` is a `'tcx` borrow of the arena slot.
309 impl<'tcx, K: Eq + Idx, V: Debug + 'tcx> QueryStorage for VecArenaCache<'tcx, K, V> {
311 type Stored = &'tcx V;
// Allocate outside the cache (cycle errors); `INVALID` = no dep node.
314 fn store_nocache(&self, value: Self::Value) -> Self::Stored {
315 let value = self.arena.alloc((value, DepNodeIndex::INVALID));
// SAFETY-relevant: raw-pointer round-trip extends the borrow to `'tcx`;
// sound only while the arena outlives `'tcx` — confirm in full file.
316 let value = unsafe { &*(&value.0 as *const _) };
// Cache operations for the arena-backed index-vector cache.
321 impl<'tcx, K, V: 'tcx> QueryCache for VecArenaCache<'tcx, K, V>
323 K: Eq + Idx + Clone + Debug,
// Look up `key` by direct indexing; the integer index doubles as the shard
// hash. Returns a `'tcx` borrow of the arena-allocated value.
329 fn lookup(&self, key: &K) -> Option<(&'tcx V, DepNodeIndex)> {
330 #[cfg(parallel_compiler)]
331 let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
332 #[cfg(not(parallel_compiler))]
333 let lock = self.cache.lock();
334 if let Some(Some(value)) = lock.get(*key) { Some((&value.0, value.1)) } else { None }
// Allocate in the arena, extend the borrow to `'tcx` (same soundness
// argument as `store_nocache`), then publish the reference in the slot.
338 fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
339 let value = self.arena.alloc((value, index));
340 let value = unsafe { &*(value as *const _) };
341 #[cfg(parallel_compiler)]
342 let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
343 #[cfg(not(parallel_compiler))]
344 let mut lock = self.cache.lock();
345 lock.insert(key, value);
// Visit every filled slot; mirrors `VecCache::iter`.
// NOTE(review): the per-entry call to `f` is truncated from this excerpt.
349 fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
350 #[cfg(parallel_compiler)]
352 let shards = self.cache.lock_shards();
353 for shard in shards.iter() {
354 for (k, v) in shard.iter_enumerated() {
361 #[cfg(not(parallel_compiler))]
363 let map = self.cache.lock();
364 for (k, v) in map.iter_enumerated() {