//! The implementation of the query system itself. This defines the macros that
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.
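//!
//! For example, for a query `foo` the generated method on `tcx` boils down to
//! roughly the following sketch (the query name and its key/value types here
//! are illustrative only, not taken from this file):
//!
//! ```ignore
//! pub fn foo(self, key: Key) -> Value {
//!     get_query::<queries::foo<'_>, _>(self, DUMMY_SP, key)
//! }
//! ```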
use crate::dep_graph::{DepKind, DepNode};
use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
use crate::query::caches::QueryCache;
use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt};
use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId};
use crate::query::QueryContext;
#[cfg(not(parallel_compiler))]
use rustc_data_structures::cold_path;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHasher};
use rustc_data_structures::sharded::Sharded;
use rustc_data_structures::sync::{Lock, LockGuard};
use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::{Diagnostic, FatalError};
use rustc_span::source_map::DUMMY_SP;
use rustc_span::Span;
use std::collections::hash_map::Entry;
use std::convert::TryFrom;
use std::fmt::Debug;
use std::hash::{Hash, Hasher};
use std::mem;
use std::num::NonZeroU32;
use std::ptr;
#[cfg(debug_assertions)]
use std::sync::atomic::{AtomicUsize, Ordering};
pub struct QueryStateShard<CTX: QueryContext, K, C> {
    cache: C,
    active: FxHashMap<K, QueryResult<CTX>>,

    /// Used to generate unique ids for active jobs.
    jobs: u32,
}

impl<CTX: QueryContext, K, C: Default> Default for QueryStateShard<CTX, K, C> {
    fn default() -> QueryStateShard<CTX, K, C> {
        QueryStateShard { cache: Default::default(), active: Default::default(), jobs: 0 }
    }
}
pub struct QueryState<CTX: QueryContext, C: QueryCache> {
    cache: C,
    shards: Sharded<QueryStateShard<CTX, C::Key, C::Sharded>>,
    #[cfg(debug_assertions)]
    pub cache_hits: AtomicUsize,
}
impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> {
    pub(super) fn get_lookup<'tcx>(
        &'tcx self,
        key: &C::Key,
    ) -> QueryLookup<'tcx, CTX, C::Key, C::Sharded> {
        // We compute the key's hash once and then use it for both the
        // shard lookup and the hashmap lookup. This relies on the fact
        // that both of them use `FxHasher`.
        let mut hasher = FxHasher::default();
        key.hash(&mut hasher);
        let key_hash = hasher.finish();

        let shard = self.shards.get_shard_index_by_hash(key_hash);
        let lock = self.shards.get_shard_by_index(shard).lock();
        QueryLookup { key_hash, shard, lock }
    }
}
/// Indicates the state of a query for a given key in a query map.
enum QueryResult<CTX: QueryContext> {
    /// An already executing query. The query job can be used to await for its completion.
    Started(QueryJob<CTX>),

    /// The query panicked. Queries trying to wait on this will raise a fatal error which will
    /// silently panic.
    Poisoned,
}
impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> {
    pub fn iter_results<R>(
        &self,
        f: impl for<'a> FnOnce(
            Box<dyn Iterator<Item = (&'a C::Key, &'a C::Value, DepNodeIndex)> + 'a>,
        ) -> R,
    ) -> R {
        self.cache.iter(&self.shards, |shard| &mut shard.cache, f)
    }

    pub fn all_inactive(&self) -> bool {
        let shards = self.shards.lock_shards();
        shards.iter().all(|shard| shard.active.is_empty())
    }
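    /// Collects information about all currently-active jobs into `jobs`.
    /// Returns `None` if any shard lock could not be acquired; this is called
    /// from the deadlock handler, which must not block on those locks.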
    pub fn try_collect_active_jobs(
        &self,
        kind: CTX::DepKind,
        make_query: fn(C::Key) -> CTX::Query,
        jobs: &mut FxHashMap<QueryJobId<CTX::DepKind>, QueryJobInfo<CTX>>,
    ) -> Option<()>
    where
        C::Key: Clone,
    {
        // We use try_lock_shards here since we are called from the
        // deadlock handler, and this shouldn't be locked.
        let shards = self.shards.try_lock_shards()?;
        let shards = shards.iter().enumerate();
        jobs.extend(shards.flat_map(|(shard_id, shard)| {
            shard.active.iter().filter_map(move |(k, v)| {
                if let QueryResult::Started(ref job) = *v {
                    let id =
                        QueryJobId { job: job.id, shard: u16::try_from(shard_id).unwrap(), kind };
                    let info = QueryInfo { span: job.span, query: make_query(k.clone()) };
                    Some((id, QueryJobInfo { info, job: job.clone() }))
                } else {
                    None
                }
            })
        }));

        Some(())
    }
}
impl<CTX: QueryContext, C: QueryCache> Default for QueryState<CTX, C> {
    fn default() -> QueryState<CTX, C> {
        QueryState {
            cache: C::default(),
            shards: Default::default(),
            #[cfg(debug_assertions)]
            cache_hits: AtomicUsize::new(0),
        }
    }
}
/// Values used when checking a query cache which can be reused on a cache-miss to execute the query.
pub struct QueryLookup<'tcx, CTX: QueryContext, K, C> {
    pub(super) key_hash: u64,
    shard: usize,
    pub(super) lock: LockGuard<'tcx, QueryStateShard<CTX, K, C>>,
}
/// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped.
struct JobOwner<'tcx, CTX: QueryContext, C>
where
    C: QueryCache,
    C::Key: Eq + Hash + Clone + Debug,
{
    state: &'tcx QueryState<CTX, C>,
    key: C::Key,
    id: QueryJobId<CTX::DepKind>,
}
impl<'tcx, CTX: QueryContext, C> JobOwner<'tcx, CTX, C>
where
    C: QueryCache,
    C::Key: Eq + Hash + Clone + Debug,
{
    /// Either gets a `JobOwner` corresponding to the query, allowing us to
    /// start executing the query, or returns with the result of the query.
    /// This function assumes that `try_get_cached` is already called and returned `lookup`.
    /// If the query is executing elsewhere, this will wait for it and return the result.
    /// If the query panicked, this will silently panic.
    ///
    /// This function is inlined because that results in a noticeable speed-up
    /// for some compile-time benchmarks.
    #[inline(always)]
    fn try_start<'a, 'b, Q>(
        tcx: CTX,
        span: Span,
        key: &C::Key,
        mut lookup: QueryLookup<'a, CTX, C::Key, C::Sharded>,
    ) -> TryGetJob<'b, CTX, C>
    where
        Q: QueryDescription<CTX, Key = C::Key, Stored = C::Stored, Value = C::Value, Cache = C>,
        CTX: QueryContext,
    {
        let lock = &mut *lookup.lock;

        let (latch, mut _query_blocked_prof_timer) = match lock.active.entry((*key).clone()) {
            Entry::Occupied(mut entry) => {
                match entry.get_mut() {
                    QueryResult::Started(job) => {
                        // For parallel queries, we'll block and wait until the query running
                        // in another thread has completed. Record how long we wait in the
                        // self-profiler.
                        let _query_blocked_prof_timer = if cfg!(parallel_compiler) {
                            Some(tcx.profiler().query_blocked())
                        } else {
                            None
                        };

                        // Create the id of the job we're waiting for
                        let id = QueryJobId::new(job.id, lookup.shard, Q::DEP_KIND);

                        (job.latch(id), _query_blocked_prof_timer)
                    }
                    QueryResult::Poisoned => FatalError.raise(),
                }
            }
            Entry::Vacant(entry) => {
                // No job entry for this query. Return a new one to be started later.

                // Generate an id unique within this shard.
                let id = lock.jobs.checked_add(1).unwrap();
                lock.jobs = id;
                let id = QueryShardJobId(NonZeroU32::new(id).unwrap());

                let global_id = QueryJobId::new(id, lookup.shard, Q::DEP_KIND);

                let job = tcx.current_query_job();
                let job = QueryJob::new(id, span, job);

                entry.insert(QueryResult::Started(job));

                let owner =
                    JobOwner { state: Q::query_state(tcx), id: global_id, key: (*key).clone() };
                return TryGetJob::NotYetStarted(owner);
            }
        };
        mem::drop(lookup.lock);
        // If we are single-threaded we know that we have a cycle error,
        // so we just return the error.
        #[cfg(not(parallel_compiler))]
        return TryGetJob::Cycle(cold_path(|| {
            let value = Q::handle_cycle_error(tcx, latch.find_cycle_in_stack(tcx, span));
            Q::query_state(tcx).cache.store_nocache(value)
        }));

        // With parallel queries we might just have to wait on some other
        // thread.
        #[cfg(parallel_compiler)]
        {
            let result = latch.wait_on(tcx, span);

            if let Err(cycle) = result {
                let value = Q::handle_cycle_error(tcx, cycle);
                let value = Q::query_state(tcx).cache.store_nocache(value);
                return TryGetJob::Cycle(value);
            }

            let cached = try_get_cached(
                tcx,
                Q::query_state(tcx),
                (*key).clone(),
                |value, index| (value.clone(), index),
                |_, _| panic!("value must be in cache after waiting"),
            );

            if let Some(prof_timer) = _query_blocked_prof_timer.take() {
                prof_timer.finish_with_query_invocation_id(cached.1.into());
            }

            return TryGetJob::JobCompleted(cached);
        }
    }
    /// Completes the query by updating the query cache with the `result`,
    /// signals the waiter and forgets the JobOwner, so it won't poison the query.
    #[inline(always)]
    fn complete(self, tcx: CTX, result: C::Value, dep_node_index: DepNodeIndex) -> C::Stored {
        // We can move out of `self` here because we `mem::forget` it below
        let key = unsafe { ptr::read(&self.key) };
        let state = self.state;

        // Forget ourself so our destructor won't poison the query
        mem::forget(self);

        let (job, result) = {
            let mut lock = state.shards.get_shard_by_value(&key).lock();
            let job = match lock.active.remove(&key).unwrap() {
                QueryResult::Started(job) => job,
                QueryResult::Poisoned => panic!(),
            };
            let result = state.cache.complete(tcx, &mut lock.cache, key, result, dep_node_index);
            (job, result)
        };

        job.signal_complete();
        result
    }
}
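/// Runs `f` with a fresh diagnostics buffer and returns the closure's result
/// together with any diagnostics that were emitted into that buffer.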
#[inline(always)]
fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
where
    F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R,
{
    let diagnostics = Lock::new(ThinVec::new());
    let result = f(Some(&diagnostics));
    (result, diagnostics.into_inner())
}
impl<'tcx, CTX: QueryContext, C: QueryCache> Drop for JobOwner<'tcx, CTX, C>
where
    C::Key: Eq + Hash + Clone + Debug,
{
    #[inline(never)]
    #[cold]
    fn drop(&mut self) {
        // Poison the query so jobs waiting on it panic.
        let state = self.state;
        let shard = state.shards.get_shard_by_value(&self.key);
        let job = {
            let mut shard = shard.lock();
            let job = match shard.active.remove(&self.key).unwrap() {
                QueryResult::Started(job) => job,
                QueryResult::Poisoned => panic!(),
            };
            shard.active.insert(self.key.clone(), QueryResult::Poisoned);
            job
        };
        // Also signal the completion of the job, so waiters
        // will continue execution.
        job.signal_complete();
    }
}
#[derive(Clone)]
pub struct CycleError<Q> {
    /// The query and related span that uses the cycle.
    pub usage: Option<(Span, Q)>,
    pub cycle: Vec<QueryInfo<Q>>,
}
/// The result of `try_start`.
enum TryGetJob<'tcx, CTX: QueryContext, C: QueryCache>
where
    C::Key: Eq + Hash + Clone + Debug,
{
    /// The query is not yet started. Contains a guard to the cache eventually used to start it.
    NotYetStarted(JobOwner<'tcx, CTX, C>),

    /// The query was already completed.
    /// Returns the result of the query and its dep-node index
    /// if it succeeded or a cycle error if it failed.
    #[cfg(parallel_compiler)]
    JobCompleted((C::Stored, DepNodeIndex)),

    /// Trying to execute the query resulted in a cycle.
    Cycle(C::Stored),
}
/// Checks if the query is already computed and in the cache.
/// It returns the shard index and a lock guard to the shard,
/// which will be used if the query is not in the cache and we need
/// to compute it.
#[inline(always)]
fn try_get_cached<CTX, C, R, OnHit, OnMiss>(
    tcx: CTX,
    state: &QueryState<CTX, C>,
    key: C::Key,
    // `on_hit` can be called while holding a lock to the query cache
    on_hit: OnHit,
    on_miss: OnMiss,
) -> R
where
    C: QueryCache,
    CTX: QueryContext,
    OnHit: FnOnce(&C::Stored, DepNodeIndex) -> R,
    OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX, C::Key, C::Sharded>) -> R,
{
    state.cache.lookup(
        state,
        |shard| &mut shard.cache,
        key,
        |value, index| {
            if unlikely!(tcx.profiler().enabled()) {
                tcx.profiler().query_cache_hit(index.into());
            }
            #[cfg(debug_assertions)]
            {
                state.cache_hits.fetch_add(1, Ordering::Relaxed);
            }
            on_hit(value, index)
        },
        on_miss,
    )
}
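/// Runs a query after a cache miss: claims the query job and either executes
/// the provider directly, or, when incremental compilation is enabled, first
/// tries to mark the dep-node green and load the result from the on-disk cache.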
#[inline(always)]
fn try_execute_query<Q, CTX>(
    tcx: CTX,
    span: Span,
    key: Q::Key,
    lookup: QueryLookup<'_, CTX, Q::Key, <Q::Cache as QueryCache>::Sharded>,
) -> Q::Stored
where
    Q: QueryDescription<CTX>,
    CTX: QueryContext,
{
    let job = match JobOwner::try_start::<Q>(tcx, span, &key, lookup) {
        TryGetJob::NotYetStarted(job) => job,
        TryGetJob::Cycle(result) => return result,
        #[cfg(parallel_compiler)]
        TryGetJob::JobCompleted((v, index)) => {
            tcx.dep_graph().read_index(index);
            return v;
        }
    };

    // Fast path for when incr. comp. is off. `to_dep_node` is
    // expensive for some `DepKind`s.
    if !tcx.dep_graph().is_fully_enabled() {
        let null_dep_node = DepNode::new_no_params(DepKind::NULL);
        return force_query_with_job(tcx, key, job, null_dep_node, &Q::VTABLE).0;
    }

    if Q::VTABLE.anon {
        let (result, dep_node_index) = try_execute_anon_query(tcx, key, job.id, &Q::VTABLE);
        return job.complete(tcx, result, dep_node_index);
    }
    let dep_node = Q::to_dep_node(tcx, &key);

    if !Q::VTABLE.eval_always {
        // The diagnostics for this query will be
        // promoted to the current session during
        // `try_mark_green()`, so we can ignore them here.
        let loaded = tcx.start_query(job.id, None, |tcx| {
            let marked = tcx.dep_graph().try_mark_green_and_read(tcx, &dep_node);
            marked.map(|(prev_dep_node_index, dep_node_index)| {
                (
                    load_from_disk_and_cache_in_memory(
                        tcx,
                        key.clone(),
                        prev_dep_node_index,
                        dep_node_index,
                        &dep_node,
                        &Q::VTABLE,
                    ),
                    dep_node_index,
                )
            })
        });
        if let Some((result, dep_node_index)) = loaded {
            return job.complete(tcx, result, dep_node_index);
        }
    }

    let (result, dep_node_index) = force_query_with_job(tcx, key, job, dep_node, &Q::VTABLE);
    tcx.dep_graph().read_index(dep_node_index);
    result
}
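/// Executes the provider of an anonymous query inside an anonymous dep-graph
/// task and records any diagnostics it emitted against the resulting dep node.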
fn try_execute_anon_query<CTX, K, V>(
    tcx: CTX,
    key: K,
    job_id: QueryJobId<CTX::DepKind>,
    query: &QueryVtable<CTX, K, V>,
) -> (V, DepNodeIndex)
where
    CTX: QueryContext,
{
    debug_assert!(query.anon);
    let prof_timer = tcx.profiler().query_provider();

    let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
        tcx.start_query(job_id, diagnostics, |tcx| {
            tcx.dep_graph().with_anon_task(query.dep_kind, || query.compute(tcx, key))
        })
    });

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    tcx.dep_graph().read_index(dep_node_index);

    if unlikely!(!diagnostics.is_empty()) {
        tcx.store_diagnostics_for_anon_node(dep_node_index, diagnostics);
    }

    (result, dep_node_index)
}
fn load_from_disk_and_cache_in_memory<CTX, K, V>(
    tcx: CTX,
    key: K,
    prev_dep_node_index: SerializedDepNodeIndex,
    dep_node_index: DepNodeIndex,
    dep_node: &DepNode<CTX::DepKind>,
    query: &QueryVtable<CTX, K, V>,
) -> V
where
    CTX: QueryContext,
{
    // Note that this function can be called concurrently from the same query.
    // We must ensure that this is handled correctly.

    debug_assert!(tcx.dep_graph().is_green(dep_node));

    // First we try to load the result from the on-disk cache.
    let result = if query.cache_on_disk(tcx, &key, None) {
        let prof_timer = tcx.profiler().incr_cache_loading();
        let result = query.try_load_from_disk(tcx, prev_dep_node_index);
        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

        // We always expect to find a cached result for things that
        // can be forced from `DepNode`.
        debug_assert!(
            !dep_node.kind.can_reconstruct_query_key() || result.is_some(),
            "missing on-disk cache entry for {:?}",
            dep_node
        );
        result
    } else {
        // Some things are never cached on disk.
        None
    };

    let result = if let Some(result) = result {
        result
    } else {
        // We could not load a result from the on-disk cache, so
        // recompute.
        let prof_timer = tcx.profiler().query_provider();

        // The dep-graph for this computation is already in-place.
        let result = tcx.dep_graph().with_ignore(|| query.compute(tcx, key));

        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
        result
    };

    // If `-Zincremental-verify-ich` is specified, re-hash results from
    // the cache and make sure that they have the expected fingerprint.
    if unlikely!(tcx.incremental_verify_ich()) {
        incremental_verify_ich(tcx, &result, dep_node, dep_node_index, query);
    }

    result
}
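/// Re-hashes `result` and asserts that the fingerprint matches the one stored
/// in the dep-graph for `dep_node_index`. Used for `-Zincremental-verify-ich`.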
fn incremental_verify_ich<CTX, K, V>(
    tcx: CTX,
    result: &V,
    dep_node: &DepNode<CTX::DepKind>,
    dep_node_index: DepNodeIndex,
    query: &QueryVtable<CTX, K, V>,
) where
    CTX: QueryContext,
{
    assert!(
        Some(tcx.dep_graph().fingerprint_of(dep_node_index))
            == tcx.dep_graph().prev_fingerprint_of(dep_node),
        "fingerprint for green query instance not loaded from cache: {:?}",
        dep_node,
    );

    debug!("BEGIN verify_ich({:?})", dep_node);
    let mut hcx = tcx.create_stable_hashing_context();

    let new_hash = query.hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO);
    debug!("END verify_ich({:?})", dep_node);

    let old_hash = tcx.dep_graph().fingerprint_of(dep_node_index);

    assert!(new_hash == old_hash, "found unstable fingerprints for {:?}", dep_node);
}
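/// Executes the query's provider inside a dep-graph task for `dep_node`,
/// stores any emitted diagnostics, and completes `job` with the result.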
#[inline(always)]
fn force_query_with_job<C, CTX>(
    tcx: CTX,
    key: C::Key,
    job: JobOwner<'_, CTX, C>,
    dep_node: DepNode<CTX::DepKind>,
    query: &QueryVtable<CTX, C::Key, C::Value>,
) -> (C::Stored, DepNodeIndex)
where
    C: QueryCache,
    C::Key: Eq + Clone + Debug,
    CTX: QueryContext,
{
    // If the following assertion triggers, it can have two reasons:
    // 1. Something is wrong with DepNode creation, either here or
    //    in `DepGraph::try_mark_green()`.
    // 2. Two distinct query keys get mapped to the same `DepNode`
    //    (see for example #48923).
    debug_assert!(
        !tcx.dep_graph().dep_node_exists(&dep_node),
        "forcing query with already existing `DepNode`\n\
         - query-key: {:?}\n\
         - dep-node: {:?}",
        key,
        dep_node
    );

    let prof_timer = tcx.profiler().query_provider();

    let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
        tcx.start_query(job.id, diagnostics, |tcx| {
            if query.eval_always {
                tcx.dep_graph().with_eval_always_task(
                    dep_node,
                    tcx,
                    key,
                    query.compute,
                    query.hash_result,
                )
            } else {
                tcx.dep_graph().with_task(dep_node, tcx, key, query.compute, query.hash_result)
            }
        })
    });

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    if unlikely!(!diagnostics.is_empty()) {
        if dep_node.kind != DepKind::NULL {
            tcx.store_diagnostics(dep_node_index, diagnostics);
        }
    }

    let result = job.complete(tcx, result, dep_node_index);

    (result, dep_node_index)
}
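/// Looks up the result of query `Q` for `key`, returning the cached value on a
/// hit and executing the query via `try_execute_query` on a miss.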
pub fn get_query<Q, CTX>(tcx: CTX, span: Span, key: Q::Key) -> Q::Stored
where
    Q: QueryDescription<CTX>,
    CTX: QueryContext,
{
    debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span);

    try_get_cached(
        tcx,
        Q::query_state(tcx),
        key,
        |value, index| {
            tcx.dep_graph().read_index(index);
            value.clone()
        },
        |key, lookup| try_execute_query::<Q, _>(tcx, span, key, lookup),
    )
}
/// Ensure that either this query has all green inputs or has been executed.
/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
///
/// This function is particularly useful when executing passes for their
/// side-effects -- e.g., in order to report errors for erroneous programs.
///
/// Note: The optimization is only available during incr. comp.
pub fn ensure_query<Q, CTX>(tcx: CTX, key: Q::Key)
where
    Q: QueryDescription<CTX>,
    CTX: QueryContext,
{
    if Q::VTABLE.eval_always {
        let _ = get_query::<Q, _>(tcx, DUMMY_SP, key);
        return;
    }

    // Ensuring an anonymous query makes no sense
    assert!(!Q::VTABLE.anon);

    let dep_node = Q::to_dep_node(tcx, &key);

    match tcx.dep_graph().try_mark_green_and_read(tcx, &dep_node) {
        None => {
            // A None return from `try_mark_green_and_read` means that this is either
            // a new dep node or that the dep node has already been marked red.
            // Either way, we can't call `dep_graph.read()` as we don't have the
            // DepNodeIndex. We must invoke the query itself. The performance cost
            // this introduces should be negligible as we'll immediately hit the
            // in-memory cache, or another query down the line will.
            let _ = get_query::<Q, _>(tcx, DUMMY_SP, key);
        }
        Some((_, dep_node_index)) => {
            tcx.profiler().query_cache_hit(dep_node_index.into());
        }
    }
}
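/// Forces the query for `key` to be executed and recorded under `dep_node`,
/// without returning its result. Does nothing if the result is already cached.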
pub fn force_query<Q, CTX>(tcx: CTX, key: Q::Key, span: Span, dep_node: DepNode<CTX::DepKind>)
where
    Q: QueryDescription<CTX>,
    CTX: QueryContext,
{
    // We may be concurrently trying both execute and force a query.
    // Ensure that only one of them runs the query.

    try_get_cached(
        tcx,
        Q::query_state(tcx),
        key,
        |_, _| {
            // Cache hit, do nothing
        },
        |key, lookup| {
            let job = match JobOwner::try_start::<Q>(tcx, span, &key, lookup) {
                TryGetJob::NotYetStarted(job) => job,
                TryGetJob::Cycle(_) => return,
                #[cfg(parallel_compiler)]
                TryGetJob::JobCompleted(_) => return,
            };
            force_query_with_job(tcx, key, job, dep_node, &Q::VTABLE);
        },
    );
}