//! The implementation of the query system itself. This defines the macros that
//! generate the actual methods on `tcx` which find and execute the provider,
//! manage the caches, and so forth.
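//!
//! A rough sketch (not the actual macro output) of what a generated accessor
//! for a hypothetical query `type_of` might expand to, assuming the usual
//! `queries` module produced by the macros:
//!
//! ```ignore (illustrative)
//! impl TyCtxt<'tcx> {
//!     pub fn type_of(self, key: DefId) -> Ty<'tcx> {
//!         self.get_query::<queries::type_of<'tcx>>(DUMMY_SP, key)
//!     }
//! }
//! ```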
use crate::dep_graph::{DepContext, DepKind, DepNode};
use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
use crate::query::caches::QueryCache;
use crate::query::config::{QueryContext, QueryDescription};
use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId};
use crate::HashStableContextProvider;

#[cfg(not(parallel_compiler))]
use rustc_data_structures::cold_path;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHasher};
use rustc_data_structures::sharded::Sharded;
use rustc_data_structures::sync::{Lock, LockGuard};
use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::{Diagnostic, FatalError};
use rustc_span::source_map::DUMMY_SP;
use rustc_span::Span;

use std::collections::hash_map::Entry;
use std::convert::TryFrom;
use std::fmt::Debug;
use std::hash::{Hash, Hasher};
use std::mem;
use std::num::NonZeroU32;
use std::ptr;
#[cfg(debug_assertions)]
use std::sync::atomic::{AtomicUsize, Ordering};
pub struct QueryStateShard<CTX: QueryContext, K, C> {
    cache: C,
    active: FxHashMap<K, QueryResult<CTX>>,

    /// Used to generate unique ids for active jobs.
    jobs: u32,
}

impl<CTX: QueryContext, K, C> QueryStateShard<CTX, K, C> {
    fn get_cache(&mut self) -> &mut C {
        &mut self.cache
    }
}

impl<CTX: QueryContext, K, C: Default> Default for QueryStateShard<CTX, K, C> {
    fn default() -> QueryStateShard<CTX, K, C> {
        QueryStateShard { cache: Default::default(), active: Default::default(), jobs: 0 }
    }
}
pub struct QueryState<CTX: QueryContext, C: QueryCache<CTX>> {
    cache: C,
    shards: Sharded<QueryStateShard<CTX, C::Key, C::Sharded>>,
    #[cfg(debug_assertions)]
    pub cache_hits: AtomicUsize,
}
impl<CTX: QueryContext, C: QueryCache<CTX>> QueryState<CTX, C> {
    pub(super) fn get_lookup<'tcx, K2: Hash>(
        &'tcx self,
        key: &K2,
    ) -> QueryLookup<'tcx, CTX, C::Key, C::Sharded> {
        // We compute the key's hash once and then use it for both the
        // shard lookup and the hashmap lookup. This relies on the fact
        // that both of them use `FxHasher`.
        let mut hasher = FxHasher::default();
        key.hash(&mut hasher);
        let key_hash = hasher.finish();

        let shard = self.shards.get_shard_index_by_hash(key_hash);
        let lock = self.shards.get_shard_by_index(shard).lock();
        QueryLookup { key_hash, shard, lock }
    }
}
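
// A sketch of how a `QueryCache` implementation is expected to consume
// `get_lookup` (the cache-side details are illustrative, cf. `caches.rs`):
//
//     let lookup = state.get_lookup(&key);
//     // `lookup.key_hash` can seed the shard-local map probe without
//     // re-hashing, and `lookup.lock` keeps the shard locked so a miss
//     // can transition straight into `JobOwner::try_start`.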
/// Indicates the state of a query for a given key in a query map.
enum QueryResult<CTX: QueryContext> {
    /// An already executing query. The query job can be used to await its completion.
    Started(QueryJob<CTX>),

    /// The query panicked. Queries trying to wait on this will raise a fatal error which
    /// will silently panic.
    Poisoned,
}
impl<CTX: QueryContext, C: QueryCache<CTX>> QueryState<CTX, C> {
    pub fn iter_results<R>(
        &self,
        f: impl for<'a> FnOnce(
            Box<dyn Iterator<Item = (&'a C::Key, &'a C::Value, DepNodeIndex)> + 'a>,
        ) -> R,
    ) -> R {
        self.cache.iter(&self.shards, |shard| &mut shard.cache, f)
    }

    pub fn all_inactive(&self) -> bool {
        let shards = self.shards.lock_shards();
        shards.iter().all(|shard| shard.active.is_empty())
    }
    pub fn try_collect_active_jobs(
        &self,
        kind: CTX::DepKind,
        make_query: fn(C::Key) -> CTX::Query,
        jobs: &mut FxHashMap<QueryJobId<CTX::DepKind>, QueryJobInfo<CTX>>,
    ) -> Option<()> {
        // We use `try_lock_shards` here since we are called from the
        // deadlock handler, and this shouldn't be locked.
        let shards = self.shards.try_lock_shards()?;
        let shards = shards.iter().enumerate();
        jobs.extend(shards.flat_map(|(shard_id, shard)| {
            shard.active.iter().filter_map(move |(k, v)| {
                if let QueryResult::Started(ref job) = *v {
                    let id =
                        QueryJobId { job: job.id, shard: u16::try_from(shard_id).unwrap(), kind };
                    let info = QueryInfo { span: job.span, query: make_query(k.clone()) };
                    Some((id, QueryJobInfo { info, job: job.clone() }))
                } else {
                    None
                }
            })
        }));

        Some(())
    }
}
impl<CTX: QueryContext, C: QueryCache<CTX>> Default for QueryState<CTX, C> {
    fn default() -> QueryState<CTX, C> {
        QueryState {
            cache: C::default(),
            shards: Default::default(),
            #[cfg(debug_assertions)]
            cache_hits: AtomicUsize::new(0),
        }
    }
}
/// Values used when checking a query cache which can be reused on a cache-miss to execute the query.
pub struct QueryLookup<'tcx, CTX: QueryContext, K, C> {
    pub(super) key_hash: u64,
    shard: usize,
    pub(super) lock: LockGuard<'tcx, QueryStateShard<CTX, K, C>>,
}
/// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped.
struct JobOwner<'tcx, CTX: QueryContext, C>
where
    C: QueryCache<CTX>,
    C::Key: Eq + Hash + Clone + Debug,
    C::Value: Clone,
{
    state: &'tcx QueryState<CTX, C>,
    key: C::Key,
    id: QueryJobId<CTX::DepKind>,
}
impl<'tcx, CTX: QueryContext, C> JobOwner<'tcx, CTX, C>
where
    C: QueryCache<CTX>,
    C::Key: Eq + Hash + Clone + Debug,
    C::Value: Clone,
{
    /// Either gets a `JobOwner` corresponding to the query, allowing us to
    /// start executing the query, or returns with the result of the query.
    /// This function assumes that `try_get_cached` has already been called and returned `lookup`.
    /// If the query is executing elsewhere, this will wait for it and return the result.
    /// If the query panicked, this will silently panic.
    ///
    /// This function is inlined because that results in a noticeable speed-up
    /// for some compile-time benchmarks.
    #[inline(always)]
    fn try_start<'a, 'b, Q, K>(
        tcx: CTX,
        span: Span,
        key: &C::Key,
        mut lookup: QueryLookup<'a, CTX, C::Key, C::Sharded>,
    ) -> TryGetJob<'b, CTX, C>
    where
        K: DepKind,
        Q: QueryDescription<CTX, Key = C::Key, Value = C::Value, Cache = C>,
        CTX: QueryContext<DepKind = K>,
    {
        let lock = &mut *lookup.lock;
        let (latch, mut _query_blocked_prof_timer) = match lock.active.entry((*key).clone()) {
            Entry::Occupied(mut entry) => {
                match entry.get_mut() {
                    QueryResult::Started(job) => {
                        // For parallel queries, we'll block and wait until the query running
                        // in another thread has completed. Record how long we wait in the
                        // self-profiler.
                        let _query_blocked_prof_timer = if cfg!(parallel_compiler) {
                            Some(tcx.profiler().query_blocked())
                        } else {
                            None
                        };

                        // Create the id of the job we're waiting for.
                        let id = QueryJobId::new(job.id, lookup.shard, Q::DEP_KIND);

                        (job.latch(id), _query_blocked_prof_timer)
                    }
                    QueryResult::Poisoned => FatalError.raise(),
                }
            }
            Entry::Vacant(entry) => {
                // No job entry for this query. Return a new one to be started later.

                // Generate an id unique within this shard.
                let id = lock.jobs.checked_add(1).unwrap();
                lock.jobs = id;
                let id = QueryShardJobId(NonZeroU32::new(id).unwrap());

                let global_id = QueryJobId::new(id, lookup.shard, Q::DEP_KIND);

                let job = tcx.read_query_job(|query| QueryJob::new(id, span, query));

                entry.insert(QueryResult::Started(job));

                let owner =
                    JobOwner { state: Q::query_state(tcx), id: global_id, key: (*key).clone() };
                return TryGetJob::NotYetStarted(owner);
            }
        };
        mem::drop(lookup.lock);

        // If we are single-threaded we know that we have a cycle error,
        // so we just return the error.
        #[cfg(not(parallel_compiler))]
        return TryGetJob::Cycle(cold_path(|| {
            Q::handle_cycle_error(tcx, latch.find_cycle_in_stack(tcx, span))
        }));

        // With parallel queries we might just have to wait on some other
        // thread.
        #[cfg(parallel_compiler)]
        {
            let result = latch.wait_on(tcx, span);
            if let Err(cycle) = result {
                return TryGetJob::Cycle(Q::handle_cycle_error(tcx, cycle));
            }

            let cached = try_get_cached(
                tcx,
                Q::query_state(tcx),
                (*key).clone(),
                |value, index| (value.clone(), index),
                |_, _| panic!("value must be in cache after waiting"),
            );

            if let Some(prof_timer) = _query_blocked_prof_timer.take() {
                prof_timer.finish_with_query_invocation_id(cached.1.into());
            }

            return TryGetJob::JobCompleted(cached);
        }
    }
    /// Completes the query by updating the query cache with the `result`,
    /// signals the waiter, and forgets the `JobOwner`, so it won't poison the query.
    #[inline(always)]
    fn complete(self, tcx: CTX, result: &C::Value, dep_node_index: DepNodeIndex) {
        // We can move out of `self` here because we `mem::forget` it below.
        let key = unsafe { ptr::read(&self.key) };
        let state = self.state;

        // Forget ourself so our destructor won't poison the query.
        mem::forget(self);

        let job = {
            let result = result.clone();
            let mut lock = state.shards.get_shard_by_value(&key).lock();
            let job = match lock.active.remove(&key).unwrap() {
                QueryResult::Started(job) => job,
                QueryResult::Poisoned => panic!(),
            };
            state.cache.complete(tcx, &mut lock.cache, key, result, dep_node_index);
            job
        };

        job.signal_complete();
    }
}
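
/// Aggregates the diagnostics emitted while `f` runs so the caller can decide
/// where to store them (e.g. attached to a dep-node). A minimal usage sketch,
/// mirroring the call sites below:
///
/// ```ignore (illustrative)
/// let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
///     tcx.start_query(job.id, diagnostics, |tcx| {
///         // ... run the provider here ...
///     })
/// });
/// ```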
#[inline(always)]
fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
where
    F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R,
{
    let diagnostics = Lock::new(ThinVec::new());
    let result = f(Some(&diagnostics));
    (result, diagnostics.into_inner())
}
impl<'tcx, CTX: QueryContext, C: QueryCache<CTX>> Drop for JobOwner<'tcx, CTX, C>
where
    C::Key: Eq + Hash + Clone + Debug,
    C::Value: Clone,
{
    #[inline(never)]
    #[cold]
    fn drop(&mut self) {
        // Poison the query so jobs waiting on it panic.
        let state = self.state;
        let shard = state.shards.get_shard_by_value(&self.key);
        let job = {
            let mut shard = shard.lock();
            let job = match shard.active.remove(&self.key).unwrap() {
                QueryResult::Started(job) => job,
                QueryResult::Poisoned => panic!(),
            };
            shard.active.insert(self.key.clone(), QueryResult::Poisoned);
            job
        };
        // Also signal the completion of the job, so waiters
        // will continue execution.
        job.signal_complete();
    }
}
#[derive(Clone)]
pub struct CycleError<Q> {
    /// The query and related span that uses the cycle.
    pub usage: Option<(Span, Q)>,
    pub cycle: Vec<QueryInfo<Q>>,
}
/// The result of `try_start`.
enum TryGetJob<'tcx, CTX: QueryContext, C: QueryCache<CTX>>
where
    C::Key: Eq + Hash + Clone + Debug,
    C::Value: Clone,
{
    /// The query is not yet started. Contains a guard to the cache eventually used to start it.
    NotYetStarted(JobOwner<'tcx, CTX, C>),

    /// The query was already completed.
    /// Returns the result of the query and its dep-node index
    /// if it succeeded or a cycle error if it failed.
    #[cfg(parallel_compiler)]
    JobCompleted((C::Value, DepNodeIndex)),

    /// Trying to execute the query resulted in a cycle.
    Cycle(C::Value),
}
/// Checks if the query is already computed and in the cache.
/// It returns the shard index and a lock guard to the shard,
/// which will be used if the query is not in the cache and we need
/// to compute it.
#[inline(always)]
fn try_get_cached<CTX, C, R, OnHit, OnMiss>(
    tcx: CTX,
    state: &QueryState<CTX, C>,
    key: C::Key,
    // `on_hit` can be called while holding a lock to the query cache
    on_hit: OnHit,
    on_miss: OnMiss,
) -> R
where
    C: QueryCache<CTX>,
    CTX: QueryContext,
    OnHit: FnOnce(&C::Value, DepNodeIndex) -> R,
    OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX, C::Key, C::Sharded>) -> R,
{
    state.cache.lookup(
        state,
        QueryStateShard::<CTX, C::Key, C::Sharded>::get_cache,
        key,
        |value, index| {
            if unlikely!(tcx.profiler().enabled()) {
                tcx.profiler().query_cache_hit(index.into());
            }
            #[cfg(debug_assertions)]
            {
                state.cache_hits.fetch_add(1, Ordering::Relaxed);
            }
            on_hit(value, index)
        },
        on_miss,
    )
}
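
// A sketch of the closure pair callers pass to `try_get_cached` (compare
// `get_query` below): `on_hit` may run while the shard lock is held, so it
// should stay cheap; `on_miss` receives the still-locked shard via
// `QueryLookup` so the query can be started without re-hashing the key.
//
//     try_get_cached(
//         tcx,
//         Q::query_state(tcx),
//         key,
//         |value, index| value.clone(),                               // on_hit
//         |key, lookup| try_execute_query::<Q, _, _>(tcx, span, key, lookup),
//     )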
#[inline(always)]
fn try_execute_query<Q, CTX, K>(
    tcx: CTX,
    span: Span,
    key: Q::Key,
    lookup: QueryLookup<'_, CTX, Q::Key, <Q::Cache as QueryCache<CTX>>::Sharded>,
) -> Q::Value
where
    Q: QueryDescription<CTX>,
    CTX: QueryContext<DepKind = K>,
    CTX: HashStableContextProvider<<CTX as DepContext>::StableHashingContext>,
    K: DepKind,
{
    let job = match JobOwner::try_start::<Q, _>(tcx, span, &key, lookup) {
        TryGetJob::NotYetStarted(job) => job,
        TryGetJob::Cycle(result) => return result,
        #[cfg(parallel_compiler)]
        TryGetJob::JobCompleted((v, index)) => {
            tcx.dep_graph().read_index(index);
            return v;
        }
    };
    // Fast path for when incr. comp. is off. `to_dep_node` is
    // expensive for some `DepKind`s.
    if !tcx.dep_graph().is_fully_enabled() {
        let null_dep_node = DepNode::new_no_params(DepKind::NULL);
        return force_query_with_job::<Q, _, _>(tcx, key, job, null_dep_node).0;
    }
    if Q::ANON {
        let prof_timer = tcx.profiler().query_provider();

        let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
            tcx.start_query(job.id, diagnostics, |tcx| {
                tcx.dep_graph().with_anon_task(Q::DEP_KIND, || Q::compute(tcx, key))
            })
        });

        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

        tcx.dep_graph().read_index(dep_node_index);

        if unlikely!(!diagnostics.is_empty()) {
            tcx.store_diagnostics_for_anon_node(dep_node_index, diagnostics);
        }

        job.complete(tcx, &result, dep_node_index);

        return result;
    }
    let dep_node = Q::to_dep_node(tcx, &key);

    if !Q::EVAL_ALWAYS {
        // The diagnostics for this query will be
        // promoted to the current session during
        // `try_mark_green()`, so we can ignore them here.
        let loaded = tcx.start_query(job.id, None, |tcx| {
            let marked = tcx.dep_graph().try_mark_green_and_read(tcx, &dep_node);
            marked.map(|(prev_dep_node_index, dep_node_index)| {
                (
                    load_from_disk_and_cache_in_memory::<Q, _>(
                        tcx,
                        key.clone(),
                        prev_dep_node_index,
                        dep_node_index,
                        &dep_node,
                    ),
                    dep_node_index,
                )
            })
        });
        if let Some((result, dep_node_index)) = loaded {
            job.complete(tcx, &result, dep_node_index);
            return result;
        }
    }

    let (result, dep_node_index) = force_query_with_job::<Q, _, _>(tcx, key, job, dep_node);
    tcx.dep_graph().read_index(dep_node_index);
    result
}
fn load_from_disk_and_cache_in_memory<Q, CTX>(
    tcx: CTX,
    key: Q::Key,
    prev_dep_node_index: SerializedDepNodeIndex,
    dep_node_index: DepNodeIndex,
    dep_node: &DepNode<CTX::DepKind>,
) -> Q::Value
where
    CTX: QueryContext,
    Q: QueryDescription<CTX>,
{
    // Note that this function can be called concurrently from the same query,
    // so we must ensure that this is handled correctly.

    debug_assert!(tcx.dep_graph().is_green(dep_node));
    // First we try to load the result from the on-disk cache.
    let result = if Q::cache_on_disk(tcx, key.clone(), None) {
        let prof_timer = tcx.profiler().incr_cache_loading();
        let result = Q::try_load_from_disk(tcx, prev_dep_node_index);
        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

        // We always expect to find a cached result for things that
        // can be forced from `DepNode`.
        debug_assert!(
            !dep_node.kind.can_reconstruct_query_key() || result.is_some(),
            "missing on-disk cache entry for {:?}",
            dep_node
        );

        result
    } else {
        // Some things are never cached on disk.
        None
    };
    let result = if let Some(result) = result {
        result
    } else {
        // We could not load a result from the on-disk cache, so
        // recompute.
        let prof_timer = tcx.profiler().query_provider();

        // The dep-graph for this computation is already in-place.
        let result = tcx.dep_graph().with_ignore(|| Q::compute(tcx, key));

        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

        result
    };

    // If `-Zincremental-verify-ich` is specified, re-hash results from
    // the cache and make sure that they have the expected fingerprint.
    if unlikely!(tcx.session().opts.debugging_opts.incremental_verify_ich) {
        incremental_verify_ich::<Q, _>(tcx, &result, dep_node, dep_node_index);
    }

    result
}
#[inline(never)]
#[cold]
fn incremental_verify_ich<Q, CTX>(
    tcx: CTX,
    result: &Q::Value,
    dep_node: &DepNode<CTX::DepKind>,
    dep_node_index: DepNodeIndex,
) where
    CTX: QueryContext,
    Q: QueryDescription<CTX>,
{
    assert!(
        Some(tcx.dep_graph().fingerprint_of(dep_node_index))
            == tcx.dep_graph().prev_fingerprint_of(dep_node),
        "fingerprint for green query instance not loaded from cache: {:?}",
        dep_node
    );

    debug!("BEGIN verify_ich({:?})", dep_node);
    let mut hcx = tcx.create_stable_hashing_context();

    let new_hash = Q::hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO);
    debug!("END verify_ich({:?})", dep_node);

    let old_hash = tcx.dep_graph().fingerprint_of(dep_node_index);

    assert!(new_hash == old_hash, "found unstable fingerprints for {:?}", dep_node);
}
#[inline(always)]
fn force_query_with_job<Q, CTX, K>(
    tcx: CTX,
    key: Q::Key,
    job: JobOwner<'_, CTX, Q::Cache>,
    dep_node: DepNode<CTX::DepKind>,
) -> (Q::Value, DepNodeIndex)
where
    Q: QueryDescription<CTX>,
    CTX: QueryContext<DepKind = K>,
    CTX: HashStableContextProvider<<CTX as DepContext>::StableHashingContext>,
    K: DepKind,
{
    // If the following assertion triggers, it can have two reasons:
    // 1. Something is wrong with DepNode creation, either here or
    //    in `DepGraph::try_mark_green()`.
    // 2. Two distinct query keys get mapped to the same `DepNode`
    //    (see for example #48923).
    assert!(
        !tcx.dep_graph().dep_node_exists(&dep_node),
        "forcing query with already existing `DepNode`\n\
         - query-key: {:?}\n\
         - dep-node: {:?}",
        key,
        dep_node
    );
598 let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
599 tcx.start_query(job.id, diagnostics, |tcx| {
601 tcx.dep_graph().with_eval_always_task(
609 tcx.dep_graph().with_task(dep_node, tcx, key, Q::compute, Q::hash_result)
614 prof_timer.finish_with_query_invocation_id(dep_node_index.into());
616 if unlikely!(!diagnostics.is_empty()) {
617 if dep_node.kind != DepKind::NULL {
618 tcx.store_diagnostics(dep_node_index, diagnostics);
622 job.complete(tcx, &result, dep_node_index);
624 (result, dep_node_index)
pub trait QueryGetter: QueryContext {
    fn get_query<Q: QueryDescription<Self>>(self, span: Span, key: Q::Key) -> Q::Value;
    /// Ensure that either this query has all green inputs or has been executed.
    /// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
    ///
    /// This function is particularly useful when executing passes for their
    /// side-effects -- e.g., in order to report errors for erroneous programs.
    ///
    /// Note: The optimization is only available during incr. comp.
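    ///
    /// A hypothetical usage sketch, for a query `Q` that is run only for its
    /// side-effects:
    ///
    /// ```ignore (illustrative)
    /// tcx.ensure_query::<Q>(key); // if all inputs are green, no provider runs
    /// ```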
    fn ensure_query<Q: QueryDescription<Self>>(self, key: Q::Key);

    fn force_query<Q: QueryDescription<Self>>(
        self,
        key: Q::Key,
        span: Span,
        dep_node: DepNode<Self::DepKind>,
    );
}
impl<CTX, K> QueryGetter for CTX
where
    CTX: QueryContext<DepKind = K>,
    CTX: HashStableContextProvider<<CTX as DepContext>::StableHashingContext>,
    K: DepKind,
{
    #[inline(never)]
    fn get_query<Q: QueryDescription<Self>>(self, span: Span, key: Q::Key) -> Q::Value {
        debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span);

        try_get_cached(
            self,
            Q::query_state(self),
            key,
            |value, index| {
                self.dep_graph().read_index(index);
                value.clone()
            },
            |key, lookup| try_execute_query::<Q, _, _>(self, span, key, lookup),
        )
    }
    /// Ensure that either this query has all green inputs or has been executed.
    /// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
    ///
    /// This function is particularly useful when executing passes for their
    /// side-effects -- e.g., in order to report errors for erroneous programs.
    ///
    /// Note: The optimization is only available during incr. comp.
    fn ensure_query<Q: QueryDescription<Self>>(self, key: Q::Key) {
        if Q::EVAL_ALWAYS {
            let _ = self.get_query::<Q>(DUMMY_SP, key);
            return;
        }

        // Ensuring an anonymous query makes no sense.
        assert!(!Q::ANON);

        let dep_node = Q::to_dep_node(self, &key);

        match self.dep_graph().try_mark_green_and_read(self, &dep_node) {
            None => {
                // A `None` return from `try_mark_green_and_read` means that this is either
                // a new dep node or that the dep node has already been marked red.
                // Either way, we can't call `dep_graph.read()` as we don't have the
                // DepNodeIndex. We must invoke the query itself. The performance cost
                // this introduces should be negligible as we'll immediately hit the
                // in-memory cache, or another query down the line will.
                let _ = self.get_query::<Q>(DUMMY_SP, key);
            }
            Some((_, dep_node_index)) => {
                self.profiler().query_cache_hit(dep_node_index.into());
            }
        }
    }
    fn force_query<Q: QueryDescription<Self>>(
        self,
        key: Q::Key,
        span: Span,
        dep_node: DepNode<Self::DepKind>,
    ) {
        // We may be concurrently trying both execute and force a query.
        // Ensure that only one of them runs the query.

        try_get_cached(
            self,
            Q::query_state(self),
            key,
            |_, _| {
                // Cache hit, do nothing.
            },
            |key, lookup| {
                let job = match JobOwner::try_start::<Q, _>(self, span, &key, lookup) {
                    TryGetJob::NotYetStarted(job) => job,
                    TryGetJob::Cycle(_) => return,
                    #[cfg(parallel_compiler)]
                    TryGetJob::JobCompleted(_) => return,
                };
                force_query_with_job::<Q, _, _>(self, key, job, dep_node);
            },
        );
    }
}