1 //! The implementation of the query system itself. This defines the macros that
2 //! generate the actual methods on tcx which find and execute the provider,
3 //! manage the caches, and so forth.
5 use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
6 use crate::ty::query::caches::QueryCache;
7 use crate::ty::query::config::{QueryContext, QueryDescription};
8 use crate::ty::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId};
9 use crate::ty::query::Query;
11 use crate::ty::{self, TyCtxt};
13 #[cfg(not(parallel_compiler))]
14 use rustc_data_structures::cold_path;
15 use rustc_data_structures::fx::{FxHashMap, FxHasher};
16 use rustc_data_structures::sharded::Sharded;
17 use rustc_data_structures::sync::{Lock, LockGuard};
18 use rustc_data_structures::thin_vec::ThinVec;
19 use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, FatalError, Handler, Level};
20 use rustc_query_system::dep_graph::{DepKind, DepNode};
21 use rustc_session::Session;
22 use rustc_span::def_id::DefId;
23 use rustc_span::source_map::DUMMY_SP;
25 use std::collections::hash_map::Entry;
26 use std::convert::TryFrom;
28 use std::hash::{Hash, Hasher};
30 use std::num::NonZeroU32;
32 #[cfg(debug_assertions)]
33 use std::sync::atomic::{AtomicUsize, Ordering};
35 pub(crate) struct QueryStateShard<CTX: QueryContext, K, C> {
37 active: FxHashMap<K, QueryResult<CTX>>,
39 /// Used to generate unique ids for active jobs.
43 impl<CTX: QueryContext, K, C> QueryStateShard<CTX, K, C> {
44 fn get_cache(&mut self) -> &mut C {
49 impl<CTX: QueryContext, K, C: Default> Default for QueryStateShard<CTX, K, C> {
50 fn default() -> QueryStateShard<CTX, K, C> {
51 QueryStateShard { cache: Default::default(), active: Default::default(), jobs: 0 }
55 pub(crate) struct QueryState<CTX: QueryContext, C: QueryCache<CTX>> {
57 shards: Sharded<QueryStateShard<CTX, C::Key, C::Sharded>>,
58 #[cfg(debug_assertions)]
59 pub(super) cache_hits: AtomicUsize,
62 impl<CTX: QueryContext, C: QueryCache<CTX>> QueryState<CTX, C> {
63 pub(super) fn get_lookup<K2: Hash>(
66 ) -> QueryLookup<'tcx, CTX, C::Key, C::Sharded> {
67 // We compute the key's hash once and then use it for both the
68 // shard lookup and the hashmap lookup. This relies on the fact
69 // that both of them use `FxHasher`.
70 let mut hasher = FxHasher::default();
71 key.hash(&mut hasher);
72 let key_hash = hasher.finish();
74 let shard = self.shards.get_shard_index_by_hash(key_hash);
75 let lock = self.shards.get_shard_by_index(shard).lock();
76 QueryLookup { key_hash, shard, lock }
80 /// Indicates the state of a query for a given key in a query map.
81 enum QueryResult<CTX: QueryContext> {
82 /// An already executing query. The query job can be used to await for its completion.
83 Started(QueryJob<CTX>),
85 /// The query panicked. Queries trying to wait on this will raise a fatal error which will
90 impl<CTX: QueryContext, C: QueryCache<CTX>> QueryState<CTX, C> {
91 pub(super) fn iter_results<R>(
93 f: impl for<'a> FnOnce(
94 Box<dyn Iterator<Item = (&'a C::Key, &'a C::Value, DepNodeIndex)> + 'a>,
97 self.cache.iter(&self.shards, |shard| &mut shard.cache, f)
99 pub(super) fn all_inactive(&self) -> bool {
100 let shards = self.shards.lock_shards();
101 shards.iter().all(|shard| shard.active.is_empty())
104 pub(super) fn try_collect_active_jobs(
107 make_query: fn(C::Key) -> CTX::Query,
108 jobs: &mut FxHashMap<QueryJobId<CTX::DepKind>, QueryJobInfo<CTX>>,
113 // We use try_lock_shards here since we are called from the
114 // deadlock handler, and this shouldn't be locked.
115 let shards = self.shards.try_lock_shards()?;
116 let shards = shards.iter().enumerate();
117 jobs.extend(shards.flat_map(|(shard_id, shard)| {
118 shard.active.iter().filter_map(move |(k, v)| {
119 if let QueryResult::Started(ref job) = *v {
121 QueryJobId { job: job.id, shard: u16::try_from(shard_id).unwrap(), kind };
122 let info = QueryInfo { span: job.span, query: make_query(k.clone()) };
123 Some((id, QueryJobInfo { info, job: job.clone() }))
134 impl<CTX: QueryContext, C: QueryCache<CTX>> Default for QueryState<CTX, C> {
135 fn default() -> QueryState<CTX, C> {
138 shards: Default::default(),
139 #[cfg(debug_assertions)]
140 cache_hits: AtomicUsize::new(0),
145 /// Values used when checking a query cache which can be reused on a cache-miss to execute the query.
146 pub(crate) struct QueryLookup<'tcx, CTX: QueryContext, K, C> {
147 pub(super) key_hash: u64,
149 pub(super) lock: LockGuard<'tcx, QueryStateShard<CTX, K, C>>,
152 /// A type representing the responsibility to execute the job in the `job` field.
153 /// This will poison the relevant query if dropped.
154 struct JobOwner<'tcx, CTX: QueryContext, C>
157 C::Key: Eq + Hash + Clone + Debug,
160 state: &'tcx QueryState<CTX, C>,
162 id: QueryJobId<CTX::DepKind>,
165 impl<'tcx, C> JobOwner<'tcx, TyCtxt<'tcx>, C>
167 C: QueryCache<TyCtxt<'tcx>> + 'tcx,
168 C::Key: Eq + Hash + Clone + Debug,
171 /// Either gets a `JobOwner` corresponding the query, allowing us to
172 /// start executing the query, or returns with the result of the query.
173 /// This function assumes that `try_get_cached` is already called and returned `lookup`.
174 /// If the query is executing elsewhere, this will wait for it and return the result.
175 /// If the query panicked, this will silently panic.
177 /// This function is inlined because that results in a noticeable speed-up
178 /// for some compile-time benchmarks.
180 fn try_start<'a, 'b, Q>(
184 mut lookup: QueryLookup<'a, TyCtxt<'tcx>, C::Key, C::Sharded>,
185 ) -> TryGetJob<'b, TyCtxt<'tcx>, C>
187 Q: QueryDescription<TyCtxt<'tcx>, Key = C::Key, Value = C::Value, Cache = C>,
189 let lock = &mut *lookup.lock;
191 let (latch, mut _query_blocked_prof_timer) = match lock.active.entry((*key).clone()) {
192 Entry::Occupied(mut entry) => {
193 match entry.get_mut() {
194 QueryResult::Started(job) => {
195 // For parallel queries, we'll block and wait until the query running
196 // in another thread has completed. Record how long we wait in the
198 let _query_blocked_prof_timer = if cfg!(parallel_compiler) {
199 Some(tcx.prof.query_blocked())
204 // Create the id of the job we're waiting for
205 let id = QueryJobId::new(job.id, lookup.shard, Q::DEP_KIND);
207 (job.latch(id), _query_blocked_prof_timer)
209 QueryResult::Poisoned => FatalError.raise(),
212 Entry::Vacant(entry) => {
213 // No job entry for this query. Return a new one to be started later.
215 // Generate an id unique within this shard.
216 let id = lock.jobs.checked_add(1).unwrap();
218 let id = QueryShardJobId(NonZeroU32::new(id).unwrap());
220 let global_id = QueryJobId::new(id, lookup.shard, Q::DEP_KIND);
222 let job = tls::with_related_context(tcx, |icx| QueryJob::new(id, span, icx.query));
224 entry.insert(QueryResult::Started(job));
227 JobOwner { state: Q::query_state(tcx), id: global_id, key: (*key).clone() };
228 return TryGetJob::NotYetStarted(owner);
231 mem::drop(lookup.lock);
233 // If we are single-threaded we know that we have cycle error,
234 // so we just return the error.
235 #[cfg(not(parallel_compiler))]
236 return TryGetJob::Cycle(cold_path(|| {
237 Q::handle_cycle_error(tcx, latch.find_cycle_in_stack(tcx, span))
240 // With parallel queries we might just have to wait on some other
242 #[cfg(parallel_compiler)]
244 let result = latch.wait_on(tcx, span);
246 if let Err(cycle) = result {
247 return TryGetJob::Cycle(Q::handle_cycle_error(tcx, cycle));
250 let cached = tcx.try_get_cached(
253 |value, index| (value.clone(), index),
254 |_, _| panic!("value must be in cache after waiting"),
257 if let Some(prof_timer) = _query_blocked_prof_timer.take() {
258 prof_timer.finish_with_query_invocation_id(cached.1.into());
261 return TryGetJob::JobCompleted(cached);
266 impl<'tcx, CTX: QueryContext, C> JobOwner<'tcx, CTX, C>
269 C::Key: Eq + Hash + Clone + Debug,
272 /// Completes the query by updating the query cache with the `result`,
273 /// signals the waiter and forgets the JobOwner, so it won't poison the query
275 fn complete(self, tcx: CTX, result: &C::Value, dep_node_index: DepNodeIndex) {
276 // We can move out of `self` here because we `mem::forget` it below
277 let key = unsafe { ptr::read(&self.key) };
278 let state = self.state;
280 // Forget ourself so our destructor won't poison the query
284 let result = result.clone();
285 let mut lock = state.shards.get_shard_by_value(&key).lock();
286 let job = match lock.active.remove(&key).unwrap() {
287 QueryResult::Started(job) => job,
288 QueryResult::Poisoned => panic!(),
290 state.cache.complete(tcx, &mut lock.cache, key, result, dep_node_index);
294 job.signal_complete();
299 fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
301 F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R,
303 let diagnostics = Lock::new(ThinVec::new());
304 let result = f(Some(&diagnostics));
305 (result, diagnostics.into_inner())
308 impl<'tcx, CTX: QueryContext, C: QueryCache<CTX>> Drop for JobOwner<'tcx, CTX, C>
310 C::Key: Eq + Hash + Clone + Debug,
316 // Poison the query so jobs waiting on it panic.
317 let state = self.state;
318 let shard = state.shards.get_shard_by_value(&self.key);
320 let mut shard = shard.lock();
321 let job = match shard.active.remove(&self.key).unwrap() {
322 QueryResult::Started(job) => job,
323 QueryResult::Poisoned => panic!(),
325 shard.active.insert(self.key.clone(), QueryResult::Poisoned);
328 // Also signal the completion of the job, so waiters
329 // will continue execution.
330 job.signal_complete();
335 pub(crate) struct CycleError<Q> {
336 /// The query and related span that uses the cycle.
337 pub(super) usage: Option<(Span, Q)>,
338 pub(super) cycle: Vec<QueryInfo<Q>>,
341 /// The result of `try_start`.
342 enum TryGetJob<'tcx, CTX: QueryContext, C: QueryCache<CTX>>
344 C::Key: Eq + Hash + Clone + Debug,
347 /// The query is not yet started. Contains a guard to the cache eventually used to start it.
348 NotYetStarted(JobOwner<'tcx, CTX, C>),
350 /// The query was already completed.
351 /// Returns the result of the query and its dep-node index
352 /// if it succeeded or a cycle error if it failed.
353 #[cfg(parallel_compiler)]
354 JobCompleted((C::Value, DepNodeIndex)),
356 /// Trying to execute the query resulted in a cycle.
360 impl QueryContext for TyCtxt<'tcx> {
361 type Query = Query<'tcx>;
363 fn session(&self) -> &Session {
367 fn def_path_str(&self, def_id: DefId) -> String {
368 TyCtxt::def_path_str(*self, def_id)
371 fn read_query_job<R>(&self, op: impl FnOnce(Option<QueryJobId<Self::DepKind>>) -> R) -> R {
372 tls::with_related_context(*self, move |icx| op(icx.query))
375 fn try_collect_active_jobs(
377 ) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self>>> {
378 self.queries.try_collect_active_jobs()
382 impl<'tcx> TyCtxt<'tcx> {
383 /// Executes a job by changing the `ImplicitCtxt` to point to the
384 /// new query job while it executes. It returns the diagnostics
385 /// captured during execution and the actual result.
387 fn start_query<F, R>(
389 token: QueryJobId<crate::dep_graph::DepKind>,
390 diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
394 F: FnOnce(TyCtxt<'tcx>) -> R,
396 // The `TyCtxt` stored in TLS has the same global interner lifetime
397 // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
398 // when accessing the `ImplicitCtxt`.
399 tls::with_related_context(self, move |current_icx| {
400 // Update the `ImplicitCtxt` to point to our new query job.
401 let new_icx = tls::ImplicitCtxt {
405 layout_depth: current_icx.layout_depth,
406 task_deps: current_icx.task_deps,
409 // Use the `ImplicitCtxt` while we execute the query.
410 tls::enter_context(&new_icx, |_| compute(self))
416 pub(super) fn report_cycle(
418 CycleError { usage, cycle: stack }: CycleError<Query<'tcx>>,
419 ) -> DiagnosticBuilder<'tcx> {
420 assert!(!stack.is_empty());
422 let fix_span = |span: Span, query: &Query<'tcx>| {
423 self.sess.source_map().guess_head_span(query.default_span(self, span))
426 // Disable naming impls with types in this path, since that
427 // sometimes cycles itself, leading to extra cycle errors.
428 // (And cycle errors around impls tend to occur during the
429 // collect/coherence phases anyhow.)
430 ty::print::with_forced_impl_filename_line(|| {
431 let span = fix_span(stack[1 % stack.len()].span, &stack[0].query);
432 let mut err = struct_span_err!(
436 "cycle detected when {}",
437 stack[0].query.describe(self)
440 for i in 1..stack.len() {
441 let query = &stack[i].query;
442 let span = fix_span(stack[(i + 1) % stack.len()].span, query);
443 err.span_note(span, &format!("...which requires {}...", query.describe(self)));
447 "...which again requires {}, completing the cycle",
448 stack[0].query.describe(self)
451 if let Some((span, query)) = usage {
453 fix_span(span, &query),
454 &format!("cycle used when {}", query.describe(self)),
462 pub fn try_print_query_stack(handler: &Handler) {
463 eprintln!("query stack during panic:");
465 // Be careful reyling on global state here: this code is called from
466 // a panic hook, which means that the global `Handler` may be in a weird
467 // state if it was responsible for triggering the panic.
468 tls::with_context_opt(|icx| {
469 if let Some(icx) = icx {
470 let query_map = icx.tcx.queries.try_collect_active_jobs();
472 let mut current_query = icx.query;
475 while let Some(query) = current_query {
477 if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query)) {
482 let mut diag = Diagnostic::new(
487 query_info.info.query.name(),
488 query_info.info.query.describe(icx.tcx)
492 icx.tcx.sess.source_map().guess_head_span(query_info.info.span).into();
493 handler.force_print_diagnostic(diag);
495 current_query = query_info.job.parent;
501 eprintln!("end of query stack");
504 /// Checks if the query is already computed and in the cache.
505 /// It returns the shard index and a lock guard to the shard,
506 /// which will be used if the query is not in the cache and we need
509 fn try_get_cached<C, R, OnHit, OnMiss>(
511 state: &'tcx QueryState<TyCtxt<'tcx>, C>,
513 // `on_hit` can be called while holding a lock to the query cache
518 C: QueryCache<TyCtxt<'tcx>>,
519 OnHit: FnOnce(&C::Value, DepNodeIndex) -> R,
520 OnMiss: FnOnce(C::Key, QueryLookup<'_, TyCtxt<'tcx>, C::Key, C::Sharded>) -> R,
524 QueryStateShard::<TyCtxt<'tcx>, C::Key, C::Sharded>::get_cache,
527 if unlikely!(self.prof.enabled()) {
528 self.prof.query_cache_hit(index.into());
530 #[cfg(debug_assertions)]
532 state.cache_hits.fetch_add(1, Ordering::Relaxed);
541 pub(super) fn get_query<Q: QueryDescription<TyCtxt<'tcx>> + 'tcx>(
546 debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span);
549 Q::query_state(self),
552 self.dep_graph.read_index(index);
555 |key, lookup| self.try_execute_query::<Q>(span, key, lookup),
560 fn try_execute_query<Q: QueryDescription<TyCtxt<'tcx>> + 'tcx>(
568 <Q::Cache as QueryCache<TyCtxt<'tcx>>>::Sharded,
571 let job = match JobOwner::try_start::<Q>(self, span, &key, lookup) {
572 TryGetJob::NotYetStarted(job) => job,
573 TryGetJob::Cycle(result) => return result,
574 #[cfg(parallel_compiler)]
575 TryGetJob::JobCompleted((v, index)) => {
576 self.dep_graph.read_index(index);
581 // Fast path for when incr. comp. is off. `to_dep_node` is
582 // expensive for some `DepKind`s.
583 if !self.dep_graph.is_fully_enabled() {
584 let null_dep_node = DepNode::new_no_params(DepKind::NULL);
585 return self.force_query_with_job::<Q>(key, job, null_dep_node).0;
589 let prof_timer = self.prof.query_provider();
591 let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
592 self.start_query(job.id, diagnostics, |tcx| {
593 tcx.dep_graph.with_anon_task(Q::DEP_KIND, || Q::compute(tcx, key))
597 prof_timer.finish_with_query_invocation_id(dep_node_index.into());
599 self.dep_graph.read_index(dep_node_index);
601 if unlikely!(!diagnostics.is_empty()) {
604 .store_diagnostics_for_anon_node(dep_node_index, diagnostics);
607 job.complete(self, &result, dep_node_index);
612 let dep_node = Q::to_dep_node(self, &key);
615 // The diagnostics for this query will be
616 // promoted to the current session during
617 // `try_mark_green()`, so we can ignore them here.
618 let loaded = self.start_query(job.id, None, |tcx| {
619 let marked = tcx.dep_graph.try_mark_green_and_read(tcx, &dep_node);
620 marked.map(|(prev_dep_node_index, dep_node_index)| {
622 tcx.load_from_disk_and_cache_in_memory::<Q>(
632 if let Some((result, dep_node_index)) = loaded {
633 job.complete(self, &result, dep_node_index);
638 let (result, dep_node_index) = self.force_query_with_job::<Q>(key, job, dep_node);
639 self.dep_graph.read_index(dep_node_index);
643 fn load_from_disk_and_cache_in_memory<Q: QueryDescription<TyCtxt<'tcx>>>(
646 prev_dep_node_index: SerializedDepNodeIndex,
647 dep_node_index: DepNodeIndex,
648 dep_node: &DepNode<crate::dep_graph::DepKind>,
650 // Note this function can be called concurrently from the same query
651 // We must ensure that this is handled correctly.
653 debug_assert!(self.dep_graph.is_green(dep_node));
655 // First we try to load the result from the on-disk cache.
656 let result = if Q::cache_on_disk(self, key.clone(), None) {
657 let prof_timer = self.prof.incr_cache_loading();
658 let result = Q::try_load_from_disk(self, prev_dep_node_index);
659 prof_timer.finish_with_query_invocation_id(dep_node_index.into());
661 // We always expect to find a cached result for things that
662 // can be forced from `DepNode`.
664 !dep_node.kind.can_reconstruct_query_key() || result.is_some(),
665 "missing on-disk cache entry for {:?}",
670 // Some things are never cached on disk.
674 let result = if let Some(result) = result {
677 // We could not load a result from the on-disk cache, so
679 let prof_timer = self.prof.query_provider();
681 // The dep-graph for this computation is already in-place.
682 let result = self.dep_graph.with_ignore(|| Q::compute(self, key));
684 prof_timer.finish_with_query_invocation_id(dep_node_index.into());
689 // If `-Zincremental-verify-ich` is specified, re-hash results from
690 // the cache and make sure that they have the expected fingerprint.
691 if unlikely!(self.sess.opts.debugging_opts.incremental_verify_ich) {
692 self.incremental_verify_ich::<Q>(&result, dep_node, dep_node_index);
700 fn incremental_verify_ich<Q: QueryDescription<TyCtxt<'tcx>>>(
703 dep_node: &DepNode<crate::dep_graph::DepKind>,
704 dep_node_index: DepNodeIndex,
706 use rustc_data_structures::fingerprint::Fingerprint;
709 Some(self.dep_graph.fingerprint_of(dep_node_index))
710 == self.dep_graph.prev_fingerprint_of(dep_node),
711 "fingerprint for green query instance not loaded from cache: {:?}",
715 debug!("BEGIN verify_ich({:?})", dep_node);
716 let mut hcx = self.create_stable_hashing_context();
718 let new_hash = Q::hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO);
719 debug!("END verify_ich({:?})", dep_node);
721 let old_hash = self.dep_graph.fingerprint_of(dep_node_index);
723 assert!(new_hash == old_hash, "found unstable fingerprints for {:?}", dep_node,);
727 fn force_query_with_job<Q: QueryDescription<TyCtxt<'tcx>> + 'tcx>(
730 job: JobOwner<'tcx, Self, Q::Cache>,
731 dep_node: DepNode<crate::dep_graph::DepKind>,
732 ) -> (Q::Value, DepNodeIndex) {
733 // If the following assertion triggers, it can have two reasons:
734 // 1. Something is wrong with DepNode creation, either here or
735 // in `DepGraph::try_mark_green()`.
736 // 2. Two distinct query keys get mapped to the same `DepNode`
737 // (see for example #48923).
739 !self.dep_graph.dep_node_exists(&dep_node),
740 "forcing query with already existing `DepNode`\n\
747 let prof_timer = self.prof.query_provider();
749 let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
750 self.start_query(job.id, diagnostics, |tcx| {
752 tcx.dep_graph.with_eval_always_task(
760 tcx.dep_graph.with_task(dep_node, tcx, key, Q::compute, Q::hash_result)
765 prof_timer.finish_with_query_invocation_id(dep_node_index.into());
767 if unlikely!(!diagnostics.is_empty()) {
768 if dep_node.kind != DepKind::NULL {
769 self.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics);
773 job.complete(self, &result, dep_node_index);
775 (result, dep_node_index)
778 /// Ensure that either this query has all green inputs or been executed.
779 /// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
781 /// This function is particularly useful when executing passes for their
782 /// side-effects -- e.g., in order to report errors for erroneous programs.
784 /// Note: The optimization is only available during incr. comp.
785 pub(super) fn ensure_query<Q: QueryDescription<TyCtxt<'tcx>> + 'tcx>(self, key: Q::Key) {
787 let _ = self.get_query::<Q>(DUMMY_SP, key);
791 // Ensuring an anonymous query makes no sense
794 let dep_node = Q::to_dep_node(self, &key);
796 match self.dep_graph.try_mark_green_and_read(self, &dep_node) {
798 // A None return from `try_mark_green_and_read` means that this is either
799 // a new dep node or that the dep node has already been marked red.
800 // Either way, we can't call `dep_graph.read()` as we don't have the
801 // DepNodeIndex. We must invoke the query itself. The performance cost
802 // this introduces should be negligible as we'll immediately hit the
803 // in-memory cache, or another query down the line will.
804 let _ = self.get_query::<Q>(DUMMY_SP, key);
806 Some((_, dep_node_index)) => {
807 self.prof.query_cache_hit(dep_node_index.into());
813 pub(super) fn force_query<Q: QueryDescription<TyCtxt<'tcx>> + 'tcx>(
817 dep_node: DepNode<crate::dep_graph::DepKind>,
819 // We may be concurrently trying both execute and force a query.
820 // Ensure that only one of them runs the query.
823 Q::query_state(self),
826 // Cache hit, do nothing
829 let job = match JobOwner::try_start::<Q>(self, span, &key, lookup) {
830 TryGetJob::NotYetStarted(job) => job,
831 TryGetJob::Cycle(_) => return,
832 #[cfg(parallel_compiler)]
833 TryGetJob::JobCompleted(_) => return,
835 self.force_query_with_job::<Q>(key, job, dep_node);
841 macro_rules! handle_cycle_error {
842 ([][$tcx: expr, $error:expr]) => {{
843 $tcx.report_cycle($error).emit();
844 Value::from_cycle_error($tcx)
846 ([fatal_cycle $($rest:tt)*][$tcx:expr, $error:expr]) => {{
847 $tcx.report_cycle($error).emit();
848 $tcx.sess.abort_if_errors();
851 ([cycle_delay_bug $($rest:tt)*][$tcx:expr, $error:expr]) => {{
852 $tcx.report_cycle($error).delay_as_bug();
853 Value::from_cycle_error($tcx)
855 ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
856 handle_cycle_error!([$($($modifiers)*)*][$($args)*])
860 macro_rules! is_anon {
864 ([anon $($rest:tt)*]) => {{
867 ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
868 is_anon!([$($($modifiers)*)*])
872 macro_rules! is_eval_always {
876 ([eval_always $($rest:tt)*]) => {{
879 ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
880 is_eval_always!([$($($modifiers)*)*])
884 macro_rules! query_storage {
885 (<$tcx:tt>[][$K:ty, $V:ty]) => {
886 <<$K as Key>::CacheSelector as CacheSelector<TyCtxt<$tcx>, $K, $V>>::Cache
888 (<$tcx:tt>[storage($ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => {
891 (<$tcx:tt>[$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
892 query_storage!(<$tcx>[$($($modifiers)*)*][$($args)*])
896 macro_rules! hash_result {
897 ([][$hcx:expr, $result:expr]) => {{
898 dep_graph::hash_result($hcx, &$result)
900 ([no_hash $($rest:tt)*][$hcx:expr, $result:expr]) => {{
903 ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
904 hash_result!([$($($modifiers)*)*][$($args)*])
908 macro_rules! define_queries {
909 (<$tcx:tt> $($category:tt {
910 $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*
912 define_queries_inner! { <$tcx>
913 $($( $(#[$attr])* category<$category> [$($modifiers)*] fn $name: $node($K) -> $V,)*)*
918 macro_rules! define_queries_inner {
920 $($(#[$attr:meta])* category<$category:tt>
921 [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*) => {
925 rustc_data_structures::stable_hasher::HashStable,
926 rustc_data_structures::stable_hasher::StableHasher,
927 ich::StableHashingContext
929 use rustc_data_structures::profiling::ProfileCategory;
931 define_queries_struct! {
933 input: ($(([$($modifiers)*] [$($attr)*] [$name]))*)
936 #[allow(nonstandard_style)]
937 #[derive(Clone, Debug)]
938 pub enum Query<$tcx> {
939 $($(#[$attr])* $name($K)),*
942 impl<$tcx> Query<$tcx> {
943 pub fn name(&self) -> &'static str {
945 $(Query::$name(_) => stringify!($name),)*
949 pub fn describe(&self, tcx: TyCtxt<$tcx>) -> Cow<'static, str> {
950 let (r, name) = match *self {
951 $(Query::$name(key) => {
952 (queries::$name::describe(tcx, key), stringify!($name))
955 if tcx.sess.verbose() {
956 format!("{} [{}]", r, name).into()
962 // FIXME(eddyb) Get more valid `Span`s on queries.
963 pub fn default_span(&self, tcx: TyCtxt<$tcx>, span: Span) -> Span {
964 if !span.is_dummy() {
967 // The `def_span` query is used to calculate `default_span`,
968 // so exit to avoid infinite recursion.
969 if let Query::def_span(..) = *self {
973 $(Query::$name(key) => key.default_span(tcx),)*
978 impl<'a, $tcx> HashStable<StableHashingContext<'a>> for Query<$tcx> {
979 fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
980 mem::discriminant(self).hash_stable(hcx, hasher);
982 $(Query::$name(key) => key.hash_stable(hcx, hasher),)*
988 use std::marker::PhantomData;
990 $(#[allow(nonstandard_style)]
991 pub struct $name<$tcx> {
992 data: PhantomData<&$tcx ()>
996 $(impl<$tcx> QueryConfig<TyCtxt<$tcx>> for queries::$name<$tcx> {
999 const NAME: &'static str = stringify!($name);
1000 const CATEGORY: ProfileCategory = $category;
1003 impl<$tcx> QueryAccessors<TyCtxt<$tcx>> for queries::$name<$tcx> {
1004 const ANON: bool = is_anon!([$($modifiers)*]);
1005 const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]);
1006 const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$node;
1008 type Cache = query_storage!(<$tcx>[$($modifiers)*][$K, $V]);
1011 fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<TyCtxt<$tcx>, Self::Cache> {
1017 fn to_dep_node(tcx: TyCtxt<$tcx>, key: &Self::Key) -> DepNode {
1018 DepConstructor::$node(tcx, *key)
1022 fn compute(tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value {
1023 let provider = tcx.queries.providers.get(key.query_crate())
1024 // HACK(eddyb) it's possible crates may be loaded after
1025 // the query engine is created, and because crate loading
1026 // is not yet integrated with the query engine, such crates
1027 // would be missing appropriate entries in `providers`.
1028 .unwrap_or(&tcx.queries.fallback_extern_providers)
1034 _hcx: &mut StableHashingContext<'_>,
1035 _result: &Self::Value
1036 ) -> Option<Fingerprint> {
1037 hash_result!([$($modifiers)*][_hcx, _result])
1040 fn handle_cycle_error(
1042 error: CycleError<Query<'tcx>>
1044 handle_cycle_error!([$($modifiers)*][tcx, error])
1048 #[derive(Copy, Clone)]
1049 pub struct TyCtxtEnsure<'tcx> {
1050 pub tcx: TyCtxt<'tcx>,
1053 impl TyCtxtEnsure<$tcx> {
1056 pub fn $name(self, key: $K) {
1057 self.tcx.ensure_query::<queries::$name<'_>>(key)
1061 #[derive(Copy, Clone)]
1062 pub struct TyCtxtAt<'tcx> {
1063 pub tcx: TyCtxt<'tcx>,
1067 impl Deref for TyCtxtAt<'tcx> {
1068 type Target = TyCtxt<'tcx>;
1070 fn deref(&self) -> &Self::Target {
1076 /// Returns a transparent wrapper for `TyCtxt`, which ensures queries
1077 /// are executed instead of just returning their results.
1079 pub fn ensure(self) -> TyCtxtEnsure<$tcx> {
1085 /// Returns a transparent wrapper for `TyCtxt` which uses
1086 /// `span` as the location of queries performed through it.
1088 pub fn at(self, span: Span) -> TyCtxtAt<$tcx> {
1097 pub fn $name(self, key: $K) -> $V {
1098 self.at(DUMMY_SP).$name(key)
1101 /// All self-profiling events generated by the query engine use
1102 /// virtual `StringId`s for their `event_id`. This method makes all
1103 /// those virtual `StringId`s point to actual strings.
1105 /// If we are recording only summary data, the ids will point to
1106 /// just the query names. If we are recording query keys too, we
1107 /// allocate the corresponding strings here.
1108 pub fn alloc_self_profile_query_strings(self) {
1109 use crate::ty::query::profiling_support::{
1110 alloc_self_profile_query_strings_for_query_cache,
1111 QueryKeyStringCache,
1114 if !self.prof.enabled() {
1118 let mut string_cache = QueryKeyStringCache::new();
1121 alloc_self_profile_query_strings_for_query_cache(
1124 &self.queries.$name,
1131 impl TyCtxtAt<$tcx> {
1134 pub fn $name(self, key: $K) -> $V {
1135 self.tcx.get_query::<queries::$name<'_>>(self.span, key)
1139 define_provider_struct! {
1141 input: ($(([$($modifiers)*] [$name] [$K] [$V]))*)
1144 impl<$tcx> Copy for Providers<$tcx> {}
1145 impl<$tcx> Clone for Providers<$tcx> {
1146 fn clone(&self) -> Self { *self }
1151 macro_rules! define_queries_struct {
1153 input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => {
1154 pub struct Queries<$tcx> {
1155 /// This provides access to the incrimental comilation on-disk cache for query results.
1156 /// Do not access this directly. It is only meant to be used by
1157 /// `DepGraph::try_mark_green()` and the query infrastructure.
1158 pub(crate) on_disk_cache: OnDiskCache<'tcx>,
1160 providers: IndexVec<CrateNum, Providers<$tcx>>,
1161 fallback_extern_providers: Box<Providers<$tcx>>,
1163 $($(#[$attr])* $name: QueryState<
1165 <queries::$name<$tcx> as QueryAccessors<TyCtxt<'tcx>>>::Cache,
1169 impl<$tcx> Queries<$tcx> {
1171 providers: IndexVec<CrateNum, Providers<$tcx>>,
1172 fallback_extern_providers: Providers<$tcx>,
1173 on_disk_cache: OnDiskCache<'tcx>,
1177 fallback_extern_providers: Box::new(fallback_extern_providers),
1179 $($name: Default::default()),*
1183 pub(crate) fn try_collect_active_jobs(
1185 ) -> Option<FxHashMap<QueryJobId<crate::dep_graph::DepKind>, QueryJobInfo<TyCtxt<'tcx>>>> {
1186 let mut jobs = FxHashMap::default();
1189 self.$name.try_collect_active_jobs(
1190 <queries::$name<'tcx> as QueryAccessors<TyCtxt<'tcx>>>::DEP_KIND,
1202 macro_rules! define_provider_struct {
1204 input: ($(([$($modifiers:tt)*] [$name:ident] [$K:ty] [$R:ty]))*)) => {
1205 pub struct Providers<$tcx> {
1206 $(pub $name: fn(TyCtxt<$tcx>, $K) -> $R,)*
1209 impl<$tcx> Default for Providers<$tcx> {
1210 fn default() -> Self {
1211 $(fn $name<$tcx>(_: TyCtxt<$tcx>, key: $K) -> $R {
1212 bug!("`tcx.{}({:?})` unsupported by its crate",
1213 stringify!($name), key);
1215 Providers { $($name),* }