//! The implementation of the query system itself. This defines the macros that
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.

use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeParams};
use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
use crate::query::caches::QueryCache;
use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt};
use crate::query::job::{
    report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId,
};
use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};

use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHasher};
use rustc_data_structures::sharded::{get_shard_index_by_hash, Sharded};
use rustc_data_structures::sync::{Lock, LockGuard};
use rustc_data_structures::thin_vec::ThinVec;
#[cfg(not(parallel_compiler))]
use rustc_errors::DiagnosticBuilder;
use rustc_errors::{Diagnostic, FatalError};
use rustc_span::{Span, DUMMY_SP};
use std::cell::Cell;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::hash::{Hash, Hasher};
use std::mem;
use std::num::NonZeroU32;
use std::ptr;
#[cfg(debug_assertions)]
use std::sync::atomic::{AtomicUsize, Ordering};

pub struct QueryCacheStore<C: QueryCache> {
    cache: C,
    shards: Sharded<C::Sharded>,
    #[cfg(debug_assertions)]
    pub cache_hits: AtomicUsize,
}

impl<C: QueryCache + Default> Default for QueryCacheStore<C> {
    fn default() -> Self {
        QueryCacheStore {
            cache: C::default(),
            shards: Default::default(),
            #[cfg(debug_assertions)]
            cache_hits: AtomicUsize::new(0),
        }
    }
}

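// Both the result cache and the in-flight query state below are sharded by
// key hash, so concurrent queries mostly contend on different locks.
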
/// Values used when checking a query cache which can be reused on a cache miss to execute the query.
pub struct QueryLookup {
    pub(super) key_hash: u64,
    shard: usize,
}

// We compute the key's hash once and then use it for both the
// shard lookup and the hashmap lookup. This relies on the fact
// that both of them use `FxHasher`.
fn hash_for_shard<K: Hash>(key: &K) -> u64 {
    let mut hasher = FxHasher::default();
    key.hash(&mut hasher);
    hasher.finish()
}

impl<C: QueryCache> QueryCacheStore<C> {
    pub(super) fn get_lookup<'tcx>(
        &'tcx self,
        key: &C::Key,
    ) -> (QueryLookup, LockGuard<'tcx, C::Sharded>) {
        let key_hash = hash_for_shard(key);
        let shard = get_shard_index_by_hash(key_hash);
        let lock = self.shards.get_shard_by_index(shard).lock();
        (QueryLookup { key_hash, shard }, lock)
    }

    pub fn iter_results(&self, f: &mut dyn FnMut(&C::Key, &C::Value, DepNodeIndex)) {
        self.cache.iter(&self.shards, f)
    }
}

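// `iter_results` visits every shard in turn; callers use it to walk all cached
// (key, value, dep-node-index) triples, e.g. when query results are written
// out to the incremental cache.
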
struct QueryStateShard<D, K> {
    active: FxHashMap<K, QueryResult<D>>,

    /// Used to generate unique ids for active jobs.
    jobs: u32,
}

impl<D, K> Default for QueryStateShard<D, K> {
    fn default() -> QueryStateShard<D, K> {
        QueryStateShard { active: Default::default(), jobs: 0 }
    }
}

pub struct QueryState<D, K> {
    shards: Sharded<QueryStateShard<D, K>>,
}

/// Indicates the state of a query for a given key in a query map.
enum QueryResult<D> {
    /// An already executing query. The query job can be used to await its completion.
    Started(QueryJob<D>),

    /// The query panicked. Queries trying to wait on this will raise a fatal error which will
    /// silently panic.
    Poisoned,
}

impl<D, K> QueryState<D, K>
where
    D: Copy + Clone + Eq + Hash,
    K: Eq + Hash + Clone + Debug,
{
    pub fn all_inactive(&self) -> bool {
        let shards = self.shards.lock_shards();
        shards.iter().all(|shard| shard.active.is_empty())
    }

    pub fn try_collect_active_jobs<CTX: Copy>(
        &self,
        tcx: CTX,
        kind: D,
        make_query: fn(CTX, K) -> QueryStackFrame,
        jobs: &mut QueryMap<D>,
    ) -> Option<()> {
        // We use try_lock_shards here since we are called from the
        // deadlock handler, and this shouldn't be locked.
        let shards = self.shards.try_lock_shards()?;
        for (shard_id, shard) in shards.iter().enumerate() {
            for (k, v) in shard.active.iter() {
                if let QueryResult::Started(ref job) = *v {
                    let id = QueryJobId::new(job.id, shard_id, kind);
                    let info = QueryInfo { span: job.span, query: make_query(tcx, k.clone()) };
                    jobs.insert(id, QueryJobInfo { info, job: job.clone() });
                }
            }
        }

        Some(())
    }
}

impl<D, K> Default for QueryState<D, K> {
    fn default() -> QueryState<D, K> {
        QueryState { shards: Default::default() }
    }
}

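// `QueryState` only tracks queries that are currently executing; finished
// results live in `QueryCacheStore`. The `active` maps above are what cycle
// detection and the deadlock handler walk via `try_collect_active_jobs`.
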
/// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped.
struct JobOwner<'tcx, D, C>
where
    D: Copy + Clone + Eq + Hash,
    C: QueryCache,
{
    state: &'tcx QueryState<D, C::Key>,
    cache: &'tcx QueryCacheStore<C>,
    key: C::Key,
    id: QueryJobId<D>,
}

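// In the non-parallel compiler, finding an already-started job for the same
// key can only mean query recursion, so `mk_cycle` turns it straight into a
// reported cycle error instead of blocking.
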
#[cfg(not(parallel_compiler))]
fn mk_cycle<CTX, V, R>(
    tcx: CTX,
    root: QueryJobId<CTX::DepKind>,
    span: Span,
    handle_cycle_error: fn(CTX, DiagnosticBuilder<'_>) -> V,
    cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
) -> R
where
    CTX: QueryContext,
{
    let error: CycleError = root.find_cycle_in_stack(
        tcx.try_collect_active_jobs().unwrap(),
        &tcx.current_query_job(),
        span,
    );
    let error = report_cycle(tcx.dep_context().sess(), error);
    let value = handle_cycle_error(tcx, error);
    cache.store_nocache(value)
}

impl<'tcx, D, C> JobOwner<'tcx, D, C>
where
    D: Copy + Clone + Eq + Hash,
    C: QueryCache,
{
    /// Either gets a `JobOwner` corresponding to the query, allowing us to
    /// start executing the query, or returns with the result of the query.
    /// This function assumes that `try_get_cached` is already called and returned `lookup`.
    /// If the query is executing elsewhere, this will wait for it and return the result.
    /// If the query panicked, this will silently panic.
    ///
    /// This function is inlined because that results in a noticeable speed-up
    /// for some compile-time benchmarks.
    #[inline(always)]
    fn try_start<'b, CTX>(
        tcx: CTX,
        state: &'b QueryState<CTX::DepKind, C::Key>,
        cache: &'b QueryCacheStore<C>,
        span: Span,
        key: C::Key,
        lookup: QueryLookup,
        query: &QueryVtable<CTX, C::Key, C::Value>,
    ) -> TryGetJob<'b, CTX::DepKind, C>
    where
        CTX: QueryContext,
    {
        let shard = lookup.shard;
        let mut state_lock = state.shards.get_shard_by_index(shard).lock();
        let lock = &mut *state_lock;

        match lock.active.entry(key) {
            Entry::Vacant(entry) => {
                // Generate an id unique within this shard.
                let id = lock.jobs.checked_add(1).unwrap();
                lock.jobs = id;
                let id = QueryShardJobId(NonZeroU32::new(id).unwrap());

                let job = tcx.current_query_job();
                let job = QueryJob::new(id, span, job);

                let key = entry.key().clone();
                entry.insert(QueryResult::Started(job));

                let global_id = QueryJobId::new(id, shard, query.dep_kind);
                let owner = JobOwner { state, cache, id: global_id, key };
                return TryGetJob::NotYetStarted(owner);
            }
            Entry::Occupied(mut entry) => {
                match entry.get_mut() {
                    #[cfg(not(parallel_compiler))]
                    QueryResult::Started(job) => {
                        let id = QueryJobId::new(job.id, shard, query.dep_kind);
                        drop(state_lock);

                        // If we are single-threaded we know that we have a cycle error,
                        // so we just return the error.
                        return TryGetJob::Cycle(mk_cycle(
                            tcx,
                            id,
                            span,
                            query.handle_cycle_error,
                            &cache.cache,
                        ));
                    }
                    #[cfg(parallel_compiler)]
                    QueryResult::Started(job) => {
                        // For parallel queries, we'll block and wait until the query running
                        // in another thread has completed. Record how long we wait in the
                        // self-profiler.
                        let query_blocked_prof_timer = tcx.dep_context().profiler().query_blocked();

                        // Get the latch out.
                        let latch = job.latch();
                        let key = entry.key().clone();

                        drop(state_lock);

                        // With parallel queries we might just have to wait on some other
                        // thread.
                        let result = latch.wait_on(tcx.current_query_job(), span);

                        if let Err(cycle) = result {
                            let cycle = report_cycle(tcx.dep_context().sess(), cycle);
                            let value = (query.handle_cycle_error)(tcx, cycle);
                            let value = cache.cache.store_nocache(value);
                            return TryGetJob::Cycle(value);
                        }

                        let cached = cache
                            .cache
                            .lookup(cache, &key, |value, index| {
                                if unlikely!(tcx.dep_context().profiler().enabled()) {
                                    tcx.dep_context().profiler().query_cache_hit(index.into());
                                }
                                #[cfg(debug_assertions)]
                                cache.cache_hits.fetch_add(1, Ordering::Relaxed);
                                (value.clone(), index)
                            })
                            .unwrap_or_else(|_| panic!("value must be in cache after waiting"));

                        query_blocked_prof_timer.finish_with_query_invocation_id(cached.1.into());

                        return TryGetJob::JobCompleted(cached);
                    }
                    QueryResult::Poisoned => FatalError.raise(),
                }
            }
        }
    }

    /// Completes the query by updating the query cache with the `result`,
    /// signals the waiter and forgets the `JobOwner`, so it won't poison the query.
    fn complete(self, result: C::Value, dep_node_index: DepNodeIndex) -> C::Stored {
        // We can move out of `self` here because we `mem::forget` it below
        let key = unsafe { ptr::read(&self.key) };
        let state = self.state;
        let cache = self.cache;

        // Forget ourself so our destructor won't poison the query
        mem::forget(self);

        let (job, result) = {
            let key_hash = hash_for_shard(&key);
            let shard = get_shard_index_by_hash(key_hash);
            let job = {
                let mut lock = state.shards.get_shard_by_index(shard).lock();
                match lock.active.remove(&key).unwrap() {
                    QueryResult::Started(job) => job,
                    QueryResult::Poisoned => panic!(),
                }
            };
            let result = {
                let mut lock = cache.shards.get_shard_by_index(shard).lock();
                cache.cache.complete(&mut lock, key, result, dep_node_index)
            };
            (job, result)
        };

        job.signal_complete();
        result
    }
}

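// Diagnostics emitted while a query runs are buffered in a side table rather
// than emitted directly, so they can be attached to the query's dep node and
// replayed if the result is later reused (see `QuerySideEffects` usage below).
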
fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
where
    F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R,
{
    let diagnostics = Lock::new(ThinVec::new());
    let result = f(Some(&diagnostics));
    (result, diagnostics.into_inner())
}

impl<'tcx, D, C> Drop for JobOwner<'tcx, D, C>
where
    D: Copy + Clone + Eq + Hash,
    C: QueryCache,
{
    #[inline(never)]
    #[cold]
    fn drop(&mut self) {
        // Poison the query so jobs waiting on it panic.
        let state = self.state;
        let shard = state.shards.get_shard_by_value(&self.key);
        let job = {
            let mut shard = shard.lock();
            let job = match shard.active.remove(&self.key).unwrap() {
                QueryResult::Started(job) => job,
                QueryResult::Poisoned => panic!(),
            };
            shard.active.insert(self.key.clone(), QueryResult::Poisoned);
            job
        };
        // Also signal the completion of the job, so waiters
        // will continue execution.
        job.signal_complete();
    }
}

pub(crate) struct CycleError {
    /// The query and related span that uses the cycle.
    pub usage: Option<(Span, QueryStackFrame)>,
    pub cycle: Vec<QueryInfo>,
}

/// The result of `try_start`.
enum TryGetJob<'tcx, D, C>
where
    D: Copy + Clone + Eq + Hash,
    C: QueryCache,
{
    /// The query is not yet started. Contains a guard to the cache eventually used to start it.
    NotYetStarted(JobOwner<'tcx, D, C>),

    /// The query was already completed.
    /// Returns the result of the query and its dep-node index
    /// if it succeeded or a cycle error if it failed.
    #[cfg(parallel_compiler)]
    JobCompleted((C::Stored, DepNodeIndex)),

    /// Trying to execute the query resulted in a cycle.
    Cycle(C::Stored),
}

/// Checks if the query is already computed and in the cache.
/// It returns the shard index and a lock guard to the shard,
/// which will be used if the query is not in the cache and we need
/// to compute it.
pub fn try_get_cached<'a, CTX, C, R, OnHit>(
    tcx: CTX,
    cache: &'a QueryCacheStore<C>,
    key: &C::Key,
    // `on_hit` can be called while holding a lock to the query cache
    on_hit: OnHit,
) -> Result<R, QueryLookup>
where
    C: QueryCache,
    CTX: DepContext,
    OnHit: FnOnce(&C::Stored) -> R,
{
    cache.cache.lookup(cache, &key, |value, index| {
        if unlikely!(tcx.profiler().enabled()) {
            tcx.profiler().query_cache_hit(index.into());
        }
        #[cfg(debug_assertions)]
        cache.cache_hits.fetch_add(1, Ordering::Relaxed);
        tcx.dep_graph().read_index(index);
        on_hit(value)
    })
}

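// Callers are expected to try `try_get_cached` first and, on a miss, pass the
// returned `QueryLookup` down to `try_execute_query`/`try_start` so the key
// hash and shard index are not recomputed.
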
fn try_execute_query<CTX, C>(
    tcx: CTX,
    state: &QueryState<CTX::DepKind, C::Key>,
    cache: &QueryCacheStore<C>,
    span: Span,
    key: C::Key,
    lookup: QueryLookup,
    query: &QueryVtable<CTX, C::Key, C::Value>,
    compute: fn(CTX::DepContext, C::Key) -> C::Value,
) -> C::Stored
where
    C: QueryCache,
    C::Key: DepNodeParams<CTX::DepContext>,
    CTX: QueryContext,
{
    let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
        tcx, state, cache, span, key.clone(), lookup, query,
    ) {
        TryGetJob::NotYetStarted(job) => job,
        TryGetJob::Cycle(result) => return result,
        #[cfg(parallel_compiler)]
        TryGetJob::JobCompleted((v, index)) => {
            tcx.dep_context().dep_graph().read_index(index);
            return v;
        }
    };

    let dep_graph = tcx.dep_context().dep_graph();

    // Fast path for when incr. comp. is off.
    if !dep_graph.is_fully_enabled() {
        let prof_timer = tcx.dep_context().profiler().query_provider();
        let result = tcx.start_query(job.id, None, || compute(*tcx.dep_context(), key));
        let dep_node_index = dep_graph.next_virtual_depnode_index();
        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
        return job.complete(result, dep_node_index);
    }

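    // (The fast path above hands out a "virtual" `DepNodeIndex` so the profiler
    // and the cache entry still have an id to refer to, even though no
    // dependency edges are recorded.)
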
    if query.anon {
        let prof_timer = tcx.dep_context().profiler().query_provider();

        let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
            tcx.start_query(job.id, diagnostics, || {
                dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
                    compute(*tcx.dep_context(), key)
                })
            })
        });

        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
        dep_graph.read_index(dep_node_index);

        let side_effects = QuerySideEffects { diagnostics };

        if unlikely!(!side_effects.is_empty()) {
            tcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
        }

        return job.complete(result, dep_node_index);
    }

    let dep_node = query.to_dep_node(*tcx.dep_context(), &key);

    if !query.eval_always {
        // The diagnostics for this query will be
        // promoted to the current session during
        // `try_mark_green()`, so we can ignore them here.
        let loaded = tcx.start_query(job.id, None, || {
            let marked = dep_graph.try_mark_green_and_read(tcx, &dep_node);
            marked.map(|(prev_dep_node_index, dep_node_index)| {
                (
                    load_from_disk_and_cache_in_memory(
                        tcx, key.clone(), prev_dep_node_index, dep_node_index, &dep_node, query, compute,
                    ),
                    dep_node_index,
                )
            })
        });
        if let Some((result, dep_node_index)) = loaded {
            return job.complete(result, dep_node_index);
        }
    }

    let (result, dep_node_index) = force_query_with_job(tcx, key, job, dep_node, query, compute);
    dep_graph.read_index(dep_node_index);
    result
}

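// To summarize `try_execute_query`: on a cache miss it either assigns a
// virtual dep node (incremental compilation disabled), runs the query as an
// anonymous task (`query.anon`), or tries to mark the dep node green and load
// the cached result from disk before falling back to `force_query_with_job`.
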
fn load_from_disk_and_cache_in_memory<CTX, K, V: Debug>(
    tcx: CTX,
    key: K,
    prev_dep_node_index: SerializedDepNodeIndex,
    dep_node_index: DepNodeIndex,
    dep_node: &DepNode<CTX::DepKind>,
    query: &QueryVtable<CTX, K, V>,
    compute: fn(CTX::DepContext, K) -> V,
) -> V
where
    CTX: QueryContext,
{
    // Note this function can be called concurrently from the same query.
    // We must ensure that this is handled correctly.

    debug_assert!(tcx.dep_context().dep_graph().is_green(dep_node));

    // First we try to load the result from the on-disk cache.
    let result = if query.cache_on_disk(tcx, &key, None) {
        let prof_timer = tcx.dep_context().profiler().incr_cache_loading();
        let result = query.try_load_from_disk(tcx, prev_dep_node_index);
        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

        // We always expect to find a cached result for things that
        // can be forced from `DepNode`.
        debug_assert!(
            !dep_node.kind.can_reconstruct_query_key() || result.is_some(),
            "missing on-disk cache entry for {:?}",
            dep_node
        );
        result
    } else {
        // Some things are never cached on disk.
        None
    };

    if let Some(result) = result {
        // If `-Zincremental-verify-ich` is specified, re-hash results from
        // the cache and make sure that they have the expected fingerprint.
        if unlikely!(tcx.dep_context().sess().opts.debugging_opts.incremental_verify_ich) {
            incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);
        }

        result
    } else {
        // We could not load a result from the on-disk cache, so
        // recompute it.
        let prof_timer = tcx.dep_context().profiler().query_provider();

        // The dep-graph for this computation is already in-place.
        let result = tcx.dep_context().dep_graph().with_ignore(|| compute(*tcx.dep_context(), key));

        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

        // Verify that re-running the query produced a result with the expected hash.
        // This catches bugs in query implementations, turning them into ICEs.
        // For example, a query might sort its result by `DefId` - since `DefId`s are
        // not stable across compilation sessions, the result could end up getting sorted
        // in a different order when the query is re-run, even though all of the inputs
        // (e.g. `DefPathHash` values) were green.
        //
        // See issue #82920 for an example of a miscompilation that would get turned into
        // an ICE by this check.
        incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);

        result
    }
}

fn incremental_verify_ich<CTX, K, V: Debug>(
    tcx: CTX::DepContext,
    result: &V,
    dep_node: &DepNode<CTX::DepKind>,
    query: &QueryVtable<CTX, K, V>,
) where
    CTX: QueryContext,
{
    assert!(
        tcx.dep_graph().is_green(dep_node),
        "fingerprint for green query instance not loaded from cache: {:?}",
        dep_node,
    );

    debug!("BEGIN verify_ich({:?})", dep_node);
    let mut hcx = tcx.create_stable_hashing_context();

    let new_hash = query.hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO);
    debug!("END verify_ich({:?})", dep_node);

    let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node);

    if Some(new_hash) != old_hash {
        let run_cmd = if let Some(crate_name) = &tcx.sess().opts.crate_name {
            format!("`cargo clean -p {}` or `cargo clean`", crate_name)
        } else {
            "`cargo clean`".to_string()
        };

        // When we emit an error message and panic, we try to debug-print the `DepNode`
        // and query result. Unfortunately, this can cause us to run additional queries,
        // which may result in another fingerprint mismatch while we're in the middle
        // of processing this one. To avoid a double-panic (which kills the process
        // before we can print out the query static), we print out a terse
        // but 'safe' message if we detect a re-entrant call to this method.
        thread_local! {
            static INSIDE_VERIFY_PANIC: Cell<bool> = const { Cell::new(false) };
        };

        let old_in_panic = INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.replace(true));

        if old_in_panic {
            tcx.sess().struct_err("internal compiler error: re-entrant incremental verify failure, suppressing message")
                .emit();
        } else {
            tcx.sess().struct_err(&format!("internal compiler error: encountered incremental compilation error with {:?}", dep_node))
                .help(&format!("This is a known issue with the compiler. Run {} to allow your project to compile", run_cmd))
                .note(&"Please follow the instructions below to create a bug report with the provided information")
                .note(&"See <https://github.com/rust-lang/rust/issues/84970> for more information")
                .emit();
            panic!("Found unstable fingerprints for {:?}: {:?}", dep_node, result);
        }

        INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.set(old_in_panic));
    }
}

fn force_query_with_job<C, CTX>(
    tcx: CTX,
    key: C::Key,
    job: JobOwner<'_, CTX::DepKind, C>,
    dep_node: DepNode<CTX::DepKind>,
    query: &QueryVtable<CTX, C::Key, C::Value>,
    compute: fn(CTX::DepContext, C::Key) -> C::Value,
) -> (C::Stored, DepNodeIndex)
where
    C: QueryCache,
    CTX: QueryContext,
{
    // If the following assertion triggers, it can have two reasons:
    // 1. Something is wrong with DepNode creation, either here or
    //    in `DepGraph::try_mark_green()`.
    // 2. Two distinct query keys get mapped to the same `DepNode`
    //    (see for example #48923).
    debug_assert!(
        !tcx.dep_context().dep_graph().dep_node_exists(&dep_node),
        "forcing query with already existing `DepNode`\n\
         - query-key: {:?}\n\
         - dep-node: {:?}",
        key,
        dep_node
    );

    let prof_timer = tcx.dep_context().profiler().query_provider();

    let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
        tcx.start_query(job.id, diagnostics, || {
            if query.eval_always {
                tcx.dep_context().dep_graph().with_eval_always_task(
                    dep_node, *tcx.dep_context(), key, compute, query.hash_result,
                )
            } else {
                tcx.dep_context().dep_graph().with_task(
                    dep_node, *tcx.dep_context(), key, compute, query.hash_result,
                )
            }
        })
    });

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    let side_effects = QuerySideEffects { diagnostics };
    if unlikely!(!side_effects.is_empty()) && dep_node.kind != DepKind::NULL {
        tcx.store_side_effects(dep_node_index, side_effects);
    }

    let result = job.complete(result, dep_node_index);
    (result, dep_node_index)
}

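// `with_eval_always_task` and `with_task` both run `compute` inside a fresh
// dep-graph task; `eval_always` queries are never eligible to be marked green,
// so they are re-executed in every incremental session.
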
fn get_query_impl<CTX, C>(
    tcx: CTX,
    state: &QueryState<CTX::DepKind, C::Key>,
    cache: &QueryCacheStore<C>,
    span: Span,
    key: C::Key,
    lookup: QueryLookup,
    query: &QueryVtable<CTX, C::Key, C::Value>,
    compute: fn(CTX::DepContext, C::Key) -> C::Value,
) -> C::Stored
where CTX: QueryContext, C: QueryCache, C::Key: DepNodeParams<CTX::DepContext>,
{
    try_execute_query(tcx, state, cache, span, key, lookup, query, compute)
}

/// Ensure that either this query has all green inputs or has been executed.
/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
/// Returns true if the query should still run.
///
/// This function is particularly useful when executing passes for their
/// side-effects -- e.g., in order to report errors for erroneous programs.
///
/// Note: The optimization is only available during incr. comp.
fn ensure_must_run<CTX, K, V>(tcx: CTX, key: &K, query: &QueryVtable<CTX, K, V>) -> bool
where
    K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
    CTX: QueryContext,
{
    if query.eval_always {
        return true;
    }

    // Ensuring an anonymous query makes no sense
    assert!(!query.anon);

    let dep_node = query.to_dep_node(*tcx.dep_context(), key);

    match tcx.dep_context().dep_graph().try_mark_green_and_read(tcx, &dep_node) {
        None => {
            // A None return from `try_mark_green_and_read` means that this is either
            // a new dep node or that the dep node has already been marked red.
            // Either way, we can't call `dep_graph.read()` as we don't have the
            // DepNodeIndex. We must invoke the query itself. The performance cost
            // this introduces should be negligible as we'll immediately hit the
            // in-memory cache, or another query down the line will.
            true
        }
        Some((_, dep_node_index)) => {
            tcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
            false
        }
    }
}

fn force_query_impl<CTX, C>(
    tcx: CTX,
    state: &QueryState<CTX::DepKind, C::Key>,
    cache: &QueryCacheStore<C>,
    key: C::Key,
    dep_node: DepNode<CTX::DepKind>,
    query: &QueryVtable<CTX, C::Key, C::Value>,
    compute: fn(CTX::DepContext, C::Key) -> C::Value,
) -> bool
where
    C: QueryCache,
    C::Key: DepNodeParams<CTX::DepContext>,
    CTX: QueryContext,
{
    debug_assert!(!query.anon);

    // We may be concurrently trying both execute and force a query.
    // Ensure that only one of them runs the query.
    let cached = cache.cache.lookup(cache, &key, |_, index| {
        if unlikely!(tcx.dep_context().profiler().enabled()) {
            tcx.dep_context().profiler().query_cache_hit(index.into());
        }
        #[cfg(debug_assertions)]
        cache.cache_hits.fetch_add(1, Ordering::Relaxed);
    });

    let lookup = match cached {
        Ok(()) => return true,
        Err(lookup) => lookup,
    };

    let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
        tcx, state, cache, DUMMY_SP, key.clone(), lookup, query,
    ) {
        TryGetJob::NotYetStarted(job) => job,
        TryGetJob::Cycle(_) => return true,
        #[cfg(parallel_compiler)]
        TryGetJob::JobCompleted(_) => return true,
    };

    force_query_with_job(tcx, key, job, dep_node, query, compute);

    true
}

pub fn get_query<Q, CTX>(
    tcx: CTX,
    span: Span,
    key: Q::Key,
    lookup: QueryLookup,
    mode: QueryMode,
) -> Option<Q::Stored>
where
    Q: QueryDescription<CTX>,
    Q::Key: DepNodeParams<CTX::DepContext>,
{
    let query = &Q::VTABLE;
    if let QueryMode::Ensure = mode {
        if !ensure_must_run(tcx, &key, query) {
            return None;
        }
    }

    debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span);
    let compute = Q::compute_fn(tcx, &key);
    let value = get_query_impl(

pub fn force_query<Q, CTX>(tcx: CTX, dep_node: &DepNode<CTX::DepKind>) -> bool
where
    Q: QueryDescription<CTX>,
    Q::Key: DepNodeParams<CTX::DepContext>,
{
    if !<Q::Key as DepNodeParams<CTX::DepContext>>::can_reconstruct_query_key() {
        return false;
    }

    let key = if let Some(key) =
        <Q::Key as DepNodeParams<CTX::DepContext>>::recover(*tcx.dep_context(), &dep_node)

    let compute = Q::compute_fn(tcx, &key);