rust.git: src/librustc/ty/query/plumbing.rs
1 //! The implementation of the query system itself. This defines the macros that
2 //! generate the actual methods on tcx which find and execute the provider,
3 //! manage the caches, and so forth.
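//!
//! A rough, hedged sketch of the generated surface (see `define_queries_inner!`
//! below): for a query `foo`, the macro emits `TyCtxt::foo(key)`, which forwards
//! to `TyCtxtAt::foo(key)` using `DUMMY_SP`, which in turn calls
//! `tcx.get_query::<queries::foo<'_>>(span, key)` to consult the cache and, on a
//! miss, run the provider.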
4
5 use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
6 use crate::ty::query::caches::QueryCache;
7 use crate::ty::query::config::{QueryContext, QueryDescription};
8 use crate::ty::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId};
9 use crate::ty::query::Query;
10 use crate::ty::tls;
11 use crate::ty::{self, TyCtxt};
12
13 #[cfg(not(parallel_compiler))]
14 use rustc_data_structures::cold_path;
15 use rustc_data_structures::fx::{FxHashMap, FxHasher};
16 use rustc_data_structures::sharded::Sharded;
17 use rustc_data_structures::sync::{Lock, LockGuard};
18 use rustc_data_structures::thin_vec::ThinVec;
19 use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, FatalError, Handler, Level};
20 use rustc_query_system::dep_graph::{DepKind, DepNode};
21 use rustc_session::Session;
22 use rustc_span::def_id::DefId;
23 use rustc_span::source_map::DUMMY_SP;
24 use rustc_span::Span;
25 use std::collections::hash_map::Entry;
26 use std::convert::TryFrom;
27 use std::fmt::Debug;
28 use std::hash::{Hash, Hasher};
29 use std::mem;
30 use std::num::NonZeroU32;
31 use std::ptr;
32 #[cfg(debug_assertions)]
33 use std::sync::atomic::{AtomicUsize, Ordering};
34
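/// Per-shard portion of a query's state. `QueryState` below splits the map of
/// active jobs (and, for some caches, the cache itself) across shards to reduce
/// lock contention. The `jobs` counter hands out ids that are unique within a
/// shard; combined with the shard index they form a globally unique
/// `QueryJobId` (see `try_start` below).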
35 pub(crate) struct QueryStateShard<CTX: QueryContext, K, C> {
36     cache: C,
37     active: FxHashMap<K, QueryResult<CTX>>,
38
39     /// Used to generate unique ids for active jobs.
40     jobs: u32,
41 }
42
43 impl<CTX: QueryContext, K, C> QueryStateShard<CTX, K, C> {
44     fn get_cache(&mut self) -> &mut C {
45         &mut self.cache
46     }
47 }
48
49 impl<CTX: QueryContext, K, C: Default> Default for QueryStateShard<CTX, K, C> {
50     fn default() -> QueryStateShard<CTX, K, C> {
51         QueryStateShard { cache: Default::default(), active: Default::default(), jobs: 0 }
52     }
53 }
54
55 pub(crate) struct QueryState<CTX: QueryContext, C: QueryCache<CTX>> {
56     cache: C,
57     shards: Sharded<QueryStateShard<CTX, C::Key, C::Sharded>>,
58     #[cfg(debug_assertions)]
59     pub(super) cache_hits: AtomicUsize,
60 }
61
62 impl<CTX: QueryContext, C: QueryCache<CTX>> QueryState<CTX, C> {
63     pub(super) fn get_lookup<K2: Hash>(
64         &'tcx self,
65         key: &K2,
66     ) -> QueryLookup<'tcx, CTX, C::Key, C::Sharded> {
67         // We compute the key's hash once and then use it for both the
68         // shard lookup and the hashmap lookup. This relies on the fact
69         // that both of them use `FxHasher`.
70         let mut hasher = FxHasher::default();
71         key.hash(&mut hasher);
72         let key_hash = hasher.finish();
73
74         let shard = self.shards.get_shard_index_by_hash(key_hash);
75         let lock = self.shards.get_shard_by_index(shard).lock();
76         QueryLookup { key_hash, shard, lock }
77     }
78 }
79
80 /// Indicates the state of a query for a given key in a query map.
81 enum QueryResult<CTX: QueryContext> {
82     /// An already executing query. The query job can be used to await its completion.
83     Started(QueryJob<CTX>),
84
85     /// The query panicked. Queries trying to wait on this will raise a fatal error which will
86     /// silently panic (unwinding without reporting an additional diagnostic).
87     Poisoned,
88 }
89
90 impl<CTX: QueryContext, C: QueryCache<CTX>> QueryState<CTX, C> {
91     pub(super) fn iter_results<R>(
92         &self,
93         f: impl for<'a> FnOnce(
94             Box<dyn Iterator<Item = (&'a C::Key, &'a C::Value, DepNodeIndex)> + 'a>,
95         ) -> R,
96     ) -> R {
97         self.cache.iter(&self.shards, |shard| &mut shard.cache, f)
98     }
99     pub(super) fn all_inactive(&self) -> bool {
100         let shards = self.shards.lock_shards();
101         shards.iter().all(|shard| shard.active.is_empty())
102     }
103
104     pub(super) fn try_collect_active_jobs(
105         &self,
106         kind: CTX::DepKind,
107         make_query: fn(C::Key) -> CTX::Query,
108         jobs: &mut FxHashMap<QueryJobId<CTX::DepKind>, QueryJobInfo<CTX>>,
109     ) -> Option<()>
110     where
111         C::Key: Clone,
112     {
113         // We use `try_lock_shards` here since we are called from the
114         // deadlock handler, and blocking on the shard locks could itself deadlock.
115         let shards = self.shards.try_lock_shards()?;
116         let shards = shards.iter().enumerate();
117         jobs.extend(shards.flat_map(|(shard_id, shard)| {
118             shard.active.iter().filter_map(move |(k, v)| {
119                 if let QueryResult::Started(ref job) = *v {
120                     let id =
121                         QueryJobId { job: job.id, shard: u16::try_from(shard_id).unwrap(), kind };
122                     let info = QueryInfo { span: job.span, query: make_query(k.clone()) };
123                     Some((id, QueryJobInfo { info, job: job.clone() }))
124                 } else {
125                     None
126                 }
127             })
128         }));
129
130         Some(())
131     }
132 }
133
134 impl<CTX: QueryContext, C: QueryCache<CTX>> Default for QueryState<CTX, C> {
135     fn default() -> QueryState<CTX, C> {
136         QueryState {
137             cache: C::default(),
138             shards: Default::default(),
139             #[cfg(debug_assertions)]
140             cache_hits: AtomicUsize::new(0),
141         }
142     }
143 }
144
145 /// Values used when checking a query cache, which can be reused on a cache miss to execute the query.
146 pub(crate) struct QueryLookup<'tcx, CTX: QueryContext, K, C> {
147     pub(super) key_hash: u64,
148     shard: usize,
149     pub(super) lock: LockGuard<'tcx, QueryStateShard<CTX, K, C>>,
150 }
151
152 /// A type representing the responsibility to execute the job in the `job` field.
153 /// This will poison the relevant query if dropped.
154 struct JobOwner<'tcx, CTX: QueryContext, C>
155 where
156     C: QueryCache<CTX>,
157     C::Key: Eq + Hash + Clone + Debug,
158     C::Value: Clone,
159 {
160     state: &'tcx QueryState<CTX, C>,
161     key: C::Key,
162     id: QueryJobId<CTX::DepKind>,
163 }
164
165 impl<'tcx, C> JobOwner<'tcx, TyCtxt<'tcx>, C>
166 where
167     C: QueryCache<TyCtxt<'tcx>> + 'tcx,
168     C::Key: Eq + Hash + Clone + Debug,
169     C::Value: Clone,
170 {
171     /// Either gets a `JobOwner` corresponding to the query, allowing us to
172     /// start executing the query, or returns with the result of the query.
173     /// This function assumes that `try_get_cached` has already been called and returned `lookup`.
174     /// If the query is executing elsewhere, this will wait for it and return the result.
175     /// If the query panicked, this will silently panic.
176     ///
177     /// This function is inlined because that results in a noticeable speed-up
178     /// for some compile-time benchmarks.
179     #[inline(always)]
180     fn try_start<'a, 'b, Q>(
181         tcx: TyCtxt<'tcx>,
182         span: Span,
183         key: &C::Key,
184         mut lookup: QueryLookup<'a, TyCtxt<'tcx>, C::Key, C::Sharded>,
185     ) -> TryGetJob<'b, TyCtxt<'tcx>, C>
186     where
187         Q: QueryDescription<TyCtxt<'tcx>, Key = C::Key, Value = C::Value, Cache = C>,
188     {
189         let lock = &mut *lookup.lock;
190
191         let (latch, mut _query_blocked_prof_timer) = match lock.active.entry((*key).clone()) {
192             Entry::Occupied(mut entry) => {
193                 match entry.get_mut() {
194                     QueryResult::Started(job) => {
195                         // For parallel queries, we'll block and wait until the query running
196                         // in another thread has completed. Record how long we wait in the
197                         // self-profiler.
198                         let _query_blocked_prof_timer = if cfg!(parallel_compiler) {
199                             Some(tcx.prof.query_blocked())
200                         } else {
201                             None
202                         };
203
204                         // Create the id of the job we're waiting for
205                         let id = QueryJobId::new(job.id, lookup.shard, Q::DEP_KIND);
206
207                         (job.latch(id), _query_blocked_prof_timer)
208                     }
209                     QueryResult::Poisoned => FatalError.raise(),
210                 }
211             }
212             Entry::Vacant(entry) => {
213                 // No job entry for this query. Return a new one to be started later.
214
215                 // Generate an id unique within this shard.
216                 let id = lock.jobs.checked_add(1).unwrap();
217                 lock.jobs = id;
218                 let id = QueryShardJobId(NonZeroU32::new(id).unwrap());
219
220                 let global_id = QueryJobId::new(id, lookup.shard, Q::DEP_KIND);
221
222                 let job = tls::with_related_context(tcx, |icx| QueryJob::new(id, span, icx.query));
223
224                 entry.insert(QueryResult::Started(job));
225
226                 let owner =
227                     JobOwner { state: Q::query_state(tcx), id: global_id, key: (*key).clone() };
228                 return TryGetJob::NotYetStarted(owner);
229             }
230         };
231         mem::drop(lookup.lock);
232
233         // If we are single-threaded we know that we have a cycle error,
234         // so we just return the error.
235         #[cfg(not(parallel_compiler))]
236         return TryGetJob::Cycle(cold_path(|| {
237             Q::handle_cycle_error(tcx, latch.find_cycle_in_stack(tcx, span))
238         }));
239
240         // With parallel queries we might just have to wait on some other
241         // thread.
242         #[cfg(parallel_compiler)]
243         {
244             let result = latch.wait_on(tcx, span);
245
246             if let Err(cycle) = result {
247                 return TryGetJob::Cycle(Q::handle_cycle_error(tcx, cycle));
248             }
249
250             let cached = tcx.try_get_cached(
251                 Q::query_state(tcx),
252                 (*key).clone(),
253                 |value, index| (value.clone(), index),
254                 |_, _| panic!("value must be in cache after waiting"),
255             );
256
257             if let Some(prof_timer) = _query_blocked_prof_timer.take() {
258                 prof_timer.finish_with_query_invocation_id(cached.1.into());
259             }
260
261             return TryGetJob::JobCompleted(cached);
262         }
263     }
264 }
265
266 impl<'tcx, CTX: QueryContext, C> JobOwner<'tcx, CTX, C>
267 where
268     C: QueryCache<CTX>,
269     C::Key: Eq + Hash + Clone + Debug,
270     C::Value: Clone,
271 {
272     /// Completes the query by updating the query cache with the `result`,
273     /// signaling the waiters, and forgetting the `JobOwner` so it won't poison the query.
274     #[inline(always)]
275     fn complete(self, tcx: CTX, result: &C::Value, dep_node_index: DepNodeIndex) {
276         // We can move out of `self` here because we `mem::forget` it below
277         let key = unsafe { ptr::read(&self.key) };
278         let state = self.state;
279
280         // Forget ourself so our destructor won't poison the query
281         mem::forget(self);
282
283         let job = {
284             let result = result.clone();
285             let mut lock = state.shards.get_shard_by_value(&key).lock();
286             let job = match lock.active.remove(&key).unwrap() {
287                 QueryResult::Started(job) => job,
288                 QueryResult::Poisoned => panic!(),
289             };
290             state.cache.complete(tcx, &mut lock.cache, key, result, dep_node_index);
291             job
292         };
293
294         job.signal_complete();
295     }
296 }
297
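/// Runs `f` with a fresh diagnostics buffer and returns both `f`'s result and the
/// diagnostics that were emitted into that buffer. The query machinery uses this
/// to capture diagnostics produced while a query executes so they can be stored
/// in the on-disk cache (see the `store_diagnostics*` calls below).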
298 #[inline(always)]
299 fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
300 where
301     F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R,
302 {
303     let diagnostics = Lock::new(ThinVec::new());
304     let result = f(Some(&diagnostics));
305     (result, diagnostics.into_inner())
306 }
307
308 impl<'tcx, CTX: QueryContext, C: QueryCache<CTX>> Drop for JobOwner<'tcx, CTX, C>
309 where
310     C::Key: Eq + Hash + Clone + Debug,
311     C::Value: Clone,
312 {
313     #[inline(never)]
314     #[cold]
315     fn drop(&mut self) {
316         // Poison the query so jobs waiting on it panic.
317         let state = self.state;
318         let shard = state.shards.get_shard_by_value(&self.key);
319         let job = {
320             let mut shard = shard.lock();
321             let job = match shard.active.remove(&self.key).unwrap() {
322                 QueryResult::Started(job) => job,
323                 QueryResult::Poisoned => panic!(),
324             };
325             shard.active.insert(self.key.clone(), QueryResult::Poisoned);
326             job
327         };
328         // Also signal the completion of the job, so waiters
329         // will continue execution.
330         job.signal_complete();
331     }
332 }
333
334 #[derive(Clone)]
335 pub(crate) struct CycleError<Q> {
336     /// The query and related span that uses the cycle.
337     pub(super) usage: Option<(Span, Q)>,
338     pub(super) cycle: Vec<QueryInfo<Q>>,
339 }
340
341 /// The result of `try_start`.
342 enum TryGetJob<'tcx, CTX: QueryContext, C: QueryCache<CTX>>
343 where
344     C::Key: Eq + Hash + Clone + Debug,
345     C::Value: Clone,
346 {
347     /// The query is not yet started. Contains a guard to the cache eventually used to start it.
348     NotYetStarted(JobOwner<'tcx, CTX, C>),
349
350     /// The query was already completed.
351     /// Returns the result of the query and its dep-node index
352     /// if it succeeded or a cycle error if it failed.
353     #[cfg(parallel_compiler)]
354     JobCompleted((C::Value, DepNodeIndex)),
355
356     /// Trying to execute the query resulted in a cycle.
357     Cycle(C::Value),
358 }
359
360 impl QueryContext for TyCtxt<'tcx> {
361     type Query = Query<'tcx>;
362
363     fn session(&self) -> &Session {
364         &self.sess
365     }
366
367     fn def_path_str(&self, def_id: DefId) -> String {
368         TyCtxt::def_path_str(*self, def_id)
369     }
370
371     fn read_query_job<R>(&self, op: impl FnOnce(Option<QueryJobId<Self::DepKind>>) -> R) -> R {
372         tls::with_related_context(*self, move |icx| op(icx.query))
373     }
374
375     fn try_collect_active_jobs(
376         &self,
377     ) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self>>> {
378         self.queries.try_collect_active_jobs()
379     }
380 }
381
382 impl<'tcx> TyCtxt<'tcx> {
383     /// Executes a job by changing the `ImplicitCtxt` to point to the
384     /// new query job while it executes. Diagnostics emitted during execution
385     /// are captured into `diagnostics` (if provided), and the result of `compute` is returned.
386     #[inline(always)]
387     fn start_query<F, R>(
388         self,
389         token: QueryJobId<crate::dep_graph::DepKind>,
390         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
391         compute: F,
392     ) -> R
393     where
394         F: FnOnce(TyCtxt<'tcx>) -> R,
395     {
396         // The `TyCtxt` stored in TLS has the same global interner lifetime
397         // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
398         // when accessing the `ImplicitCtxt`.
399         tls::with_related_context(self, move |current_icx| {
400             // Update the `ImplicitCtxt` to point to our new query job.
401             let new_icx = tls::ImplicitCtxt {
402                 tcx: self,
403                 query: Some(token),
404                 diagnostics,
405                 layout_depth: current_icx.layout_depth,
406                 task_deps: current_icx.task_deps,
407             };
408
409             // Use the `ImplicitCtxt` while we execute the query.
410             tls::enter_context(&new_icx, |_| compute(self))
411         })
412     }
413
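    /// Builds the `E0391` cycle diagnostic from a `CycleError`. Based on the
    /// format strings below, the emitted output looks roughly like (hedged,
    /// illustrative): `error[E0391]: cycle detected when {stack[0]}`, followed by
    /// one `...which requires {stack[i]}...` note per further stack entry, a
    /// closing `...which again requires {stack[0]}, completing the cycle` note,
    /// and, if `usage` is `Some`, a final `cycle used when {usage}` note.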
414     #[inline(never)]
415     #[cold]
416     pub(super) fn report_cycle(
417         self,
418         CycleError { usage, cycle: stack }: CycleError<Query<'tcx>>,
419     ) -> DiagnosticBuilder<'tcx> {
420         assert!(!stack.is_empty());
421
422         let fix_span = |span: Span, query: &Query<'tcx>| {
423             self.sess.source_map().guess_head_span(query.default_span(self, span))
424         };
425
426         // Disable naming impls with types in this path, since that
427         // sometimes cycles itself, leading to extra cycle errors.
428         // (And cycle errors around impls tend to occur during the
429         // collect/coherence phases anyhow.)
430         ty::print::with_forced_impl_filename_line(|| {
431             let span = fix_span(stack[1 % stack.len()].span, &stack[0].query);
432             let mut err = struct_span_err!(
433                 self.sess,
434                 span,
435                 E0391,
436                 "cycle detected when {}",
437                 stack[0].query.describe(self)
438             );
439
440             for i in 1..stack.len() {
441                 let query = &stack[i].query;
442                 let span = fix_span(stack[(i + 1) % stack.len()].span, query);
443                 err.span_note(span, &format!("...which requires {}...", query.describe(self)));
444             }
445
446             err.note(&format!(
447                 "...which again requires {}, completing the cycle",
448                 stack[0].query.describe(self)
449             ));
450
451             if let Some((span, query)) = usage {
452                 err.span_note(
453                     fix_span(span, &query),
454                     &format!("cycle used when {}", query.describe(self)),
455                 );
456             }
457
458             err
459         })
460     }
461
462     pub fn try_print_query_stack(handler: &Handler) {
463         eprintln!("query stack during panic:");
464
465         // Be careful relying on global state here: this code is called from
466         // a panic hook, which means that the global `Handler` may be in a weird
467         // state if it was responsible for triggering the panic.
468         tls::with_context_opt(|icx| {
469             if let Some(icx) = icx {
470                 let query_map = icx.tcx.queries.try_collect_active_jobs();
471
472                 let mut current_query = icx.query;
473                 let mut i = 0;
474
475                 while let Some(query) = current_query {
476                     let query_info =
477                         if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query)) {
478                             info
479                         } else {
480                             break;
481                         };
482                     let mut diag = Diagnostic::new(
483                         Level::FailureNote,
484                         &format!(
485                             "#{} [{}] {}",
486                             i,
487                             query_info.info.query.name(),
488                             query_info.info.query.describe(icx.tcx)
489                         ),
490                     );
491                     diag.span =
492                         icx.tcx.sess.source_map().guess_head_span(query_info.info.span).into();
493                     handler.force_print_diagnostic(diag);
494
495                     current_query = query_info.job.parent;
496                     i += 1;
497                 }
498             }
499         });
500
501         eprintln!("end of query stack");
502     }
503
504     /// Checks if the query is already computed and in the cache.
505     /// If it is, `on_hit` is called with the cached value; otherwise the key,
506     /// the shard index, and a lock guard to the shard are passed to `on_miss`,
507     /// which can use them to compute the query.
508     #[inline(always)]
509     fn try_get_cached<C, R, OnHit, OnMiss>(
510         self,
511         state: &'tcx QueryState<TyCtxt<'tcx>, C>,
512         key: C::Key,
513         // `on_hit` can be called while holding a lock to the query cache
514         on_hit: OnHit,
515         on_miss: OnMiss,
516     ) -> R
517     where
518         C: QueryCache<TyCtxt<'tcx>>,
519         OnHit: FnOnce(&C::Value, DepNodeIndex) -> R,
520         OnMiss: FnOnce(C::Key, QueryLookup<'_, TyCtxt<'tcx>, C::Key, C::Sharded>) -> R,
521     {
522         state.cache.lookup(
523             state,
524             QueryStateShard::<TyCtxt<'tcx>, C::Key, C::Sharded>::get_cache,
525             key,
526             |value, index| {
527                 if unlikely!(self.prof.enabled()) {
528                     self.prof.query_cache_hit(index.into());
529                 }
530                 #[cfg(debug_assertions)]
531                 {
532                     state.cache_hits.fetch_add(1, Ordering::Relaxed);
533                 }
534                 on_hit(value, index)
535             },
536             on_miss,
537         )
538     }
539
540     #[inline(never)]
541     pub(super) fn get_query<Q: QueryDescription<TyCtxt<'tcx>> + 'tcx>(
542         self,
543         span: Span,
544         key: Q::Key,
545     ) -> Q::Value {
546         debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span);
547
548         self.try_get_cached(
549             Q::query_state(self),
550             key,
551             |value, index| {
552                 self.dep_graph.read_index(index);
553                 value.clone()
554             },
555             |key, lookup| self.try_execute_query::<Q>(span, key, lookup),
556         )
557     }
558
559     #[inline(always)]
560     fn try_execute_query<Q: QueryDescription<TyCtxt<'tcx>> + 'tcx>(
561         self,
562         span: Span,
563         key: Q::Key,
564         lookup: QueryLookup<
565             '_,
566             TyCtxt<'tcx>,
567             Q::Key,
568             <Q::Cache as QueryCache<TyCtxt<'tcx>>>::Sharded,
569         >,
570     ) -> Q::Value {
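        // Overview of the flow below (descriptive only):
        //  1. Claim the job via `JobOwner::try_start`; bail out early on a cycle
        //     or (with the parallel compiler) if another thread already finished it.
        //  2. If the dep-graph is not fully enabled, force the query under a NULL
        //     dep-node.
        //  3. Anonymous queries run inside `with_anon_task` and are never marked
        //     green.
        //  4. Non-`EVAL_ALWAYS` queries first try `try_mark_green_and_read`; if
        //     the node is green, the value is loaded from the on-disk cache (or
        //     recomputed).
        //  5. Otherwise the query is forced and a new dep-node is recorded.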
571         let job = match JobOwner::try_start::<Q>(self, span, &key, lookup) {
572             TryGetJob::NotYetStarted(job) => job,
573             TryGetJob::Cycle(result) => return result,
574             #[cfg(parallel_compiler)]
575             TryGetJob::JobCompleted((v, index)) => {
576                 self.dep_graph.read_index(index);
577                 return v;
578             }
579         };
580
581         // Fast path for when incr. comp. is off. `to_dep_node` is
582         // expensive for some `DepKind`s.
583         if !self.dep_graph.is_fully_enabled() {
584             let null_dep_node = DepNode::new_no_params(DepKind::NULL);
585             return self.force_query_with_job::<Q>(key, job, null_dep_node).0;
586         }
587
588         if Q::ANON {
589             let prof_timer = self.prof.query_provider();
590
591             let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
592                 self.start_query(job.id, diagnostics, |tcx| {
593                     tcx.dep_graph.with_anon_task(Q::DEP_KIND, || Q::compute(tcx, key))
594                 })
595             });
596
597             prof_timer.finish_with_query_invocation_id(dep_node_index.into());
598
599             self.dep_graph.read_index(dep_node_index);
600
601             if unlikely!(!diagnostics.is_empty()) {
602                 self.queries
603                     .on_disk_cache
604                     .store_diagnostics_for_anon_node(dep_node_index, diagnostics);
605             }
606
607             job.complete(self, &result, dep_node_index);
608
609             return result;
610         }
611
612         let dep_node = Q::to_dep_node(self, &key);
613
614         if !Q::EVAL_ALWAYS {
615             // The diagnostics for this query will be
616             // promoted to the current session during
617             // `try_mark_green()`, so we can ignore them here.
618             let loaded = self.start_query(job.id, None, |tcx| {
619                 let marked = tcx.dep_graph.try_mark_green_and_read(tcx, &dep_node);
620                 marked.map(|(prev_dep_node_index, dep_node_index)| {
621                     (
622                         tcx.load_from_disk_and_cache_in_memory::<Q>(
623                             key.clone(),
624                             prev_dep_node_index,
625                             dep_node_index,
626                             &dep_node,
627                         ),
628                         dep_node_index,
629                     )
630                 })
631             });
632             if let Some((result, dep_node_index)) = loaded {
633                 job.complete(self, &result, dep_node_index);
634                 return result;
635             }
636         }
637
638         let (result, dep_node_index) = self.force_query_with_job::<Q>(key, job, dep_node);
639         self.dep_graph.read_index(dep_node_index);
640         result
641     }
642
643     fn load_from_disk_and_cache_in_memory<Q: QueryDescription<TyCtxt<'tcx>>>(
644         self,
645         key: Q::Key,
646         prev_dep_node_index: SerializedDepNodeIndex,
647         dep_node_index: DepNodeIndex,
648         dep_node: &DepNode<crate::dep_graph::DepKind>,
649     ) -> Q::Value {
650         // Note that this function can be called concurrently from the same query.
651         // We must ensure that this is handled correctly.
652
653         debug_assert!(self.dep_graph.is_green(dep_node));
654
655         // First we try to load the result from the on-disk cache.
656         let result = if Q::cache_on_disk(self, key.clone(), None) {
657             let prof_timer = self.prof.incr_cache_loading();
658             let result = Q::try_load_from_disk(self, prev_dep_node_index);
659             prof_timer.finish_with_query_invocation_id(dep_node_index.into());
660
661             // We always expect to find a cached result for things that
662             // can be forced from `DepNode`.
663             debug_assert!(
664                 !dep_node.kind.can_reconstruct_query_key() || result.is_some(),
665                 "missing on-disk cache entry for {:?}",
666                 dep_node
667             );
668             result
669         } else {
670             // Some things are never cached on disk.
671             None
672         };
673
674         let result = if let Some(result) = result {
675             result
676         } else {
677             // We could not load a result from the on-disk cache, so
678             // recompute.
679             let prof_timer = self.prof.query_provider();
680
681             // The dep-graph for this computation is already in-place.
682             let result = self.dep_graph.with_ignore(|| Q::compute(self, key));
683
684             prof_timer.finish_with_query_invocation_id(dep_node_index.into());
685
686             result
687         };
688
689         // If `-Zincremental-verify-ich` is specified, re-hash results from
690         // the cache and make sure that they have the expected fingerprint.
691         if unlikely!(self.sess.opts.debugging_opts.incremental_verify_ich) {
692             self.incremental_verify_ich::<Q>(&result, dep_node, dep_node_index);
693         }
694
695         result
696     }
697
698     #[inline(never)]
699     #[cold]
700     fn incremental_verify_ich<Q: QueryDescription<TyCtxt<'tcx>>>(
701         self,
702         result: &Q::Value,
703         dep_node: &DepNode<crate::dep_graph::DepKind>,
704         dep_node_index: DepNodeIndex,
705     ) {
706         use rustc_data_structures::fingerprint::Fingerprint;
707
708         assert!(
709             Some(self.dep_graph.fingerprint_of(dep_node_index))
710                 == self.dep_graph.prev_fingerprint_of(dep_node),
711             "fingerprint for green query instance not loaded from cache: {:?}",
712             dep_node,
713         );
714
715         debug!("BEGIN verify_ich({:?})", dep_node);
716         let mut hcx = self.create_stable_hashing_context();
717
718         let new_hash = Q::hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO);
719         debug!("END verify_ich({:?})", dep_node);
720
721         let old_hash = self.dep_graph.fingerprint_of(dep_node_index);
722
723         assert!(new_hash == old_hash, "found unstable fingerprints for {:?}", dep_node,);
724     }
725
726     #[inline(always)]
727     fn force_query_with_job<Q: QueryDescription<TyCtxt<'tcx>> + 'tcx>(
728         self,
729         key: Q::Key,
730         job: JobOwner<'tcx, Self, Q::Cache>,
731         dep_node: DepNode<crate::dep_graph::DepKind>,
732     ) -> (Q::Value, DepNodeIndex) {
733         // If the following assertion triggers, it can have two reasons:
734         // 1. Something is wrong with DepNode creation, either here or
735         //    in `DepGraph::try_mark_green()`.
736         // 2. Two distinct query keys get mapped to the same `DepNode`
737         //    (see for example #48923).
738         assert!(
739             !self.dep_graph.dep_node_exists(&dep_node),
740             "forcing query with already existing `DepNode`\n\
741                  - query-key: {:?}\n\
742                  - dep-node: {:?}",
743             key,
744             dep_node
745         );
746
747         let prof_timer = self.prof.query_provider();
748
749         let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
750             self.start_query(job.id, diagnostics, |tcx| {
751                 if Q::EVAL_ALWAYS {
752                     tcx.dep_graph.with_eval_always_task(
753                         dep_node,
754                         tcx,
755                         key,
756                         Q::compute,
757                         Q::hash_result,
758                     )
759                 } else {
760                     tcx.dep_graph.with_task(dep_node, tcx, key, Q::compute, Q::hash_result)
761                 }
762             })
763         });
764
765         prof_timer.finish_with_query_invocation_id(dep_node_index.into());
766
767         if unlikely!(!diagnostics.is_empty()) {
768             if dep_node.kind != DepKind::NULL {
769                 self.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics);
770             }
771         }
772
773         job.complete(self, &result, dep_node_index);
774
775         (result, dep_node_index)
776     }
777
778     /// Ensures that either this query has all green inputs or has been executed.
779     /// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
780     ///
781     /// This function is particularly useful when executing passes for their
782     /// side-effects -- e.g., in order to report errors for erroneous programs.
783     ///
784     /// Note: The optimization is only available during incr. comp.
785     pub(super) fn ensure_query<Q: QueryDescription<TyCtxt<'tcx>> + 'tcx>(self, key: Q::Key) {
786         if Q::EVAL_ALWAYS {
787             let _ = self.get_query::<Q>(DUMMY_SP, key);
788             return;
789         }
790
791         // Ensuring an anonymous query makes no sense
792         assert!(!Q::ANON);
793
794         let dep_node = Q::to_dep_node(self, &key);
795
796         match self.dep_graph.try_mark_green_and_read(self, &dep_node) {
797             None => {
798                 // A None return from `try_mark_green_and_read` means that this is either
799                 // a new dep node or that the dep node has already been marked red.
800                 // Either way, we can't call `dep_graph.read()` as we don't have the
801                 // DepNodeIndex. We must invoke the query itself. The performance cost
802                 // this introduces should be negligible as we'll immediately hit the
803                 // in-memory cache, or another query down the line will.
804                 let _ = self.get_query::<Q>(DUMMY_SP, key);
805             }
806             Some((_, dep_node_index)) => {
807                 self.prof.query_cache_hit(dep_node_index.into());
808             }
809         }
810     }
811
812     #[allow(dead_code)]
813     pub(super) fn force_query<Q: QueryDescription<TyCtxt<'tcx>> + 'tcx>(
814         self,
815         key: Q::Key,
816         span: Span,
817         dep_node: DepNode<crate::dep_graph::DepKind>,
818     ) {
819         // We may be concurrently trying to both execute and force a query.
820         // Ensure that only one of them runs the query.
821
822         self.try_get_cached(
823             Q::query_state(self),
824             key,
825             |_, _| {
826                 // Cache hit, do nothing
827             },
828             |key, lookup| {
829                 let job = match JobOwner::try_start::<Q>(self, span, &key, lookup) {
830                     TryGetJob::NotYetStarted(job) => job,
831                     TryGetJob::Cycle(_) => return,
832                     #[cfg(parallel_compiler)]
833                     TryGetJob::JobCompleted(_) => return,
834                 };
835                 self.force_query_with_job::<Q>(key, job, dep_node);
836             },
837         );
838     }
839 }
840
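// The helper macros below (`handle_cycle_error!`, `is_anon!`, `is_eval_always!`,
// `query_storage!`, `hash_result!`) all follow the same pattern: they match on the
// list of modifiers attached to a query declaration (e.g. `fatal_cycle`, `anon`,
// `eval_always`, `storage(...)`, `no_hash`) and recurse past any modifier they do
// not recognize, until the list is empty and the default arm applies.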
841 macro_rules! handle_cycle_error {
842     ([][$tcx: expr, $error:expr]) => {{
843         $tcx.report_cycle($error).emit();
844         Value::from_cycle_error($tcx)
845     }};
846     ([fatal_cycle $($rest:tt)*][$tcx:expr, $error:expr]) => {{
847         $tcx.report_cycle($error).emit();
848         $tcx.sess.abort_if_errors();
849         unreachable!()
850     }};
851     ([cycle_delay_bug $($rest:tt)*][$tcx:expr, $error:expr]) => {{
852         $tcx.report_cycle($error).delay_as_bug();
853         Value::from_cycle_error($tcx)
854     }};
855     ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
856         handle_cycle_error!([$($($modifiers)*)*][$($args)*])
857     };
858 }
859
860 macro_rules! is_anon {
861     ([]) => {{
862         false
863     }};
864     ([anon $($rest:tt)*]) => {{
865         true
866     }};
867     ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
868         is_anon!([$($($modifiers)*)*])
869     };
870 }
871
872 macro_rules! is_eval_always {
873     ([]) => {{
874         false
875     }};
876     ([eval_always $($rest:tt)*]) => {{
877         true
878     }};
879     ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
880         is_eval_always!([$($($modifiers)*)*])
881     };
882 }
883
884 macro_rules! query_storage {
885     (<$tcx:tt>[][$K:ty, $V:ty]) => {
886         <<$K as Key>::CacheSelector as CacheSelector<TyCtxt<$tcx>, $K, $V>>::Cache
887     };
888     (<$tcx:tt>[storage($ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => {
889         $ty
890     };
891     (<$tcx:tt>[$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
892         query_storage!(<$tcx>[$($($modifiers)*)*][$($args)*])
893     };
894 }
895
896 macro_rules! hash_result {
897     ([][$hcx:expr, $result:expr]) => {{
898         dep_graph::hash_result($hcx, &$result)
899     }};
900     ([no_hash $($rest:tt)*][$hcx:expr, $result:expr]) => {{
901         None
902     }};
903     ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
904         hash_result!([$($($modifiers)*)*][$($args)*])
905     };
906 }
907
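// A hedged sketch of the kind of invocation `define_queries!` expects (the real
// invocation, assembled elsewhere in the crate, lists every query and its
// modifiers; the query names and categories below are only illustrative):
//
//     define_queries! { <'tcx>
//         Other {
//             [] fn type_of: TypeOf(DefId) -> Ty<'tcx>,
//             [anon] fn some_anon_query: SomeAnonQuery(DefId) -> bool,
//         },
//     }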
908 macro_rules! define_queries {
909     (<$tcx:tt> $($category:tt {
910         $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*
911     },)*) => {
912         define_queries_inner! { <$tcx>
913             $($( $(#[$attr])* category<$category> [$($modifiers)*] fn $name: $node($K) -> $V,)*)*
914         }
915     }
916 }
917
918 macro_rules! define_queries_inner {
919     (<$tcx:tt>
920      $($(#[$attr:meta])* category<$category:tt>
921         [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*) => {
922
923         use std::mem;
924         use crate::{
925             rustc_data_structures::stable_hasher::HashStable,
926             rustc_data_structures::stable_hasher::StableHasher,
927             ich::StableHashingContext
928         };
929         use rustc_data_structures::profiling::ProfileCategory;
930
931         define_queries_struct! {
932             tcx: $tcx,
933             input: ($(([$($modifiers)*] [$($attr)*] [$name]))*)
934         }
935
936         #[allow(nonstandard_style)]
937         #[derive(Clone, Debug)]
938         pub enum Query<$tcx> {
939             $($(#[$attr])* $name($K)),*
940         }
941
942         impl<$tcx> Query<$tcx> {
943             pub fn name(&self) -> &'static str {
944                 match *self {
945                     $(Query::$name(_) => stringify!($name),)*
946                 }
947             }
948
949             pub fn describe(&self, tcx: TyCtxt<$tcx>) -> Cow<'static, str> {
950                 let (r, name) = match *self {
951                     $(Query::$name(key) => {
952                         (queries::$name::describe(tcx, key), stringify!($name))
953                     })*
954                 };
955                 if tcx.sess.verbose() {
956                     format!("{} [{}]", r, name).into()
957                 } else {
958                     r
959                 }
960             }
961
962             // FIXME(eddyb) Get more valid `Span`s on queries.
963             pub fn default_span(&self, tcx: TyCtxt<$tcx>, span: Span) -> Span {
964                 if !span.is_dummy() {
965                     return span;
966                 }
967                 // The `def_span` query is used to calculate `default_span`,
968                 // so exit to avoid infinite recursion.
969                 if let Query::def_span(..) = *self {
970                     return span
971                 }
972                 match *self {
973                     $(Query::$name(key) => key.default_span(tcx),)*
974                 }
975             }
976         }
977
978         impl<'a, $tcx> HashStable<StableHashingContext<'a>> for Query<$tcx> {
979             fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
980                 mem::discriminant(self).hash_stable(hcx, hasher);
981                 match *self {
982                     $(Query::$name(key) => key.hash_stable(hcx, hasher),)*
983                 }
984             }
985         }
986
987         pub mod queries {
988             use std::marker::PhantomData;
989
990             $(#[allow(nonstandard_style)]
991             pub struct $name<$tcx> {
992                 data: PhantomData<&$tcx ()>
993             })*
994         }
995
996         $(impl<$tcx> QueryConfig<TyCtxt<$tcx>> for queries::$name<$tcx> {
997             type Key = $K;
998             type Value = $V;
999             const NAME: &'static str = stringify!($name);
1000             const CATEGORY: ProfileCategory = $category;
1001         }
1002
1003         impl<$tcx> QueryAccessors<TyCtxt<$tcx>> for queries::$name<$tcx> {
1004             const ANON: bool = is_anon!([$($modifiers)*]);
1005             const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]);
1006             const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$node;
1007
1008             type Cache = query_storage!(<$tcx>[$($modifiers)*][$K, $V]);
1009
1010             #[inline(always)]
1011             fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<TyCtxt<$tcx>, Self::Cache> {
1012                 &tcx.queries.$name
1013             }
1014
1015             #[allow(unused)]
1016             #[inline(always)]
1017             fn to_dep_node(tcx: TyCtxt<$tcx>, key: &Self::Key) -> DepNode {
1018                 DepConstructor::$node(tcx, *key)
1019             }
1020
1021             #[inline]
1022             fn compute(tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value {
1023                 let provider = tcx.queries.providers.get(key.query_crate())
1024                     // HACK(eddyb) it's possible crates may be loaded after
1025                     // the query engine is created, and because crate loading
1026                     // is not yet integrated with the query engine, such crates
1027                     // would be missing appropriate entries in `providers`.
1028                     .unwrap_or(&tcx.queries.fallback_extern_providers)
1029                     .$name;
1030                 provider(tcx, key)
1031             }
1032
1033             fn hash_result(
1034                 _hcx: &mut StableHashingContext<'_>,
1035                 _result: &Self::Value
1036             ) -> Option<Fingerprint> {
1037                 hash_result!([$($modifiers)*][_hcx, _result])
1038             }
1039
1040             fn handle_cycle_error(
1041                 tcx: TyCtxt<'tcx>,
1042                 error: CycleError<Query<'tcx>>
1043             ) -> Self::Value {
1044                 handle_cycle_error!([$($modifiers)*][tcx, error])
1045             }
1046         })*
1047
1048         #[derive(Copy, Clone)]
1049         pub struct TyCtxtEnsure<'tcx> {
1050             pub tcx: TyCtxt<'tcx>,
1051         }
1052
1053         impl TyCtxtEnsure<$tcx> {
1054             $($(#[$attr])*
1055             #[inline(always)]
1056             pub fn $name(self, key: $K) {
1057                 self.tcx.ensure_query::<queries::$name<'_>>(key)
1058             })*
1059         }
1060
1061         #[derive(Copy, Clone)]
1062         pub struct TyCtxtAt<'tcx> {
1063             pub tcx: TyCtxt<'tcx>,
1064             pub span: Span,
1065         }
1066
1067         impl Deref for TyCtxtAt<'tcx> {
1068             type Target = TyCtxt<'tcx>;
1069             #[inline(always)]
1070             fn deref(&self) -> &Self::Target {
1071                 &self.tcx
1072             }
1073         }
1074
1075         impl TyCtxt<$tcx> {
1076             /// Returns a transparent wrapper for `TyCtxt`, which ensures queries
1077             /// are executed instead of just returning their results.
1078             #[inline(always)]
1079             pub fn ensure(self) -> TyCtxtEnsure<$tcx> {
1080                 TyCtxtEnsure {
1081                     tcx: self,
1082                 }
1083             }
1084
1085             /// Returns a transparent wrapper for `TyCtxt` which uses
1086             /// `span` as the location of queries performed through it.
1087             #[inline(always)]
1088             pub fn at(self, span: Span) -> TyCtxtAt<$tcx> {
1089                 TyCtxtAt {
1090                     tcx: self,
1091                     span
1092                 }
1093             }
1094
1095             $($(#[$attr])*
1096             #[inline(always)]
1097             pub fn $name(self, key: $K) -> $V {
1098                 self.at(DUMMY_SP).$name(key)
1099             })*
1100
1101             /// All self-profiling events generated by the query engine use
1102             /// virtual `StringId`s for their `event_id`. This method makes all
1103             /// those virtual `StringId`s point to actual strings.
1104             ///
1105             /// If we are recording only summary data, the ids will point to
1106             /// just the query names. If we are recording query keys too, we
1107             /// allocate the corresponding strings here.
1108             pub fn alloc_self_profile_query_strings(self) {
1109                 use crate::ty::query::profiling_support::{
1110                     alloc_self_profile_query_strings_for_query_cache,
1111                     QueryKeyStringCache,
1112                 };
1113
1114                 if !self.prof.enabled() {
1115                     return;
1116                 }
1117
1118                 let mut string_cache = QueryKeyStringCache::new();
1119
1120                 $({
1121                     alloc_self_profile_query_strings_for_query_cache(
1122                         self,
1123                         stringify!($name),
1124                         &self.queries.$name,
1125                         &mut string_cache,
1126                     );
1127                 })*
1128             }
1129         }
1130
1131         impl TyCtxtAt<$tcx> {
1132             $($(#[$attr])*
1133             #[inline(always)]
1134             pub fn $name(self, key: $K) -> $V {
1135                 self.tcx.get_query::<queries::$name<'_>>(self.span, key)
1136             })*
1137         }
1138
1139         define_provider_struct! {
1140             tcx: $tcx,
1141             input: ($(([$($modifiers)*] [$name] [$K] [$V]))*)
1142         }
1143
1144         impl<$tcx> Copy for Providers<$tcx> {}
1145         impl<$tcx> Clone for Providers<$tcx> {
1146             fn clone(&self) -> Self { *self }
1147         }
1148     }
1149 }
1150
1151 macro_rules! define_queries_struct {
1152     (tcx: $tcx:tt,
1153      input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => {
1154         pub struct Queries<$tcx> {
1155             /// This provides access to the incremental compilation on-disk cache for query results.
1156             /// Do not access this directly. It is only meant to be used by
1157             /// `DepGraph::try_mark_green()` and the query infrastructure.
1158             pub(crate) on_disk_cache: OnDiskCache<'tcx>,
1159
1160             providers: IndexVec<CrateNum, Providers<$tcx>>,
1161             fallback_extern_providers: Box<Providers<$tcx>>,
1162
1163             $($(#[$attr])*  $name: QueryState<
1164                 TyCtxt<$tcx>,
1165                 <queries::$name<$tcx> as QueryAccessors<TyCtxt<'tcx>>>::Cache,
1166             >,)*
1167         }
1168
1169         impl<$tcx> Queries<$tcx> {
1170             pub(crate) fn new(
1171                 providers: IndexVec<CrateNum, Providers<$tcx>>,
1172                 fallback_extern_providers: Providers<$tcx>,
1173                 on_disk_cache: OnDiskCache<'tcx>,
1174             ) -> Self {
1175                 Queries {
1176                     providers,
1177                     fallback_extern_providers: Box::new(fallback_extern_providers),
1178                     on_disk_cache,
1179                     $($name: Default::default()),*
1180                 }
1181             }
1182
1183             pub(crate) fn try_collect_active_jobs(
1184                 &self
1185             ) -> Option<FxHashMap<QueryJobId<crate::dep_graph::DepKind>, QueryJobInfo<TyCtxt<'tcx>>>> {
1186                 let mut jobs = FxHashMap::default();
1187
1188                 $(
1189                     self.$name.try_collect_active_jobs(
1190                         <queries::$name<'tcx> as QueryAccessors<TyCtxt<'tcx>>>::DEP_KIND,
1191                         Query::$name,
1192                         &mut jobs,
1193                     )?;
1194                 )*
1195
1196                 Some(jobs)
1197             }
1198         }
1199     };
1200 }
1201
1202 macro_rules! define_provider_struct {
1203     (tcx: $tcx:tt,
1204      input: ($(([$($modifiers:tt)*] [$name:ident] [$K:ty] [$R:ty]))*)) => {
1205         pub struct Providers<$tcx> {
1206             $(pub $name: fn(TyCtxt<$tcx>, $K) -> $R,)*
1207         }
1208
1209         impl<$tcx> Default for Providers<$tcx> {
1210             fn default() -> Self {
1211                 $(fn $name<$tcx>(_: TyCtxt<$tcx>, key: $K) -> $R {
1212                     bug!("`tcx.{}({:?})` unsupported by its crate",
1213                          stringify!($name), key);
1214                 })*
1215                 Providers { $($name),* }
1216             }
1217         }
1218     };
1219 }