src/librustc/ty/query/plumbing.rs
1 //! The implementation of the query system itself. This defines the macros that
2 //! generate the actual methods on tcx which find and execute the provider,
3 //! manage the caches, and so forth.
4
5 use crate::dep_graph::{DepNodeIndex, DepNode, DepKind, SerializedDepNodeIndex};
6 use crate::ty::tls;
7 use crate::ty::{self, TyCtxt};
8 use crate::ty::query::Query;
9 use crate::ty::query::config::{QueryConfig, QueryDescription};
10 use crate::ty::query::job::{QueryJob, QueryResult, QueryInfo};
11
12 use crate::util::common::{profq_msg, ProfileQueriesMsg, QueryMsg};
13
14 use errors::DiagnosticBuilder;
15 use errors::Level;
16 use errors::Diagnostic;
17 use errors::FatalError;
18 use rustc_data_structures::fx::{FxHashMap};
19 use rustc_data_structures::sync::{Lrc, Lock};
20 use rustc_data_structures::thin_vec::ThinVec;
21 #[cfg(not(parallel_compiler))]
22 use rustc_data_structures::cold_path;
23 use std::mem;
24 use std::ptr;
25 use std::collections::hash_map::Entry;
26 use syntax_pos::Span;
27 use syntax::source_map::DUMMY_SP;
28
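/// Per-query cache: memoized results plus the currently active (in-flight or
/// poisoned) jobs for that query. `cache_hits` is only tracked when
/// `debug_assertions` are enabled.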
29 pub struct QueryCache<'tcx, D: QueryConfig<'tcx> + ?Sized> {
30     pub(super) results: FxHashMap<D::Key, QueryValue<D::Value>>,
31     pub(super) active: FxHashMap<D::Key, QueryResult<'tcx>>,
32     #[cfg(debug_assertions)]
33     pub(super) cache_hits: usize,
34 }
35
36 pub(super) struct QueryValue<T> {
37     pub(super) value: T,
38     pub(super) index: DepNodeIndex,
39 }
40
41 impl<T> QueryValue<T> {
42     pub(super) fn new(value: T,
43                       dep_node_index: DepNodeIndex)
44                       -> QueryValue<T> {
45         QueryValue {
46             value,
47             index: dep_node_index,
48         }
49     }
50 }
51
52 impl<'tcx, M: QueryConfig<'tcx>> Default for QueryCache<'tcx, M> {
53     fn default() -> QueryCache<'tcx, M> {
54         QueryCache {
55             results: FxHashMap::default(),
56             active: FxHashMap::default(),
57             #[cfg(debug_assertions)]
58             cache_hits: 0,
59         }
60     }
61 }
62
63 // If enabled, send a message to the profile-queries thread
64 macro_rules! profq_msg {
65     ($tcx:expr, $msg:expr) => {
66         if cfg!(debug_assertions) {
67             if $tcx.sess.profile_queries() {
68                 profq_msg($tcx.sess, $msg)
69             }
70         }
71     }
72 }
73
74 // If enabled, format a key using its debug string, which can be
75 // expensive to compute (in terms of time).
76 macro_rules! profq_query_msg {
77     ($query:expr, $tcx:expr, $key:expr) => {{
78         let msg = if cfg!(debug_assertions) {
79             if $tcx.sess.profile_queries_and_keys() {
80                 Some(format!("{:?}", $key))
81             } else { None }
82         } else { None };
83         QueryMsg {
84             query: $query,
85             msg,
86         }
87     }}
88 }
89
90 /// A type representing the responsibility to execute the job in the `job` field.
91 /// This will poison the relevant query if dropped.
92 pub(super) struct JobOwner<'a, 'tcx, Q: QueryDescription<'tcx> + 'a> {
93     cache: &'a Lock<QueryCache<'tcx, Q>>,
94     key: Q::Key,
95     job: Lrc<QueryJob<'tcx>>,
96 }
97
98 impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
99     /// Either gets a `JobOwner` corresponding to the query, allowing us to
100     /// start executing the query, or returns with the result of the query.
101     /// If the query is executing elsewhere, this will wait for it to finish.
102     /// If the query panicked, this will silently panic.
103     ///
104     /// This function is inlined because that results in a noticeable speed-up
105     /// for some compile-time benchmarks.
106     #[inline(always)]
107     pub(super) fn try_get(tcx: TyCtxt<'tcx>, span: Span, key: &Q::Key) -> TryGetJob<'a, 'tcx, Q> {
108         let cache = Q::query_cache(tcx);
109         loop {
110             let mut lock = cache.borrow_mut();
111             if let Some(value) = lock.results.get(key) {
112                 profq_msg!(tcx, ProfileQueriesMsg::CacheHit);
113                 tcx.sess.profiler(|p| p.record_query_hit(Q::NAME));
114                 let result = (value.value.clone(), value.index);
115                 #[cfg(debug_assertions)]
116                 {
117                     lock.cache_hits += 1;
118                 }
119                 return TryGetJob::JobCompleted(result);
120             }
121             let job = match lock.active.entry((*key).clone()) {
122                 Entry::Occupied(entry) => {
123                     match *entry.get() {
124                         QueryResult::Started(ref job) => {
125                             // For parallel queries, we'll block and wait until the query running
126                             // in another thread has completed. Record how long we wait in the
127                             // self-profiler.
128                             #[cfg(parallel_compiler)]
129                             tcx.sess.profiler(|p| p.query_blocked_start(Q::NAME));
130
131                             job.clone()
132                         },
133                         QueryResult::Poisoned => FatalError.raise(),
134                     }
135                 }
136                 Entry::Vacant(entry) => {
137                     // No job entry for this query. Return a new one to be started later.
138                     return tls::with_related_context(tcx, |icx| {
139                         // Create the `parent` variable before `info`. This allows LLVM
140                         // to elide the move of `info`
141                         let parent = icx.query.clone();
142                         let info = QueryInfo {
143                             span,
144                             query: Q::query(key.clone()),
145                         };
146                         let job = Lrc::new(QueryJob::new(info, parent));
147                         let owner = JobOwner {
148                             cache,
149                             job: job.clone(),
150                             key: (*key).clone(),
151                         };
152                         entry.insert(QueryResult::Started(job));
153                         TryGetJob::NotYetStarted(owner)
154                     })
155                 }
156             };
157             mem::drop(lock);
158
159             // If we are single-threaded we know that we have a cycle error,
160             // so we just return the error.
161             #[cfg(not(parallel_compiler))]
162             return TryGetJob::Cycle(cold_path(|| {
163                 Q::handle_cycle_error(tcx, job.find_cycle_in_stack(tcx, span))
164             }));
165
166             // With parallel queries we might just have to wait on some other
167             // thread.
168             #[cfg(parallel_compiler)]
169             {
170                 let result = job.r#await(tcx, span);
171                 tcx.sess.profiler(|p| p.query_blocked_end(Q::NAME));
172
173                 if let Err(cycle) = result {
174                     return TryGetJob::Cycle(Q::handle_cycle_error(tcx, cycle));
175                 }
176             }
177         }
178     }
179
180     /// Completes the query by updating the query cache with the `result`,
181     /// signals the waiters, and forgets the `JobOwner` so it won't poison the query.
182     #[inline(always)]
183     pub(super) fn complete(self, result: &Q::Value, dep_node_index: DepNodeIndex) {
184         // We can move out of `self` here because we `mem::forget` it below
185         let key = unsafe { ptr::read(&self.key) };
186         let job = unsafe { ptr::read(&self.job) };
187         let cache = self.cache;
188
189         // Forget ourself so our destructor won't poison the query
190         mem::forget(self);
191
192         let value = QueryValue::new(result.clone(), dep_node_index);
193         {
194             let mut lock = cache.borrow_mut();
195             lock.active.remove(&key);
196             lock.results.insert(key, value);
197         }
198
199         job.signal_complete();
200     }
201 }
202
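/// Runs `f` with a fresh diagnostics buffer and returns the captured
/// diagnostics alongside `f`'s result, so they can later be stored in the
/// incremental on-disk cache next to the query result.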
203 #[inline(always)]
204 fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
205 where
206     F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R
207 {
208     let diagnostics = Lock::new(ThinVec::new());
209     let result = f(Some(&diagnostics));
210     (result, diagnostics.into_inner())
211 }
212
213 impl<'a, 'tcx, Q: QueryDescription<'tcx>> Drop for JobOwner<'a, 'tcx, Q> {
214     #[inline(never)]
215     #[cold]
216     fn drop(&mut self) {
217         // Poison the query so jobs waiting on it panic
218         self.cache.borrow_mut().active.insert(self.key.clone(), QueryResult::Poisoned);
219         // Also signal the completion of the job, so waiters
220         // will continue execution
221         self.job.signal_complete();
222     }
223 }
224
225 #[derive(Clone)]
226 pub struct CycleError<'tcx> {
227     /// The query and related span which uses the cycle
228     pub(super) usage: Option<(Span, Query<'tcx>)>,
229     pub(super) cycle: Vec<QueryInfo<'tcx>>,
230 }
231
232 /// The result of `JobOwner::try_get`.
233 pub(super) enum TryGetJob<'a, 'tcx, D: QueryDescription<'tcx> + 'a> {
234     /// The query is not yet started. Contains a guard to the cache eventually used to start it.
235     NotYetStarted(JobOwner<'a, 'tcx, D>),
236
237     /// The query was already completed.
238     /// Contains the result of the query and its dep-node index
239     /// (cycle errors are reported through the `Cycle` variant instead).
240     JobCompleted((D::Value, DepNodeIndex)),
241
242     /// Trying to execute the query resulted in a cycle.
243     Cycle(D::Value),
244 }
245
246 impl<'tcx> TyCtxt<'tcx> {
247     /// Executes a job by changing the `ImplicitCtxt` to point to the
248     /// new query job while it executes. Diagnostics emitted during
249     /// execution are captured in the `diagnostics` buffer, if one is provided.
250     #[inline(always)]
251     pub(super) fn start_query<F, R>(
252         self,
253         job: Lrc<QueryJob<'tcx>>,
254         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
255         compute: F,
256     ) -> R
257     where
258         F: FnOnce(TyCtxt<'tcx>) -> R,
259     {
260         // The TyCtxt stored in TLS has the same global interner lifetime
261         // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
262         // when accessing the ImplicitCtxt
263         tls::with_related_context(self, move |current_icx| {
264             // Update the ImplicitCtxt to point to our new query job
265             let new_icx = tls::ImplicitCtxt {
266                 tcx: self.global_tcx(),
267                 query: Some(job),
268                 diagnostics,
269                 layout_depth: current_icx.layout_depth,
270                 task_deps: current_icx.task_deps,
271             };
272
273             // Use the ImplicitCtxt while we execute the query
274             tls::enter_context(&new_icx, |_| {
275                 compute(self.global_tcx())
276             })
277         })
278     }
279
280     #[inline(never)]
281     #[cold]
282     pub(super) fn report_cycle(
283         self,
284         CycleError { usage, cycle: stack }: CycleError<'tcx>,
285     ) -> DiagnosticBuilder<'tcx> {
286         assert!(!stack.is_empty());
287
288         let fix_span = |span: Span, query: &Query<'tcx>| {
289             self.sess.source_map().def_span(query.default_span(self, span))
290         };
291
292         // Disable naming impls with types in this path, since that
293         // sometimes cycles itself, leading to extra cycle errors.
294         // (And cycle errors around impls tend to occur during the
295         // collect/coherence phases anyhow.)
296         ty::print::with_forced_impl_filename_line(|| {
297             let span = fix_span(stack[1 % stack.len()].span, &stack[0].query);
298             let mut err = struct_span_err!(self.sess,
299                                            span,
300                                            E0391,
301                                            "cycle detected when {}",
302                                            stack[0].query.describe(self));
303
304             for i in 1..stack.len() {
305                 let query = &stack[i].query;
306                 let span = fix_span(stack[(i + 1) % stack.len()].span, query);
307                 err.span_note(span, &format!("...which requires {}...", query.describe(self)));
308             }
309
310             err.note(&format!("...which again requires {}, completing the cycle",
311                               stack[0].query.describe(self)));
312
313             if let Some((span, query)) = usage {
314                 err.span_note(fix_span(span, &query),
315                               &format!("cycle used when {}", query.describe(self)));
316             }
317
318             err
319         })
320     }
321
322     pub fn try_print_query_stack() {
323         eprintln!("query stack during panic:");
324
325         tls::with_context_opt(|icx| {
326             if let Some(icx) = icx {
327                 let mut current_query = icx.query.clone();
328                 let mut i = 0;
329
330                 while let Some(query) = current_query {
331                     let mut db = DiagnosticBuilder::new(icx.tcx.sess.diagnostic(),
332                         Level::FailureNote,
333                         &format!("#{} [{}] {}",
334                                  i,
335                                  query.info.query.name(),
336                                  query.info.query.describe(icx.tcx)));
337                     db.set_span(icx.tcx.sess.source_map().def_span(query.info.span));
338                     icx.tcx.sess.diagnostic().force_print_db(db);
339
340                     current_query = query.parent.clone();
341                     i += 1;
342                 }
343             }
344         });
345
346         eprintln!("end of query stack");
347     }
348
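    /// Looks up the result of query `Q` for `key`, running the provider if
    /// necessary. This is the main entry point of the query system: it handles
    /// the in-memory cache, cycle detection, dep-graph bookkeeping, and the
    /// incremental-compilation paths (anonymous, try-mark-green, forced).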
349     #[inline(never)]
350     pub(super) fn get_query<Q: QueryDescription<'tcx>>(self, span: Span, key: Q::Key) -> Q::Value {
351         debug!("ty::query::get_query<{}>(key={:?}, span={:?})",
352                Q::NAME.as_str(),
353                key,
354                span);
355
356         profq_msg!(self,
357             ProfileQueriesMsg::QueryBegin(
358                 span.data(),
359                 profq_query_msg!(Q::NAME.as_str(), self, key),
360             )
361         );
362
363         let job = match JobOwner::try_get(self, span, &key) {
364             TryGetJob::NotYetStarted(job) => job,
365             TryGetJob::Cycle(result) => return result,
366             TryGetJob::JobCompleted((v, index)) => {
367                 self.dep_graph.read_index(index);
368                 return v
369             }
370         };
371
372         // Fast path for when incr. comp. is off. `to_dep_node` is
373         // expensive for some DepKinds.
374         if !self.dep_graph.is_fully_enabled() {
375             let null_dep_node = DepNode::new_no_params(crate::dep_graph::DepKind::Null);
376             return self.force_query_with_job::<Q>(key, job, null_dep_node).0;
377         }
378
379         let dep_node = Q::to_dep_node(self, &key);
380
381         if dep_node.kind.is_anon() {
382             profq_msg!(self, ProfileQueriesMsg::ProviderBegin);
383             self.sess.profiler(|p| p.start_query(Q::NAME));
384
385             let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
386                 self.start_query(job.job.clone(), diagnostics, |tcx| {
387                     tcx.dep_graph.with_anon_task(dep_node.kind, || {
388                         Q::compute(tcx.global_tcx(), key)
389                     })
390                 })
391             });
392
393             self.sess.profiler(|p| p.end_query(Q::NAME));
394             profq_msg!(self, ProfileQueriesMsg::ProviderEnd);
395
396             self.dep_graph.read_index(dep_node_index);
397
398             if unlikely!(!diagnostics.is_empty()) {
399                 self.queries.on_disk_cache
400                     .store_diagnostics_for_anon_node(dep_node_index, diagnostics);
401             }
402
403             job.complete(&result, dep_node_index);
404
405             return result;
406         }
407
408         if !dep_node.kind.is_eval_always() {
409             // The diagnostics for this query will be
410             // promoted to the current session during
411             // try_mark_green(), so we can ignore them here.
412             let loaded = self.start_query(job.job.clone(), None, |tcx| {
413                 let marked = tcx.dep_graph.try_mark_green_and_read(tcx, &dep_node);
414                 marked.map(|(prev_dep_node_index, dep_node_index)| {
415                     (tcx.load_from_disk_and_cache_in_memory::<Q>(
416                         key.clone(),
417                         prev_dep_node_index,
418                         dep_node_index,
419                         &dep_node
420                     ), dep_node_index)
421                 })
422             });
423             if let Some((result, dep_node_index)) = loaded {
424                 job.complete(&result, dep_node_index);
425                 return result;
426             }
427         }
428
429         let (result, dep_node_index) = self.force_query_with_job::<Q>(key, job, dep_node);
430         self.dep_graph.read_index(dep_node_index);
431         result
432     }
433
434     fn load_from_disk_and_cache_in_memory<Q: QueryDescription<'tcx>>(
435         self,
436         key: Q::Key,
437         prev_dep_node_index: SerializedDepNodeIndex,
438         dep_node_index: DepNodeIndex,
439         dep_node: &DepNode,
440     ) -> Q::Value {
441         // Note: this function can be called concurrently from the same query.
442         // We must ensure that this is handled correctly.
443
444         debug_assert!(self.dep_graph.is_green(dep_node));
445
446         // First we try to load the result from the on-disk cache
447         let result = if Q::cache_on_disk(self.global_tcx(), key.clone()) &&
448                         self.sess.opts.debugging_opts.incremental_queries {
449             self.sess.profiler(|p| p.incremental_load_result_start(Q::NAME));
450             let result = Q::try_load_from_disk(self.global_tcx(), prev_dep_node_index);
451             self.sess.profiler(|p| p.incremental_load_result_end(Q::NAME));
452
453             // We always expect to find a cached result for things that
454             // can be forced from DepNode.
455             debug_assert!(!dep_node.kind.can_reconstruct_query_key() ||
456                           result.is_some(),
457                           "Missing on-disk cache entry for {:?}",
458                           dep_node);
459             result
460         } else {
461             // Some things are never cached on disk.
462             None
463         };
464
465         let result = if let Some(result) = result {
466             profq_msg!(self, ProfileQueriesMsg::CacheHit);
467             self.sess.profiler(|p| p.record_query_hit(Q::NAME));
468
469             result
470         } else {
471             // We could not load a result from the on-disk cache, so
472             // recompute.
473
474             self.sess.profiler(|p| p.start_query(Q::NAME));
475
476             // The dep-graph for this computation is already in
477             // place
478             let result = self.dep_graph.with_ignore(|| {
479                 Q::compute(self, key)
480             });
481
482             self.sess.profiler(|p| p.end_query(Q::NAME));
483             result
484         };
485
486         // If -Zincremental-verify-ich is specified, re-hash results from
487         // the cache and make sure that they have the expected fingerprint.
488         if unlikely!(self.sess.opts.debugging_opts.incremental_verify_ich) {
489             self.incremental_verify_ich::<Q>(&result, dep_node, dep_node_index);
490         }
491
492         if unlikely!(self.sess.opts.debugging_opts.query_dep_graph) {
493             self.dep_graph.mark_loaded_from_cache(dep_node_index, true);
494         }
495
496         result
497     }
498
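    /// Re-hashes the result of a green query (whether loaded from disk or
    /// recomputed) and asserts that it matches the fingerprint stored in the
    /// dep-graph. Only used when `-Zincremental-verify-ich` is enabled.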
499     #[inline(never)]
500     #[cold]
501     fn incremental_verify_ich<Q: QueryDescription<'tcx>>(
502         self,
503         result: &Q::Value,
504         dep_node: &DepNode,
505         dep_node_index: DepNodeIndex,
506     ) {
507         use crate::ich::Fingerprint;
508
509         assert!(Some(self.dep_graph.fingerprint_of(dep_node_index)) ==
510                 self.dep_graph.prev_fingerprint_of(dep_node),
511                 "Fingerprint for green query instance not loaded \
512                     from cache: {:?}", dep_node);
513
514         debug!("BEGIN verify_ich({:?})", dep_node);
515         let mut hcx = self.create_stable_hashing_context();
516
517         let new_hash = Q::hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO);
518         debug!("END verify_ich({:?})", dep_node);
519
520         let old_hash = self.dep_graph.fingerprint_of(dep_node_index);
521
522         assert!(new_hash == old_hash, "Found unstable fingerprints \
523             for {:?}", dep_node);
524     }
525
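    /// Runs the provider for `key` inside a new dep-graph task for `dep_node`,
    /// stores any emitted diagnostics in the on-disk cache, completes `job`,
    /// and returns the result together with its `DepNodeIndex`.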
526     #[inline(always)]
527     fn force_query_with_job<Q: QueryDescription<'tcx>>(
528         self,
529         key: Q::Key,
530         job: JobOwner<'_, 'tcx, Q>,
531         dep_node: DepNode,
532     ) -> (Q::Value, DepNodeIndex) {
533         // If the following assertion triggers, there are two possible reasons:
534         // 1. Something is wrong with DepNode creation, either here or
535         //    in DepGraph::try_mark_green()
536         // 2. Two distinct query keys get mapped to the same DepNode
537         //    (see for example #48923)
538         assert!(!self.dep_graph.dep_node_exists(&dep_node),
539                 "Forcing query with already existing DepNode.\n\
540                  - query-key: {:?}\n\
541                  - dep-node: {:?}",
542                 key, dep_node);
543
544         profq_msg!(self, ProfileQueriesMsg::ProviderBegin);
545         self.sess.profiler(|p| p.start_query(Q::NAME));
546
547         let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
548             self.start_query(job.job.clone(), diagnostics, |tcx| {
549                 if dep_node.kind.is_eval_always() {
550                     tcx.dep_graph.with_eval_always_task(dep_node,
551                                                         tcx,
552                                                         key,
553                                                         Q::compute,
554                                                         Q::hash_result)
555                 } else {
556                     tcx.dep_graph.with_task(dep_node,
557                                             tcx,
558                                             key,
559                                             Q::compute,
560                                             Q::hash_result)
561                 }
562             })
563         });
564
565         self.sess.profiler(|p| p.end_query(Q::NAME));
566         profq_msg!(self, ProfileQueriesMsg::ProviderEnd);
567
568         if unlikely!(self.sess.opts.debugging_opts.query_dep_graph) {
569             self.dep_graph.mark_loaded_from_cache(dep_node_index, false);
570         }
571
572         if dep_node.kind != crate::dep_graph::DepKind::Null {
573             if unlikely!(!diagnostics.is_empty()) {
574                 self.queries.on_disk_cache
575                     .store_diagnostics(dep_node_index, diagnostics);
576             }
577         }
578
579         job.complete(&result, dep_node_index);
580
581         (result, dep_node_index)
582     }
583
584     /// Ensures that either this query has all green inputs or it has been executed.
585     /// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
586     ///
587     /// This function is particularly useful when executing passes for their
588     /// side-effects -- e.g., in order to report errors for erroneous programs.
589     ///
590     /// Note: The optimization is only available during incr. comp.
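    ///
    /// Illustrative use, via the generated `TyCtxtEnsure` wrapper (the
    /// `borrowck` query name is taken from the `impl_load_from_cache!` list
    /// at the end of this file):
    ///
    /// ```ignore
    /// tcx.ensure().borrowck(def_id); // run the query for its side-effects only
    /// ```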
591     pub(super) fn ensure_query<Q: QueryDescription<'tcx>>(self, key: Q::Key) -> () {
592         let dep_node = Q::to_dep_node(self, &key);
593
594         if dep_node.kind.is_eval_always() {
595             let _ = self.get_query::<Q>(DUMMY_SP, key);
596             return;
597         }
598
599         // Ensuring an anonymous query makes no sense
600         assert!(!dep_node.kind.is_anon());
601         if self.dep_graph.try_mark_green_and_read(self, &dep_node).is_none() {
602             // A None return from `try_mark_green_and_read` means that this is either
603             // a new dep node or that the dep node has already been marked red.
604             // Either way, we can't call `dep_graph.read()` as we don't have the
605             // DepNodeIndex. We must invoke the query itself. The performance cost
606             // this introduces should be negligible as we'll immediately hit the
607             // in-memory cache, or another query down the line will.
608
609             let _ = self.get_query::<Q>(DUMMY_SP, key);
610         } else {
611             profq_msg!(self, ProfileQueriesMsg::CacheHit);
612             self.sess.profiler(|p| p.record_query_hit(Q::NAME));
613         }
614     }
615
616     #[allow(dead_code)]
617     fn force_query<Q: QueryDescription<'tcx>>(self, key: Q::Key, span: Span, dep_node: DepNode) {
618         profq_msg!(
619             self,
620             ProfileQueriesMsg::QueryBegin(span.data(),
621                                           profq_query_msg!(Q::NAME.as_str(), self, key))
622         );
623
624         // We may be concurrently trying to both execute and force a query.
625         // Ensure that only one of them runs the query.
626         let job = match JobOwner::try_get(self, span, &key) {
627             TryGetJob::NotYetStarted(job) => job,
628             TryGetJob::Cycle(_) |
629             TryGetJob::JobCompleted(_) => {
630                 return
631             }
632         };
633         self.force_query_with_job::<Q>(key, job, dep_node);
634     }
635 }
636
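// The following two macros dispatch on the modifiers attached to a query
// definition (e.g. `fatal_cycle`, `cycle_delay_bug`, `no_hash`): the first
// matching arm wins, and the last arm strips an unrecognized modifier and
// recurses on the remaining ones.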
637 macro_rules! handle_cycle_error {
638     ([][$tcx: expr, $error:expr]) => {{
639         $tcx.report_cycle($error).emit();
640         Value::from_cycle_error($tcx.global_tcx())
641     }};
642     ([fatal_cycle$(, $modifiers:ident)*][$tcx:expr, $error:expr]) => {{
643         $tcx.report_cycle($error).emit();
644         $tcx.sess.abort_if_errors();
645         unreachable!()
646     }};
647     ([cycle_delay_bug$(, $modifiers:ident)*][$tcx:expr, $error:expr]) => {{
648         $tcx.report_cycle($error).delay_as_bug();
649         Value::from_cycle_error($tcx.global_tcx())
650     }};
651     ([$other:ident$(, $modifiers:ident)*][$($args:tt)*]) => {
652         handle_cycle_error!([$($modifiers),*][$($args)*])
653     };
654 }
655
656 macro_rules! hash_result {
657     ([][$hcx:expr, $result:expr]) => {{
658         dep_graph::hash_result($hcx, &$result)
659     }};
660     ([no_hash$(, $modifiers:ident)*][$hcx:expr, $result:expr]) => {{
661         None
662     }};
663     ([$other:ident$(, $modifiers:ident)*][$($args:tt)*]) => {
664         hash_result!([$($modifiers),*][$($args)*])
665     };
666 }
667
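// `define_queries!` is invoked once, from the query definitions in `ty::query`,
// with the full list of queries grouped into profiling categories. A purely
// hypothetical invocation (made-up query and dep-node names) looks like this:
//
//     define_queries! { <'tcx>
//         Other {
//             /// Docs for the query go here.
//             [] fn example_query: ExampleNode(DefId) -> bool,
//         },
//     }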
668 macro_rules! define_queries {
669     (<$tcx:tt> $($category:tt {
670         $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*
671     },)*) => {
672         define_queries_inner! { <$tcx>
673             $($( $(#[$attr])* category<$category> [$($modifiers)*] fn $name: $node($K) -> $V,)*)*
674         }
675     }
676 }
677
678 macro_rules! define_queries_inner {
679     (<$tcx:tt>
680      $($(#[$attr:meta])* category<$category:tt>
681         [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*) => {
682
683         use std::mem;
684         #[cfg(parallel_compiler)]
685         use ty::query::job::QueryResult;
686         use rustc_data_structures::sync::Lock;
687         use crate::{
688             rustc_data_structures::stable_hasher::HashStable,
689             rustc_data_structures::stable_hasher::StableHasherResult,
690             rustc_data_structures::stable_hasher::StableHasher,
691             ich::StableHashingContext
692         };
693         use crate::util::profiling::ProfileCategory;
694
695         define_queries_struct! {
696             tcx: $tcx,
697             input: ($(([$($modifiers)*] [$($attr)*] [$name]))*)
698         }
699
700         impl<$tcx> Queries<$tcx> {
701             pub fn new(
702                 providers: IndexVec<CrateNum, Providers<$tcx>>,
703                 fallback_extern_providers: Providers<$tcx>,
704                 on_disk_cache: OnDiskCache<'tcx>,
705             ) -> Self {
706                 Queries {
707                     providers,
708                     fallback_extern_providers: Box::new(fallback_extern_providers),
709                     on_disk_cache,
710                     $($name: Default::default()),*
711                 }
712             }
713
714             #[cfg(parallel_compiler)]
715             pub fn collect_active_jobs(&self) -> Vec<Lrc<QueryJob<$tcx>>> {
716                 let mut jobs = Vec::new();
717
718                 // We use try_lock here since we are only called from the
719                 // deadlock handler, and this shouldn't be locked.
720                 $(
721                     jobs.extend(
722                         self.$name.try_lock().unwrap().active.values().filter_map(|v|
723                             if let QueryResult::Started(ref job) = *v {
724                                 Some(job.clone())
725                             } else {
726                                 None
727                             }
728                         )
729                     );
730                 )*
731
732                 jobs
733             }
734
735             pub fn print_stats(&self) {
736                 let mut queries = Vec::new();
737
738                 #[derive(Clone)]
739                 struct QueryStats {
740                     name: &'static str,
741                     cache_hits: usize,
742                     key_size: usize,
743                     key_type: &'static str,
744                     value_size: usize,
745                     value_type: &'static str,
746                     entry_count: usize,
747                 }
748
749                 fn stats<'tcx, Q: QueryConfig<'tcx>>(
750                     name: &'static str,
751                     map: &QueryCache<'tcx, Q>
752                 ) -> QueryStats {
753                     QueryStats {
754                         name,
755                         #[cfg(debug_assertions)]
756                         cache_hits: map.cache_hits,
757                         #[cfg(not(debug_assertions))]
758                         cache_hits: 0,
759                         key_size: mem::size_of::<Q::Key>(),
760                         key_type: unsafe { type_name::<Q::Key>() },
761                         value_size: mem::size_of::<Q::Value>(),
762                         value_type: unsafe { type_name::<Q::Value>() },
763                         entry_count: map.results.len(),
764                     }
765                 }
766
767                 $(
768                     queries.push(stats::<queries::$name<'_>>(
769                         stringify!($name),
770                         &*self.$name.lock()
771                     ));
772                 )*
773
774                 if cfg!(debug_assertions) {
775                     let hits: usize = queries.iter().map(|s| s.cache_hits).sum();
776                     let results: usize = queries.iter().map(|s| s.entry_count).sum();
777                     println!("\nQuery cache hit rate: {}", hits as f64 / (hits + results) as f64);
778                 }
779
780                 let mut query_key_sizes = queries.clone();
781                 query_key_sizes.sort_by_key(|q| q.key_size);
782                 println!("\nLarge query keys:");
783                 for q in query_key_sizes.iter().rev()
784                                         .filter(|q| q.key_size > 8) {
785                     println!(
786                         "   {} - {} x {} - {}",
787                         q.name,
788                         q.key_size,
789                         q.entry_count,
790                         q.key_type
791                     );
792                 }
793
794                 let mut query_value_sizes = queries.clone();
795                 query_value_sizes.sort_by_key(|q| q.value_size);
796                 println!("\nLarge query values:");
797                 for q in query_value_sizes.iter().rev()
798                                           .filter(|q| q.value_size > 8) {
799                     println!(
800                         "   {} - {} x {} - {}",
801                         q.name,
802                         q.value_size,
803                         q.entry_count,
804                         q.value_type
805                     );
806                 }
807
808                 if cfg!(debug_assertions) {
809                     let mut query_cache_hits = queries.clone();
810                     query_cache_hits.sort_by_key(|q| q.cache_hits);
811                     println!("\nQuery cache hits:");
812                     for q in query_cache_hits.iter().rev() {
813                         println!(
814                             "   {} - {} ({}%)",
815                             q.name,
816                             q.cache_hits,
817                             100.0 * q.cache_hits as f64 / (q.cache_hits + q.entry_count) as f64
818                         );
819                     }
820                 }
821
822                 let mut query_value_count = queries.clone();
823                 query_value_count.sort_by_key(|q| q.entry_count);
824                 println!("\nQuery value count:");
825                 for q in query_value_count.iter().rev() {
826                     println!("   {} - {}", q.name, q.entry_count);
827                 }
828             }
829         }
830
831         #[allow(nonstandard_style)]
832         #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
833         pub enum QueryName {
834             $($name),*
835         }
836
837         impl QueryName {
838             pub fn register_with_profiler(profiler: &crate::util::profiling::SelfProfiler) {
839                 $(profiler.register_query_name(QueryName::$name);)*
840             }
841
842             pub fn as_str(&self) -> &'static str {
843                 match self {
844                     $(QueryName::$name => stringify!($name),)*
845                 }
846             }
847         }
848
849         #[allow(nonstandard_style)]
850         #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
851         pub enum Query<$tcx> {
852             $($(#[$attr])* $name($K)),*
853         }
854
855         impl<$tcx> Query<$tcx> {
856             pub fn name(&self) -> &'static str {
857                 match *self {
858                     $(Query::$name(_) => stringify!($name),)*
859                 }
860             }
861
862             pub fn describe(&self, tcx: TyCtxt<'_>) -> Cow<'static, str> {
863                 let (r, name) = match *self {
864                     $(Query::$name(key) => {
865                         (queries::$name::describe(tcx, key), stringify!($name))
866                     })*
867                 };
868                 if tcx.sess.verbose() {
869                     format!("{} [{}]", r, name).into()
870                 } else {
871                     r
872                 }
873             }
874
875             // FIXME(eddyb) Get more valid Span's on queries.
876             pub fn default_span(&self, tcx: TyCtxt<$tcx>, span: Span) -> Span {
877                 if !span.is_dummy() {
878                     return span;
879                 }
880                 // The def_span query is used to calculate default_span,
881                 // so exit to avoid infinite recursion
882                 if let Query::def_span(..) = *self {
883                     return span
884                 }
885                 match *self {
886                     $(Query::$name(key) => key.default_span(tcx),)*
887                 }
888             }
889
890             pub fn query_name(&self) -> QueryName {
891                 match self {
892                     $(Query::$name(_) => QueryName::$name,)*
893                 }
894             }
895         }
896
897         impl<'a, $tcx> HashStable<StableHashingContext<'a>> for Query<$tcx> {
898             fn hash_stable<W: StableHasherResult>(&self,
899                                                 hcx: &mut StableHashingContext<'a>,
900                                                 hasher: &mut StableHasher<W>) {
901                 mem::discriminant(self).hash_stable(hcx, hasher);
902                 match *self {
903                     $(Query::$name(key) => key.hash_stable(hcx, hasher),)*
904                 }
905             }
906         }
907
908         pub mod queries {
909             use std::marker::PhantomData;
910
911             $(#[allow(nonstandard_style)]
912             pub struct $name<$tcx> {
913                 data: PhantomData<&$tcx ()>
914             })*
915         }
916
917         // This module and the functions in it exist only to provide a
918         // predictable symbol name prefix for query providers. This is helpful
919         // for analyzing queries in profilers.
920         pub(super) mod __query_compute {
921             $(#[inline(never)]
922             pub fn $name<F: FnOnce() -> R, R>(f: F) -> R {
923                 f()
924             })*
925         }
926
927         $(impl<$tcx> QueryConfig<$tcx> for queries::$name<$tcx> {
928             type Key = $K;
929             type Value = $V;
930
931             const NAME: QueryName = QueryName::$name;
932             const CATEGORY: ProfileCategory = $category;
933         }
934
935         impl<$tcx> QueryAccessors<$tcx> for queries::$name<$tcx> {
936             #[inline(always)]
937             fn query(key: Self::Key) -> Query<'tcx> {
938                 Query::$name(key)
939             }
940
941             #[inline(always)]
942             fn query_cache<'a>(tcx: TyCtxt<$tcx>) -> &'a Lock<QueryCache<$tcx, Self>> {
943                 &tcx.queries.$name
944             }
945
946             #[allow(unused)]
947             #[inline(always)]
948             fn to_dep_node(tcx: TyCtxt<$tcx>, key: &Self::Key) -> DepNode {
949                 use crate::dep_graph::DepConstructor::*;
950
951                 DepNode::new(tcx, $node(*key))
952             }
953
954             #[inline]
955             fn compute(tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value {
956                 __query_compute::$name(move || {
957                     let provider = tcx.queries.providers.get(key.query_crate())
958                         // HACK(eddyb) it's possible crates may be loaded after
959                         // the query engine is created, and because crate loading
960                         // is not yet integrated with the query engine, such crates
961                         // would be missing appropriate entries in `providers`.
962                         .unwrap_or(&tcx.queries.fallback_extern_providers)
963                         .$name;
964                     provider(tcx.global_tcx(), key)
965                 })
966             }
967
968             fn hash_result(
969                 _hcx: &mut StableHashingContext<'_>,
970                 _result: &Self::Value
971             ) -> Option<Fingerprint> {
972                 hash_result!([$($modifiers)*][_hcx, _result])
973             }
974
975             fn handle_cycle_error(
976                 tcx: TyCtxt<'tcx>,
977                 error: CycleError<'tcx>
978             ) -> Self::Value {
979                 handle_cycle_error!([$($modifiers)*][tcx, error])
980             }
981         })*
982
983         #[derive(Copy, Clone)]
984         pub struct TyCtxtEnsure<'tcx> {
985             pub tcx: TyCtxt<'tcx>,
986         }
987
988         impl TyCtxtEnsure<$tcx> {
989             $($(#[$attr])*
990             #[inline(always)]
991             pub fn $name(self, key: $K) {
992                 self.tcx.ensure_query::<queries::$name<'_>>(key)
993             })*
994         }
995
996         #[derive(Copy, Clone)]
997         pub struct TyCtxtAt<'tcx> {
998             pub tcx: TyCtxt<'tcx>,
999             pub span: Span,
1000         }
1001
1002         impl Deref for TyCtxtAt<'tcx> {
1003             type Target = TyCtxt<'tcx>;
1004             #[inline(always)]
1005             fn deref(&self) -> &Self::Target {
1006                 &self.tcx
1007             }
1008         }
1009
1010         impl TyCtxt<$tcx> {
1011             /// Returns a transparent wrapper for `TyCtxt`, which ensures queries
1012             /// are executed instead of just returning their results.
1013             #[inline(always)]
1014             pub fn ensure(self) -> TyCtxtEnsure<$tcx> {
1015                 TyCtxtEnsure {
1016                     tcx: self,
1017                 }
1018             }
1019
1020             /// Returns a transparent wrapper for `TyCtxt` which uses
1021             /// `span` as the location of queries performed through it.
1022             #[inline(always)]
1023             pub fn at(self, span: Span) -> TyCtxtAt<$tcx> {
1024                 TyCtxtAt {
1025                     tcx: self,
1026                     span
1027                 }
1028             }
1029
1030             $($(#[$attr])*
1031             #[inline(always)]
1032             pub fn $name(self, key: $K) -> $V {
1033                 self.at(DUMMY_SP).$name(key)
1034             })*
1035         }
1036
1037         impl TyCtxtAt<$tcx> {
1038             $($(#[$attr])*
1039             #[inline(always)]
1040             pub fn $name(self, key: $K) -> $V {
1041                 self.tcx.get_query::<queries::$name<'_>>(self.span, key)
1042             })*
1043         }
1044
1045         define_provider_struct! {
1046             tcx: $tcx,
1047             input: ($(([$($modifiers)*] [$name] [$K] [$V]))*)
1048         }
1049
1050         impl<$tcx> Copy for Providers<$tcx> {}
1051         impl<$tcx> Clone for Providers<$tcx> {
1052             fn clone(&self) -> Self { *self }
1053         }
1054     }
1055 }
1056
1057 macro_rules! define_queries_struct {
1058     (tcx: $tcx:tt,
1059      input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => {
1060         pub struct Queries<$tcx> {
1061             /// This provides access to the incremental compilation on-disk cache for query results.
1062             /// Do not access this directly. It is only meant to be used by
1063             /// `DepGraph::try_mark_green()` and the query infrastructure.
1064             pub(crate) on_disk_cache: OnDiskCache<'tcx>,
1065
1066             providers: IndexVec<CrateNum, Providers<$tcx>>,
1067             fallback_extern_providers: Box<Providers<$tcx>>,
1068
1069             $($(#[$attr])*  $name: Lock<QueryCache<$tcx, queries::$name<$tcx>>>,)*
1070         }
1071     };
1072 }
1073
1074 macro_rules! define_provider_struct {
1075     (tcx: $tcx:tt,
1076      input: ($(([$($modifiers:tt)*] [$name:ident] [$K:ty] [$R:ty]))*)) => {
1077         pub struct Providers<$tcx> {
1078             $(pub $name: fn(TyCtxt<$tcx>, $K) -> $R,)*
1079         }
1080
1081         impl<$tcx> Default for Providers<$tcx> {
1082             fn default() -> Self {
1083                 $(fn $name<$tcx>(_: TyCtxt<$tcx>, key: $K) -> $R {
1084                     bug!("tcx.{}({:?}) unsupported by its crate",
1085                          stringify!($name), key);
1086                 })*
1087                 Providers { $($name),* }
1088             }
1089         }
1090     };
1091 }
1092
1093
1094 /// The red/green evaluation system will try to mark a specific DepNode in the
1095 /// dependency graph as green by recursively trying to mark the dependencies of
1096 /// that DepNode as green. While doing so, it will sometimes encounter a DepNode
1097 /// where we don't know if it is red or green and we therefore actually have
1098 /// to recompute its value in order to find out. Since the only piece of
1099 /// information that we have at that point is the DepNode we are trying to
1100 /// re-evaluate, we need some way to re-run a query from just that. This is what
1101 /// `force_from_dep_node()` implements.
1102 ///
1103 /// In the general case, a DepNode consists of a DepKind and an opaque
1104 /// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint
1105 /// is usually constructed by computing a stable hash of the query-key that the
1106 /// DepNode corresponds to. Consequently, it is not in general possible to go
1107 /// back from hash to query-key (since hash functions are not reversible). For
1108 /// this reason `force_from_dep_node()` is expected to fail from time to time
1109 /// because we just cannot find out, from the DepNode alone, what the
1110 /// corresponding query-key is and therefore cannot re-run the query.
1111 ///
1112 /// The system deals with this case by letting `try_mark_green` fail, which forces
1113 /// the root query to be re-evaluated.
1114 ///
1115 /// Now, if `force_from_dep_node()` always failed, it would be pretty useless.
1116 /// Fortunately, we can use some contextual information that will allow us to
1117 /// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we
1118 /// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a
1119 /// valid `DefPathHash`. Since we also always build a huge table that maps every
1120 /// `DefPathHash` in the current codebase to the corresponding `DefId`, we have
1121 /// everything we need to re-run the query.
1122 ///
1123 /// Take the `mir_validated` query as an example. Like many other queries, it
1124 /// just has a single parameter: the `DefId` of the item it will compute the
1125 /// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode`
1126 /// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
1127 /// is actually a `DefPathHash`, and can therefore just look up the corresponding
1128 /// `DefId` in `tcx.def_path_hash_to_def_id`.
1129 ///
1130 /// When you implement a new query, it will likely have a corresponding new
1131 /// `DepKind`, and you'll have to support it here in `force_from_dep_node()`. As
1132 /// a rule of thumb, if your query takes a `DefId` or `DefIndex` as sole parameter,
1133 /// then `force_from_dep_node()` should not fail for it. Otherwise, you can just
1134 /// add it to the "We don't have enough information to reconstruct..." group in
1135 /// the match below.
1136 pub fn force_from_dep_node<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> bool {
1137     use crate::dep_graph::RecoverKey;
1138
1139     // We must avoid ever having to call force_from_dep_node() for a
1140     // DepNode::codegen_unit:
1141     // Since we cannot reconstruct the query key of a DepNode::codegen_unit, we
1142     // would always end up having to evaluate the first caller of the
1143     // `codegen_unit` query that *is* reconstructible. This might very well be
1144     // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
1145     // to re-trigger calling the `codegen_unit` query with the right key. At
1146     // that point we would already have re-done all the work we are trying to
1147     // avoid doing in the first place.
1148     // The solution is simple: Just explicitly call the `codegen_unit` query for
1149     // each CGU, right after partitioning. This way `try_mark_green` will always
1150     // hit the cache instead of having to go through `force_from_dep_node`.
1151     // This assertion makes sure we actually keep applying the solution above.
1152     debug_assert!(dep_node.kind != DepKind::codegen_unit,
1153                   "calling force_from_dep_node() on DepKind::codegen_unit");
1154
1155     if !dep_node.kind.can_reconstruct_query_key() {
1156         return false
1157     }
1158
1159     macro_rules! def_id {
1160         () => {
1161             if let Some(def_id) = dep_node.extract_def_id(tcx) {
1162                 def_id
1163             } else {
1164                 // return from the whole function
1165                 return false
1166             }
1167         }
1168     };
1169
1170     macro_rules! krate {
1171         () => { (def_id!()).krate }
1172     };
1173
1174     macro_rules! force_ex {
1175         ($tcx:expr, $query:ident, $key:expr) => {
1176             {
1177                 $tcx.force_query::<crate::ty::query::queries::$query<'_>>(
1178                     $key,
1179                     DUMMY_SP,
1180                     *dep_node
1181                 );
1182             }
1183         }
1184     };
1185
1186     macro_rules! force {
1187         ($query:ident, $key:expr) => { force_ex!(tcx, $query, $key) }
1188     };
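    // For instance, `force!(analysis, krate!())` expands (via `force_ex!`) to
    // `tcx.force_query::<crate::ty::query::queries::analysis<'_>>(krate!(), DUMMY_SP, *dep_node)`.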
1189
1190     rustc_dep_node_force!([dep_node, tcx]
1191         // These are inputs that are expected to be pre-allocated and that
1192         // should therefore always be red or green already
1193         DepKind::AllLocalTraitImpls |
1194         DepKind::Krate |
1195         DepKind::CrateMetadata |
1196         DepKind::HirBody |
1197         DepKind::Hir |
1198
1199         // These are anonymous nodes
1200         DepKind::TraitSelect |
1201
1202         // We don't have enough information to reconstruct the query key of
1203         // these
1204         DepKind::CompileCodegenUnit => {
1205             bug!("force_from_dep_node() - Encountered {:?}", dep_node)
1206         }
1207
1208         DepKind::Analysis => { force!(analysis, krate!()); }
1209     );
1210
1211     true
1212 }
1213
1214
1215 // FIXME(#45015): Another piece of boilerplate code that could be generated in
1216 //                a combined define_dep_nodes!()/define_queries!() macro.
1217 macro_rules! impl_load_from_cache {
1218     ($($dep_kind:ident => $query_name:ident,)*) => {
1219         impl DepNode {
1220             // Check whether the query invocation corresponding to the given
1221             // DepNode is eligible for on-disk-caching.
1222             pub fn cache_on_disk(&self, tcx: TyCtxt<'_>) -> bool {
1223                 use crate::ty::query::queries;
1224                 use crate::ty::query::QueryDescription;
1225
1226                 match self.kind {
1227                     $(DepKind::$dep_kind => {
1228                         let def_id = self.extract_def_id(tcx).unwrap();
1229                         queries::$query_name::cache_on_disk(tcx.global_tcx(), def_id)
1230                     })*
1231                     _ => false
1232                 }
1233             }
1234
1235             // This method will execute the query corresponding to the given
1236             // DepNode. It is only expected to work for DepNodes where the
1237             // above `cache_on_disk` method returns true.
1238             // Also, as a sanity check, it expects that the corresponding query
1239             // invocation has been marked as green already.
1240             pub fn load_from_on_disk_cache(&self, tcx: TyCtxt<'_>) {
1241                 match self.kind {
1242                     $(DepKind::$dep_kind => {
1243                         debug_assert!(tcx.dep_graph
1244                                          .node_color(self)
1245                                          .map(|c| c.is_green())
1246                                          .unwrap_or(false));
1247
1248                         let def_id = self.extract_def_id(tcx).unwrap();
1249                         let _ = tcx.$query_name(def_id);
1250                     })*
1251                     _ => {
1252                         bug!()
1253                     }
1254                 }
1255             }
1256         }
1257     }
1258 }
1259
1260 impl_load_from_cache!(
1261     typeck_tables_of => typeck_tables_of,
1262     optimized_mir => optimized_mir,
1263     unsafety_check_result => unsafety_check_result,
1264     borrowck => borrowck,
1265     mir_borrowck => mir_borrowck,
1266     mir_const_qualif => mir_const_qualif,
1267     const_is_rvalue_promotable_to_static => const_is_rvalue_promotable_to_static,
1268     check_match => check_match,
1269     type_of => type_of,
1270     generics_of => generics_of,
1271     predicates_of => predicates_of,
1272     used_trait_imports => used_trait_imports,
1273     codegen_fn_attrs => codegen_fn_attrs,
1274     specialization_graph_of => specialization_graph_of,
1275 );