Rollup merge of #68043 - Zoxc:missing-timers, r=wesleywiser
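The diff below removes the per-query `QueryName` registration and instead ends each self-profiling timer with the query's `DepNodeIndex` (via `finish_with_query_invocation_id(dep_node_index.into())`), so event ids can be resolved to strings afterwards through `alloc_self_profile_query_strings`. A minimal sketch of that guard pattern, using hypothetical stand-in types rather than the real `rustc_data_structures::profiling` API:

    // Hypothetical stand-ins; the real types live in rustc_data_structures::profiling.
    use std::time::Instant;

    /// Stand-in for the virtual id that `DepNodeIndex::into()` produces.
    #[derive(Copy, Clone, Debug)]
    struct QueryInvocationId(u64);

    struct Profiler {
        enabled: bool,
    }

    struct TimingGuard<'a> {
        profiler: &'a Profiler,
        start: Instant,
    }

    impl Profiler {
        /// Start timing a query provider; the event id is not known yet.
        fn query_provider(&self) -> TimingGuard<'_> {
            TimingGuard { profiler: self, start: Instant::now() }
        }
    }

    impl<'a> TimingGuard<'a> {
        /// Close the event once the `DepNodeIndex` is known, instead of
        /// recording a pre-registered query name on drop.
        fn finish_with_query_invocation_id(self, id: QueryInvocationId) {
            if self.profiler.enabled {
                let elapsed = self.start.elapsed();
                println!("query event {:?} took {:?}", id, elapsed);
            }
        }
    }

    fn main() {
        let prof = Profiler { enabled: true };
        let timer = prof.query_provider();
        // ... run the provider and obtain a dep_node_index ...
        timer.finish_with_query_invocation_id(QueryInvocationId(42));
    }

The explicit `finish_with_query_invocation_id` call (rather than recording on drop) matters because the event id only becomes known once the cache lookup or the provider yields a `DepNodeIndex`.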
diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs
index fc55b665c1d0e671333e3d9f725142292e9ee02a..84efbe21f10aa25bce31a01e0599f574e9e4e92f 100644
--- a/src/librustc/ty/query/plumbing.rs
+++ b/src/librustc/ty/query/plumbing.rs
@@ -2,30 +2,28 @@
 //! generate the actual methods on tcx which find and execute the provider,
 //! manage the caches, and so forth.
 
-use crate::dep_graph::{DepNodeIndex, DepNode, DepKind, SerializedDepNodeIndex};
+use crate::dep_graph::{DepKind, DepNode, DepNodeIndex, SerializedDepNodeIndex};
+use crate::ty::query::config::{QueryConfig, QueryDescription};
+use crate::ty::query::job::{QueryInfo, QueryJob};
+use crate::ty::query::Query;
 use crate::ty::tls;
 use crate::ty::{self, TyCtxt};
-use crate::ty::query::Query;
-use crate::ty::query::config::{QueryConfig, QueryDescription};
-use crate::ty::query::job::{QueryJob, QueryInfo};
-
-use errors::DiagnosticBuilder;
-use errors::Level;
-use errors::Diagnostic;
-use errors::FatalError;
-use errors::Handler;
-use rustc_data_structures::fx::{FxHasher, FxHashMap};
-use rustc_data_structures::sync::{Lrc, Lock};
-use rustc_data_structures::sharded::Sharded;
-use rustc_data_structures::thin_vec::ThinVec;
+
 #[cfg(not(parallel_compiler))]
 use rustc_data_structures::cold_path;
+use rustc_data_structures::fx::{FxHashMap, FxHasher};
+#[cfg(parallel_compiler)]
+use rustc_data_structures::profiling::TimingGuard;
+use rustc_data_structures::sharded::Sharded;
+use rustc_data_structures::sync::{Lock, Lrc};
+use rustc_data_structures::thin_vec::ThinVec;
+use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, FatalError, Handler, Level};
+use rustc_span::source_map::DUMMY_SP;
+use rustc_span::Span;
+use std::collections::hash_map::Entry;
 use std::hash::{Hash, Hasher};
 use std::mem;
 use std::ptr;
-use std::collections::hash_map::Entry;
-use syntax_pos::Span;
-use syntax::source_map::DUMMY_SP;
 
 use rustc_error_codes::*;
 
@@ -42,13 +40,8 @@ pub(super) struct QueryValue<T> {
 }
 
 impl<T> QueryValue<T> {
-    pub(super) fn new(value: T,
-                      dep_node_index: DepNodeIndex)
-                      -> QueryValue<T> {
-        QueryValue {
-            value,
-            index: dep_node_index,
-        }
+    pub(super) fn new(value: T, dep_node_index: DepNodeIndex) -> QueryValue<T> {
+        QueryValue { value, index: dep_node_index }
     }
 }
 
@@ -91,6 +84,19 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
     /// for some compile-time benchmarks.
     #[inline(always)]
     pub(super) fn try_get(tcx: TyCtxt<'tcx>, span: Span, key: &Q::Key) -> TryGetJob<'a, 'tcx, Q> {
+        // Handling the `query_blocked_prof_timer` is a bit weird because of the
+        // control flow in this function: Blocking is implemented by
+        // awaiting a running job and, once that is done, entering the loop below
+        // again from the top. In that second iteration we will hit the
+        // cache which provides us with the information we need for
+        // finishing the "query-blocked" event.
+        //
+        // We thus allocate `query_blocked_prof_timer` outside the loop,
+        // initialize it during the first iteration and finish it during the
+        // second iteration.
+        #[cfg(parallel_compiler)]
+        let mut query_blocked_prof_timer: Option<TimingGuard<'_>> = None;
+
         let cache = Q::query_cache(tcx);
         loop {
             // We compute the key's hash once and then use it for both the
@@ -104,7 +110,17 @@ pub(super) fn try_get(tcx: TyCtxt<'tcx>, span: Span, key: &Q::Key) -> TryGetJob<
             if let Some((_, value)) =
                 lock.results.raw_entry().from_key_hashed_nocheck(key_hash, key)
             {
-                tcx.prof.query_cache_hit(Q::NAME);
+                if unlikely!(tcx.prof.enabled()) {
+                    tcx.prof.query_cache_hit(value.index.into());
+
+                    #[cfg(parallel_compiler)]
+                    {
+                        if let Some(prof_timer) = query_blocked_prof_timer.take() {
+                            prof_timer.finish_with_query_invocation_id(value.index.into());
+                        }
+                    }
+                }
+
                 let result = (value.value.clone(), value.index);
                 #[cfg(debug_assertions)]
                 {
@@ -113,9 +129,6 @@ pub(super) fn try_get(tcx: TyCtxt<'tcx>, span: Span, key: &Q::Key) -> TryGetJob<
                 return TryGetJob::JobCompleted(result);
             }
 
-            #[cfg(parallel_compiler)]
-            let query_blocked_prof_timer;
-
             let job = match lock.active.entry((*key).clone()) {
                 Entry::Occupied(entry) => {
                     match *entry.get() {
@@ -125,11 +138,11 @@ pub(super) fn try_get(tcx: TyCtxt<'tcx>, span: Span, key: &Q::Key) -> TryGetJob<
                             // self-profiler.
                             #[cfg(parallel_compiler)]
                             {
-                                query_blocked_prof_timer = tcx.prof.query_blocked(Q::NAME);
+                                query_blocked_prof_timer = Some(tcx.prof.query_blocked());
                             }
 
                             job.clone()
-                        },
+                        }
                         QueryResult::Poisoned => FatalError.raise(),
                     }
                 }
@@ -139,19 +152,12 @@ pub(super) fn try_get(tcx: TyCtxt<'tcx>, span: Span, key: &Q::Key) -> TryGetJob<
                         // Create the `parent` variable before `info`. This allows LLVM
                         // to elide the move of `info`
                         let parent = icx.query.clone();
-                        let info = QueryInfo {
-                            span,
-                            query: Q::query(key.clone()),
-                        };
+                        let info = QueryInfo { span, query: Q::query(key.clone()) };
                         let job = Lrc::new(QueryJob::new(info, parent));
-                        let owner = JobOwner {
-                            cache,
-                            job: job.clone(),
-                            key: (*key).clone(),
-                        };
+                        let owner = JobOwner { cache, job: job.clone(), key: (*key).clone() };
                         entry.insert(QueryResult::Started(job));
                         TryGetJob::NotYetStarted(owner)
-                    })
+                    });
                 }
             };
             mem::drop(lock);
@@ -169,11 +175,6 @@ pub(super) fn try_get(tcx: TyCtxt<'tcx>, span: Span, key: &Q::Key) -> TryGetJob<
             {
                 let result = job.r#await(tcx, span);
 
-                // This `drop()` is not strictly necessary as the binding
-                // would go out of scope anyway. But it's good to have an
-                // explicit marker of how far the measurement goes.
-                drop(query_blocked_prof_timer);
-
                 if let Err(cycle) = result {
                     return TryGetJob::Cycle(Q::handle_cycle_error(tcx, cycle));
                 }
@@ -207,7 +208,7 @@ pub(super) fn complete(self, result: &Q::Value, dep_node_index: DepNodeIndex) {
 #[inline(always)]
 fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
 where
-    F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R
+    F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R,
 {
     let diagnostics = Lock::new(ThinVec::new());
     let result = f(Some(&diagnostics));
@@ -276,9 +277,7 @@ pub(super) fn start_query<F, R>(
             };
 
             // Use the `ImplicitCtxt` while we execute the query.
-            tls::enter_context(&new_icx, |_| {
-                compute(self)
-            })
+            tls::enter_context(&new_icx, |_| compute(self))
         })
     }
 
@@ -300,11 +299,13 @@ pub(super) fn report_cycle(
         // collect/coherence phases anyhow.)
         ty::print::with_forced_impl_filename_line(|| {
             let span = fix_span(stack[1 % stack.len()].span, &stack[0].query);
-            let mut err = struct_span_err!(self.sess,
-                                           span,
-                                           E0391,
-                                           "cycle detected when {}",
-                                           stack[0].query.describe(self));
+            let mut err = struct_span_err!(
+                self.sess,
+                span,
+                E0391,
+                "cycle detected when {}",
+                stack[0].query.describe(self)
+            );
 
             for i in 1..stack.len() {
                 let query = &stack[i].query;
@@ -312,12 +313,16 @@ pub(super) fn report_cycle(
                 err.span_note(span, &format!("...which requires {}...", query.describe(self)));
             }
 
-            err.note(&format!("...which again requires {}, completing the cycle",
-                              stack[0].query.describe(self)));
+            err.note(&format!(
+                "...which again requires {}, completing the cycle",
+                stack[0].query.describe(self)
+            ));
 
             if let Some((span, query)) = usage {
-                err.span_note(fix_span(span, &query),
-                              &format!("cycle used when {}", query.describe(self)));
+                err.span_note(
+                    fix_span(span, &query),
+                    &format!("cycle used when {}", query.describe(self)),
+                );
             }
 
             err
@@ -336,11 +341,15 @@ pub fn try_print_query_stack(handler: &Handler) {
                 let mut i = 0;
 
                 while let Some(query) = current_query {
-                    let mut diag = Diagnostic::new(Level::FailureNote,
-                        &format!("#{} [{}] {}",
-                                 i,
-                                 query.info.query.name(),
-                                 query.info.query.describe(icx.tcx)));
+                    let mut diag = Diagnostic::new(
+                        Level::FailureNote,
+                        &format!(
+                            "#{} [{}] {}",
+                            i,
+                            query.info.query.name(),
+                            query.info.query.describe(icx.tcx)
+                        ),
+                    );
                     diag.span = icx.tcx.sess.source_map().def_span(query.info.span).into();
                     handler.force_print_diagnostic(diag);
 
@@ -355,17 +364,14 @@ pub fn try_print_query_stack(handler: &Handler) {
 
     #[inline(never)]
     pub(super) fn get_query<Q: QueryDescription<'tcx>>(self, span: Span, key: Q::Key) -> Q::Value {
-        debug!("ty::query::get_query<{}>(key={:?}, span={:?})",
-               Q::NAME.as_str(),
-               key,
-               span);
+        debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span);
 
         let job = match JobOwner::try_get(self, span, &key) {
             TryGetJob::NotYetStarted(job) => job,
             TryGetJob::Cycle(result) => return result,
             TryGetJob::JobCompleted((v, index)) => {
                 self.dep_graph.read_index(index);
-                return v
+                return v;
             }
         };
 
@@ -377,23 +383,21 @@ pub(super) fn get_query<Q: QueryDescription<'tcx>>(self, span: Span, key: Q::Key
         }
 
         if Q::ANON {
-
-            let prof_timer = self.prof.query_provider(Q::NAME);
+            let prof_timer = self.prof.query_provider();
 
             let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
                 self.start_query(job.job.clone(), diagnostics, |tcx| {
-                    tcx.dep_graph.with_anon_task(Q::dep_kind(), || {
-                        Q::compute(tcx, key)
-                    })
+                    tcx.dep_graph.with_anon_task(Q::dep_kind(), || Q::compute(tcx, key))
                 })
             });
 
-            drop(prof_timer);
+            prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
             self.dep_graph.read_index(dep_node_index);
 
             if unlikely!(!diagnostics.is_empty()) {
-                self.queries.on_disk_cache
+                self.queries
+                    .on_disk_cache
                     .store_diagnostics_for_anon_node(dep_node_index, diagnostics);
             }
 
@@ -411,12 +415,15 @@ pub(super) fn get_query<Q: QueryDescription<'tcx>>(self, span: Span, key: Q::Key
             let loaded = self.start_query(job.job.clone(), None, |tcx| {
                 let marked = tcx.dep_graph.try_mark_green_and_read(tcx, &dep_node);
                 marked.map(|(prev_dep_node_index, dep_node_index)| {
-                    (tcx.load_from_disk_and_cache_in_memory::<Q>(
-                        key.clone(),
-                        prev_dep_node_index,
+                    (
+                        tcx.load_from_disk_and_cache_in_memory::<Q>(
+                            key.clone(),
+                            prev_dep_node_index,
+                            dep_node_index,
+                            &dep_node,
+                        ),
                         dep_node_index,
-                        &dep_node
-                    ), dep_node_index)
+                    )
                 })
             });
             if let Some((result, dep_node_index)) = loaded {
@@ -443,17 +450,20 @@ fn load_from_disk_and_cache_in_memory<Q: QueryDescription<'tcx>>(
         debug_assert!(self.dep_graph.is_green(dep_node));
 
         // First we try to load the result from the on-disk cache.
-        let result = if Q::cache_on_disk(self, key.clone(), None) &&
-                        self.sess.opts.debugging_opts.incremental_queries {
-            let _prof_timer = self.prof.incr_cache_loading(Q::NAME);
+        let result = if Q::cache_on_disk(self, key.clone(), None)
+            && self.sess.opts.debugging_opts.incremental_queries
+        {
+            let prof_timer = self.prof.incr_cache_loading();
             let result = Q::try_load_from_disk(self, prev_dep_node_index);
+            prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
             // We always expect to find a cached result for things that
             // can be forced from `DepNode`.
-            debug_assert!(!dep_node.kind.can_reconstruct_query_key() ||
-                          result.is_some(),
-                          "missing on-disk cache entry for {:?}",
-                          dep_node);
+            debug_assert!(
+                !dep_node.kind.can_reconstruct_query_key() || result.is_some(),
+                "missing on-disk cache entry for {:?}",
+                dep_node
+            );
             result
         } else {
             // Some things are never cached on disk.
@@ -465,12 +475,12 @@ fn load_from_disk_and_cache_in_memory<Q: QueryDescription<'tcx>>(
         } else {
             // We could not load a result from the on-disk cache, so
             // recompute.
-            let _prof_timer = self.prof.query_provider(Q::NAME);
+            let prof_timer = self.prof.query_provider();
 
             // The dep-graph for this computation is already in-place.
-            let result = self.dep_graph.with_ignore(|| {
-                Q::compute(self, key)
-            });
+            let result = self.dep_graph.with_ignore(|| Q::compute(self, key));
+
+            prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
             result
         };
@@ -495,8 +505,8 @@ fn incremental_verify_ich<Q: QueryDescription<'tcx>>(
         use crate::ich::Fingerprint;
 
         assert!(
-            Some(self.dep_graph.fingerprint_of(dep_node_index)) ==
-                self.dep_graph.prev_fingerprint_of(dep_node),
+            Some(self.dep_graph.fingerprint_of(dep_node_index))
+                == self.dep_graph.prev_fingerprint_of(dep_node),
             "fingerprint for green query instance not loaded from cache: {:?}",
             dep_node,
         );
@@ -509,11 +519,7 @@ fn incremental_verify_ich<Q: QueryDescription<'tcx>>(
 
         let old_hash = self.dep_graph.fingerprint_of(dep_node_index);
 
-        assert!(
-            new_hash == old_hash,
-            "found unstable fingerprints for {:?}",
-            dep_node,
-        );
+        assert!(new_hash == old_hash, "found unstable fingerprints for {:?}", dep_node,);
     }
 
     #[inline(always)]
@@ -528,38 +534,38 @@ fn force_query_with_job<Q: QueryDescription<'tcx>>(
         //    in `DepGraph::try_mark_green()`.
         // 2. Two distinct query keys get mapped to the same `DepNode`
         //    (see for example #48923).
-        assert!(!self.dep_graph.dep_node_exists(&dep_node),
-                "forcing query with already existing `DepNode`\n\
+        assert!(
+            !self.dep_graph.dep_node_exists(&dep_node),
+            "forcing query with already existing `DepNode`\n\
                  - query-key: {:?}\n\
                  - dep-node: {:?}",
-                key, dep_node);
+            key,
+            dep_node
+        );
 
-        let prof_timer = self.prof.query_provider(Q::NAME);
+        let prof_timer = self.prof.query_provider();
 
         let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
             self.start_query(job.job.clone(), diagnostics, |tcx| {
                 if Q::EVAL_ALWAYS {
-                    tcx.dep_graph.with_eval_always_task(dep_node,
-                                                        tcx,
-                                                        key,
-                                                        Q::compute,
-                                                        Q::hash_result)
+                    tcx.dep_graph.with_eval_always_task(
+                        dep_node,
+                        tcx,
+                        key,
+                        Q::compute,
+                        Q::hash_result,
+                    )
                 } else {
-                    tcx.dep_graph.with_task(dep_node,
-                                            tcx,
-                                            key,
-                                            Q::compute,
-                                            Q::hash_result)
+                    tcx.dep_graph.with_task(dep_node, tcx, key, Q::compute, Q::hash_result)
                 }
             })
         });
 
-        drop(prof_timer);
+        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
         if unlikely!(!diagnostics.is_empty()) {
             if dep_node.kind != crate::dep_graph::DepKind::Null {
-                self.queries.on_disk_cache
-                    .store_diagnostics(dep_node_index, diagnostics);
+                self.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics);
             }
         }
 
@@ -586,17 +592,19 @@ pub(super) fn ensure_query<Q: QueryDescription<'tcx>>(self, key: Q::Key) -> () {
 
         let dep_node = Q::to_dep_node(self, &key);
 
-        if self.dep_graph.try_mark_green_and_read(self, &dep_node).is_none() {
-            // A None return from `try_mark_green_and_read` means that this is either
-            // a new dep node or that the dep node has already been marked red.
-            // Either way, we can't call `dep_graph.read()` as we don't have the
-            // DepNodeIndex. We must invoke the query itself. The performance cost
-            // this introduces should be negligible as we'll immediately hit the
-            // in-memory cache, or another query down the line will.
-
-            let _ = self.get_query::<Q>(DUMMY_SP, key);
-        } else {
-            self.prof.query_cache_hit(Q::NAME);
+        match self.dep_graph.try_mark_green_and_read(self, &dep_node) {
+            None => {
+                // A None return from `try_mark_green_and_read` means that this is either
+                // a new dep node or that the dep node has already been marked red.
+                // Either way, we can't call `dep_graph.read()` as we don't have the
+                // DepNodeIndex. We must invoke the query itself. The performance cost
+                // this introduces should be negligible as we'll immediately hit the
+                // in-memory cache, or another query down the line will.
+                let _ = self.get_query::<Q>(DUMMY_SP, key);
+            }
+            Some((_, dep_node_index)) => {
+                self.prof.query_cache_hit(dep_node_index.into());
+            }
         }
     }
 
@@ -606,10 +614,7 @@ fn force_query<Q: QueryDescription<'tcx>>(self, key: Q::Key, span: Span, dep_nod
         // Ensure that only one of them runs the query.
         let job = match JobOwner::try_get(self, span, &key) {
             TryGetJob::NotYetStarted(job) => job,
-            TryGetJob::Cycle(_) |
-            TryGetJob::JobCompleted(_) => {
-                return
-            }
+            TryGetJob::Cycle(_) | TryGetJob::JobCompleted(_) => return,
         };
         self.force_query_with_job::<Q>(key, job, dep_node);
     }
@@ -830,36 +835,6 @@ fn stats<'tcx, Q: QueryConfig<'tcx>>(
             }
         }
 
-        #[allow(nonstandard_style)]
-        #[derive(Clone, Copy)]
-        pub enum QueryName {
-            $($name),*
-        }
-
-        impl rustc_data_structures::profiling::QueryName for QueryName {
-            fn discriminant(self) -> std::mem::Discriminant<QueryName> {
-                std::mem::discriminant(&self)
-            }
-
-            fn as_str(self) -> &'static str {
-                QueryName::as_str(&self)
-            }
-        }
-
-        impl QueryName {
-            pub fn register_with_profiler(
-                profiler: &rustc_data_structures::profiling::SelfProfiler,
-            ) {
-                $(profiler.register_query_name(QueryName::$name);)*
-            }
-
-            pub fn as_str(&self) -> &'static str {
-                match self {
-                    $(QueryName::$name => stringify!($name),)*
-                }
-            }
-        }
-
         #[allow(nonstandard_style)]
         #[derive(Clone, Debug)]
         pub enum Query<$tcx> {
@@ -900,12 +875,6 @@ pub fn default_span(&self, tcx: TyCtxt<$tcx>, span: Span) -> Span {
                     $(Query::$name(key) => key.default_span(tcx),)*
                 }
             }
-
-            pub fn query_name(&self) -> QueryName {
-                match self {
-                    $(Query::$name(_) => QueryName::$name,)*
-                }
-            }
         }
 
         impl<'a, $tcx> HashStable<StableHashingContext<'a>> for Query<$tcx> {
@@ -940,7 +909,7 @@ pub fn $name<F: FnOnce() -> R, R>(f: F) -> R {
             type Key = $K;
             type Value = $V;
 
-            const NAME: QueryName = QueryName::$name;
+            const NAME: &'static str = stringify!($name);
             const CATEGORY: ProfileCategory = $category;
         }
 
@@ -1052,6 +1021,35 @@ pub fn at(self, span: Span) -> TyCtxtAt<$tcx> {
             pub fn $name(self, key: $K) -> $V {
                 self.at(DUMMY_SP).$name(key)
             })*
+
+            /// All self-profiling events generated by the query engine use
+            /// virtual `StringId`s for their `event_id`. This method makes all
+            /// those virtual `StringId`s point to actual strings.
+            ///
+            /// If we are recording only summary data, the ids will point to
+            /// just the query names. If we are recording query keys too, we
+            /// allocate the corresponding strings here.
+            pub fn alloc_self_profile_query_strings(self) {
+                use crate::ty::query::profiling_support::{
+                    alloc_self_profile_query_strings_for_query_cache,
+                    QueryKeyStringCache,
+                };
+
+                if !self.prof.enabled() {
+                    return;
+                }
+
+                let mut string_cache = QueryKeyStringCache::new();
+
+                $({
+                    alloc_self_profile_query_strings_for_query_cache(
+                        self,
+                        stringify!($name),
+                        &self.queries.$name,
+                        &mut string_cache,
+                    );
+                })*
+            }
         }
 
         impl TyCtxtAt<$tcx> {
@@ -1110,7 +1108,6 @@ fn default() -> Self {
     };
 }
 
-
 /// The red/green evaluation system will try to mark a specific DepNode in the
 /// dependency graph as green by recursively trying to mark the dependencies of
 /// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode`
@@ -1169,11 +1166,13 @@ pub fn force_from_dep_node(tcx: TyCtxt<'_>, dep_node: &DepNode) -> bool {
     // each CGU, right after partitioning. This way `try_mark_green` will always
     // hit the cache instead of having to go through `force_from_dep_node`.
     // This assertion makes sure, we actually keep applying the solution above.
-    debug_assert!(dep_node.kind != DepKind::codegen_unit,
-                  "calling force_from_dep_node() on DepKind::codegen_unit");
+    debug_assert!(
+        dep_node.kind != DepKind::codegen_unit,
+        "calling force_from_dep_node() on DepKind::codegen_unit"
+    );
 
     if !dep_node.kind.can_reconstruct_query_key() {
-        return false
+        return false;
     }
 
     rustc_dep_node_force!([dep_node, tcx]