git.lizzy.rs Git - rust.git/commitdiff
Move diagnostics out from QueryJob and optimize for the case with no diagnostics
author     John Kåre Alsaker <john.kare.alsaker@gmail.com>
           Fri, 7 Dec 2018 02:04:23 +0000 (03:04 +0100)
committer  John Kåre Alsaker <john.kare.alsaker@gmail.com>
           Tue, 8 Jan 2019 17:50:43 +0000 (18:50 +0100)
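Diagnostics emitted while a query runs are no longer stored in a Lock<Vec<Diagnostic>> inside every QueryJob. Instead, ty::tls::ImplicitCtxt gains a diagnostics: Option<&Lock<ThinVec<Diagnostic>>> field, JobOwner::start threads that buffer through a new with_diagnostics helper in ty::query::plumbing, and the on-disk cache is only touched when the buffer is non-empty; store_diagnostics and store_diagnostics_for_anon_node are marked #[inline(never)]/#[cold] so the common, diagnostic-free path stays cheap.

A minimal standalone sketch of the same pattern, with std::sync::Mutex and Vec standing in for rustc_data_structures' Lock and ThinVec (the names below are illustrative, not the compiler's actual items):

use std::sync::Mutex;

#[derive(Debug)]
struct Diagnostic(String);

// Collect diagnostics into a side buffer that the query reaches through its
// implicit context, instead of keeping a Vec inside every query job.
fn with_diagnostics<F, R>(f: F) -> (R, Vec<Diagnostic>)
where
    F: FnOnce(Option<&Mutex<Vec<Diagnostic>>>) -> R,
{
    let diagnostics = Mutex::new(Vec::new());
    let result = f(Some(&diagnostics));
    (result, diagnostics.into_inner().unwrap())
}

// Cold path: only reached when a query actually emitted something, so it is
// kept out of line to keep the hot query path small.
#[inline(never)]
#[cold]
fn store_diagnostics(dep_node_index: usize, diagnostics: Vec<Diagnostic>) {
    for d in &diagnostics {
        println!("node {}: stored diagnostic {:?}", dep_node_index, d);
    }
}

// A query may or may not emit diagnostics while it runs.
fn run_query(diagnostics: Option<&Mutex<Vec<Diagnostic>>>) -> u32 {
    if let Some(buf) = diagnostics {
        buf.lock().unwrap().push(Diagnostic("example warning".into()));
    }
    42
}

fn main() {
    let dep_node_index = 0;
    let (result, diagnostics) = with_diagnostics(|d| run_query(d));

    // The common case has no diagnostics, so the cold storing path is skipped
    // entirely (the real code guards this with the unlikely! macro).
    if !diagnostics.is_empty() {
        store_diagnostics(dep_node_index, diagnostics);
    }
    println!("query result: {}", result);
}

ThinVec is used for the buffer so the empty, common case stays as small as possible, and load_from_disk_and_cache_in_memory passes None because the diagnostics for such queries were already promoted to the current session during try_mark_green.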
src/librustc/dep_graph/graph.rs
src/librustc/ty/context.rs
src/librustc/ty/query/job.rs
src/librustc/ty/query/on_disk_cache.rs
src/librustc/ty/query/plumbing.rs

index 71cacfe370658ae7124162eacf97fef6091faf11..501ef01d74c6e783cd5dc8e6e3ff55b72803dfdc 100644 (file)
@@ -696,7 +696,7 @@ pub fn try_mark_green<'tcx>(&self,
 
                 // Promote the previous diagnostics to the current session.
                 tcx.queries.on_disk_cache
-                   .store_diagnostics(dep_node_index, diagnostics.clone());
+                   .store_diagnostics(dep_node_index, diagnostics.clone().into());
 
                 for diagnostic in diagnostics {
                     DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit();
index c0ba4329ae05c24d03b05696b3c8fac0ccf81256..f31d644dbd96b14c2b0921f0c7f62265d119da84 100644 (file)
@@ -1673,6 +1673,7 @@ pub fn enter_local<'tcx, F, R>(
             let new_icx = ty::tls::ImplicitCtxt {
                 tcx,
                 query: icx.query.clone(),
+                diagnostics: icx.diagnostics,
                 layout_depth: icx.layout_depth,
                 task_deps: icx.task_deps,
             };
@@ -1782,6 +1783,7 @@ pub mod tls {
     use errors::{Diagnostic, TRACK_DIAGNOSTICS};
     use rustc_data_structures::OnDrop;
     use rustc_data_structures::sync::{self, Lrc, Lock};
+    use rustc_data_structures::thin_vec::ThinVec;
     use dep_graph::TaskDeps;
 
     #[cfg(not(parallel_queries))]
@@ -1801,10 +1803,14 @@ pub struct ImplicitCtxt<'a, 'gcx: 'tcx, 'tcx> {
         /// by `enter_local` with a new local interner
         pub tcx: TyCtxt<'tcx, 'gcx, 'tcx>,
 
-        /// The current query job, if any. This is updated by start_job in
+        /// The current query job, if any. This is updated by JobOwner::start in
         /// ty::query::plumbing when executing a query
         pub query: Option<Lrc<query::QueryJob<'gcx>>>,
 
+        /// Where to store diagnostics for the current query job, if any.
+        /// This is updated by JobOwner::start in ty::query::plumbing when executing a query
+        pub diagnostics: Option<&'a Lock<ThinVec<Diagnostic>>>,
+
         /// Used to prevent layout from recursing too deeply.
         pub layout_depth: usize,
 
@@ -1870,8 +1876,9 @@ fn span_debug(span: syntax_pos::Span, f: &mut fmt::Formatter<'_>) -> fmt::Result
     fn track_diagnostic(diagnostic: &Diagnostic) {
         with_context_opt(|icx| {
             if let Some(icx) = icx {
-                if let Some(ref query) = icx.query {
-                    query.diagnostics.lock().push(diagnostic.clone());
+                if let Some(ref diagnostics) = icx.diagnostics {
+                    let mut diagnostics = diagnostics.lock();
+                    diagnostics.extend(Some(diagnostic.clone()));
                 }
             }
         })
@@ -1938,6 +1945,7 @@ pub fn enter_global<'gcx, F, R>(gcx: &'gcx GlobalCtxt<'gcx>, f: F) -> R
             let icx = ImplicitCtxt {
                 tcx,
                 query: None,
+                diagnostics: None,
                 layout_depth: 0,
                 task_deps: None,
             };
@@ -1967,6 +1975,7 @@ pub unsafe fn with_global<F, R>(f: F) -> R
         };
         let icx = ImplicitCtxt {
             query: None,
+            diagnostics: None,
             tcx,
             layout_depth: 0,
             task_deps: None,
index 0063794727fd2c31e9acbc35cf6f3f045e07a997..d794429a8a7064cb12e1a3f4ac8b3f487359663e 100644 (file)
@@ -14,7 +14,6 @@
     config::QueryDescription,
 };
 use ty::context::TyCtxt;
-use errors::Diagnostic;
 use std::process;
 use std::{fmt, ptr};
 
@@ -54,9 +53,6 @@ pub struct QueryJob<'tcx> {
     /// The parent query job which created this job and is implicitly waiting on it.
     pub parent: Option<Lrc<QueryJob<'tcx>>>,
 
-    /// Diagnostic messages which are emitted while the query executes
-    pub diagnostics: Lock<Vec<Diagnostic>>,
-
     /// The latch which is used to wait on this job
     #[cfg(parallel_queries)]
     latch: QueryLatch<'tcx>,
@@ -66,7 +62,6 @@ impl<'tcx> QueryJob<'tcx> {
     /// Creates a new query job
     pub fn new(info: QueryInfo<'tcx>, parent: Option<Lrc<QueryJob<'tcx>>>) -> Self {
         QueryJob {
-            diagnostics: Lock::new(Vec::new()),
             info,
             parent,
             #[cfg(parallel_queries)]
index 3432aba7ee0d0304c1b3f205805f1e1af1ac0fd8..a674e942b3893c97049123eb9d632903770d374d 100644 (file)
@@ -7,6 +7,7 @@
 use mir::{self, interpret};
 use mir::interpret::{AllocDecodingSession, AllocDecodingState};
 use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::thin_vec::ThinVec;
 use rustc_data_structures::sync::{Lrc, Lock, HashMapExt, Once};
 use rustc_data_structures::indexed_vec::{IndexVec, Idx};
 use rustc_serialize::{Decodable, Decoder, Encodable, Encoder, opaque,
@@ -341,11 +342,13 @@ pub fn load_diagnostics<'a, 'tcx>(&self,
     /// Store a diagnostic emitted during the current compilation session.
     /// Anything stored like this will be available via `load_diagnostics` in
     /// the next compilation session.
+    #[inline(never)]
+    #[cold]
     pub fn store_diagnostics(&self,
                              dep_node_index: DepNodeIndex,
-                             diagnostics: Vec<Diagnostic>) {
+                             diagnostics: ThinVec<Diagnostic>) {
         let mut current_diagnostics = self.current_diagnostics.borrow_mut();
-        let prev = current_diagnostics.insert(dep_node_index, diagnostics);
+        let prev = current_diagnostics.insert(dep_node_index, diagnostics.into());
         debug_assert!(prev.is_none());
     }
 
@@ -367,16 +370,16 @@ pub fn try_load_query_result<'tcx, T>(&self,
     /// Since many anonymous queries can share the same `DepNode`, we aggregate
     /// them -- as opposed to regular queries where we assume that there is a
     /// 1:1 relationship between query-key and `DepNode`.
+    #[inline(never)]
+    #[cold]
     pub fn store_diagnostics_for_anon_node(&self,
                                            dep_node_index: DepNodeIndex,
-                                           mut diagnostics: Vec<Diagnostic>) {
+                                           diagnostics: ThinVec<Diagnostic>) {
         let mut current_diagnostics = self.current_diagnostics.borrow_mut();
 
-        let x = current_diagnostics.entry(dep_node_index).or_insert_with(|| {
-            mem::replace(&mut diagnostics, Vec::new())
-        });
+        let x = current_diagnostics.entry(dep_node_index).or_insert(Vec::new());
 
-        x.extend(diagnostics.into_iter());
+        x.extend(Into::<Vec<_>>::into(diagnostics));
     }
 
     fn load_indexed<'tcx, T>(&self,
index 2d619d19b424393e722ac0d3c86c709761c08d9e..3f50758a009af622587fbd1c781d58041f2849e6 100644 (file)
@@ -18,6 +18,7 @@
 
 use rustc_data_structures::fx::{FxHashMap};
 use rustc_data_structures::sync::{Lrc, Lock};
+use rustc_data_structures::thin_vec::ThinVec;
 use std::mem;
 use std::ptr;
 use std::collections::hash_map::Entry;
@@ -195,19 +196,21 @@ pub(super) fn complete(self, result: &Q::Value, dep_node_index: DepNodeIndex) {
     pub(super) fn start<'lcx, F, R>(
         &self,
         tcx: TyCtxt<'_, 'tcx, 'lcx>,
+        diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
         compute: F)
-    -> (R, Vec<Diagnostic>)
+    -> R
     where
         F: for<'b> FnOnce(TyCtxt<'b, 'tcx, 'lcx>) -> R
     {
         // The TyCtxt stored in TLS has the same global interner lifetime
         // as `tcx`, so we use `with_related_context` to relate the 'gcx lifetimes
         // when accessing the ImplicitCtxt
-        let r = tls::with_related_context(tcx, move |current_icx| {
+        tls::with_related_context(tcx, move |current_icx| {
             // Update the ImplicitCtxt to point to our new query job
             let new_icx = tls::ImplicitCtxt {
                 tcx: tcx.global_tcx(),
                 query: Some(self.job.clone()),
+                diagnostics,
                 layout_depth: current_icx.layout_depth,
                 task_deps: current_icx.task_deps,
             };
@@ -216,13 +219,19 @@ pub(super) fn start<'lcx, F, R>(
             tls::enter_context(&new_icx, |_| {
                 compute(tcx)
             })
-        });
+        })
+    }
 
-        // Extract the diagnostic from the job
-        let diagnostics = mem::replace(&mut *self.job.diagnostics.lock(), Vec::new());
+}
 
-        (r, diagnostics)
-    }
+#[inline(always)]
+fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
+where
+    F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R
+{
+    let diagnostics = Lock::new(ThinVec::new());
+    let result = f(Some(&diagnostics));
+    (result, diagnostics.into_inner())
 }
 
 impl<'a, 'tcx, Q: QueryDescription<'tcx>> Drop for JobOwner<'a, 'tcx, Q> {
@@ -402,20 +411,23 @@ fn try_get_with<Q: QueryDescription<'gcx>>(
             profq_msg!(self, ProfileQueriesMsg::ProviderBegin);
             self.sess.profiler(|p| p.start_activity(Q::CATEGORY));
 
-            let res = job.start(self, |tcx| {
-                tcx.dep_graph.with_anon_task(dep_node.kind, || {
-                    Q::compute(tcx.global_tcx(), key)
+            let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
+                job.start(self, diagnostics, |tcx| {
+                    tcx.dep_graph.with_anon_task(dep_node.kind, || {
+                        Q::compute(tcx.global_tcx(), key)
+                    })
                 })
             });
 
             self.sess.profiler(|p| p.end_activity(Q::CATEGORY));
             profq_msg!(self, ProfileQueriesMsg::ProviderEnd);
-            let ((result, dep_node_index), diagnostics) = res;
 
             self.dep_graph.read_index(dep_node_index);
 
-            self.queries.on_disk_cache
-                .store_diagnostics_for_anon_node(dep_node_index, diagnostics);
+            if unlikely!(!diagnostics.is_empty()) {
+                self.queries.on_disk_cache
+                    .store_diagnostics_for_anon_node(dep_node_index, diagnostics);
+            }
 
             job.complete(&result, dep_node_index);
 
@@ -487,7 +499,7 @@ fn load_from_disk_and_cache_in_memory<Q: QueryDescription<'gcx>>(
             // The diagnostics for this query have already been
             // promoted to the current session during
             // try_mark_green(), so we can ignore them here.
-            let (result, _) = job.start(self, |tcx| {
+            let result = job.start(self, None, |tcx| {
                 // The dep-graph for this computation is already in
                 // place
                 tcx.dep_graph.with_ignore(|| {
@@ -566,32 +578,34 @@ fn force_query_with_job<Q: QueryDescription<'gcx>>(
         profq_msg!(self, ProfileQueriesMsg::ProviderBegin);
         self.sess.profiler(|p| p.start_activity(Q::CATEGORY));
 
-        let res = job.start(self, |tcx| {
-            if dep_node.kind.is_eval_always() {
-                tcx.dep_graph.with_eval_always_task(dep_node,
-                                                    tcx,
-                                                    key,
-                                                    Q::compute)
-            } else {
-                tcx.dep_graph.with_task(dep_node,
-                                        tcx,
-                                        key,
-                                        Q::compute)
-            }
+        let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
+            job.start(self, diagnostics, |tcx| {
+                if dep_node.kind.is_eval_always() {
+                    tcx.dep_graph.with_eval_always_task(dep_node,
+                                                        tcx,
+                                                        key,
+                                                        Q::compute)
+                } else {
+                    tcx.dep_graph.with_task(dep_node,
+                                            tcx,
+                                            key,
+                                            Q::compute)
+                }
+            })
         });
 
         self.sess.profiler(|p| p.end_activity(Q::CATEGORY));
         profq_msg!(self, ProfileQueriesMsg::ProviderEnd);
 
-        let ((result, dep_node_index), diagnostics) = res;
-
         if unlikely!(self.sess.opts.debugging_opts.query_dep_graph) {
             self.dep_graph.mark_loaded_from_cache(dep_node_index, false);
         }
 
         if dep_node.kind != ::dep_graph::DepKind::Null {
-            self.queries.on_disk_cache
-                .store_diagnostics(dep_node_index, diagnostics);
+            if unlikely!(!diagnostics.is_empty()) {
+                self.queries.on_disk_cache
+                    .store_diagnostics(dep_node_index, diagnostics);
+            }
         }
 
         job.complete(&result, dep_node_index);
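For anonymous queries, many executions can share one DepNodeIndex, so store_diagnostics_for_anon_node merges each new batch into the existing entry rather than asserting that none was present. A rough standalone sketch of that aggregation, with HashMap and Vec standing in for the compiler's FxHashMap and ThinVec (names are illustrative):

use std::collections::HashMap;

type DepNodeIndex = usize;

#[derive(Debug)]
struct Diagnostic(String);

// Merge diagnostics from anonymous queries that share a dep node:
// get-or-create the entry, then extend it with the new batch.
fn store_diagnostics_for_anon_node(
    current: &mut HashMap<DepNodeIndex, Vec<Diagnostic>>,
    dep_node_index: DepNodeIndex,
    diagnostics: Vec<Diagnostic>,
) {
    let entry = current.entry(dep_node_index).or_insert_with(Vec::new);
    entry.extend(diagnostics);
}

fn main() {
    let mut current = HashMap::new();
    store_diagnostics_for_anon_node(&mut current, 7, vec![Diagnostic("first".into())]);
    store_diagnostics_for_anon_node(&mut current, 7, vec![Diagnostic("second".into())]);
    assert_eq!(current[&7].len(), 2);
    println!("{:?}", current);
}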