git.lizzy.rs Git - rust.git/commitdiff
Rustfmt.
authorCamille GILLOT <gillot.camille@gmail.com>
Thu, 26 Mar 2020 08:40:50 +0000 (09:40 +0100)
committerCamille GILLOT <gillot.camille@gmail.com>
Thu, 26 Mar 2020 08:40:50 +0000 (09:40 +0100)
src/librustc/dep_graph/mod.rs
src/librustc/ty/query/plumbing.rs
src/librustc_query_system/dep_graph/mod.rs
src/librustc_query_system/query/job.rs
src/librustc_query_system/query/mod.rs
src/librustc_query_system/query/plumbing.rs

index 556b1479b613a2afd8f86a110bd9df73b3dc7905..4d9d439c526e6606ab58567b6e0eaed1317f50dd 100644 (file)
@@ -166,7 +166,11 @@ fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<D
         self.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics)
     }
 
-    fn store_diagnostics_for_anon_node(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>) {
+    fn store_diagnostics_for_anon_node(
+        &self,
+        dep_node_index: DepNodeIndex,
+        diagnostics: ThinVec<Diagnostic>,
+    ) {
         self.queries.on_disk_cache.store_diagnostics_for_anon_node(dep_node_index, diagnostics)
     }
 
index ef60ac893d21659c262a698d92c2aba8d5763176..fb699c6fae0c78e4989f15d981066fbe471ce90f 100644 (file)
@@ -149,7 +149,8 @@ pub fn try_print_query_stack(handler: &Handler) {
                             query_info.info.query.describe(icx.tcx)
                         ),
                     );
-                    diag.span = icx.tcx.sess.source_map().guess_head_span(query_info.info.span).into();
+                    diag.span =
+                        icx.tcx.sess.source_map().guess_head_span(query_info.info.span).into();
                     handler.force_print_diagnostic(diag);
 
                     current_query = query_info.job.parent;
index ca4377e783d91dd219134d0249907b0278cf07ac..2faca54621340fb1b8d080d352ef313070b57555 100644 (file)
@@ -49,7 +49,11 @@ pub trait DepContext: Copy + DepGraphSafe {
     fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>);
 
     /// Register diagnostics for the given node, for use in next session.
-    fn store_diagnostics_for_anon_node(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>);
+    fn store_diagnostics_for_anon_node(
+        &self,
+        dep_node_index: DepNodeIndex,
+        diagnostics: ThinVec<Diagnostic>,
+    );
 
     /// Access the profiler.
     fn profiler(&self) -> &SelfProfilerRef;
index 9068760d323cef3e094ad6dbac4e877ab1aab4af..92ab97f210a5deadd5cb90fdd7140e6ea654535d 100644 (file)
@@ -1,4 +1,4 @@
-use crate::dep_graph::{DepKind, DepContext};
+use crate::dep_graph::{DepContext, DepKind};
 use crate::query::config::QueryContext;
 use crate::query::plumbing::CycleError;
 
index 0b8ad5c16a59333fd8e11e7b951de39282b683ed..9d0a6665eac60c8c6aa84f6c2e1b7066e1ef8ea6 100644 (file)
@@ -2,9 +2,9 @@
 pub use self::plumbing::*;
 
 mod job;
-pub use self::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
 #[cfg(parallel_compiler)]
 pub use self::job::deadlock;
+pub use self::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
 
 mod caches;
 pub use self::caches::{CacheSelector, DefaultCacheSelector, QueryCache};
index 0bae613fcfbea400ae1ee9f46ef864863d309868..6fd86d65c1d4a09a9e0a0f11710b7e7a1bc64ca6 100644 (file)
@@ -2,7 +2,7 @@
 //! generate the actual methods on tcx which find and execute the provider,
 //! manage the caches, and so forth.
 
-use crate::dep_graph::{DepKind, DepContext, DepNode};
+use crate::dep_graph::{DepContext, DepKind, DepNode};
 use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
 use crate::query::caches::QueryCache;
 use crate::query::config::{QueryContext, QueryDescription};
@@ -351,285 +351,275 @@ enum TryGetJob<'tcx, CTX: QueryContext, C: QueryCache<CTX>>
     Cycle(C::Value),
 }
 
-    /// Checks if the query is already computed and in the cache.
-    /// It returns the shard index and a lock guard to the shard,
-    /// which will be used if the query is not in the cache and we need
-    /// to compute it.
-    #[inline(always)]
-    fn try_get_cached<CTX, C, R, OnHit, OnMiss>(
-        tcx: CTX,
-        state: &QueryState<CTX, C>,
-        key: C::Key,
-        // `on_hit` can be called while holding a lock to the query cache
-        on_hit: OnHit,
-        on_miss: OnMiss,
-    ) -> R
-    where
-        C: QueryCache<CTX>,
-        CTX: QueryContext,
-        OnHit: FnOnce(&C::Value, DepNodeIndex) -> R,
-        OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX, C::Key, C::Sharded>) -> R,
-    {
-        state.cache.lookup(
-            state,
-            QueryStateShard::<CTX, C::Key, C::Sharded>::get_cache,
-            key,
-            |value, index| {
-                if unlikely!(tcx.profiler().enabled()) {
-                    tcx.profiler().query_cache_hit(index.into());
-                }
-                #[cfg(debug_assertions)]
-                {
-                    state.cache_hits.fetch_add(1, Ordering::Relaxed);
-                }
-                on_hit(value, index)
-            },
-            on_miss,
-        )
-    }
-
-    #[inline(always)]
-    fn try_execute_query<Q, CTX, K>(
-        tcx: CTX,
-        span: Span,
-        key: Q::Key,
-        lookup: QueryLookup<
-            '_,
-            CTX,
-            Q::Key,
-            <Q::Cache as QueryCache<CTX>>::Sharded,
-        >,
-    ) -> Q::Value
-    where
-        Q: QueryDescription<CTX>,
-        CTX: QueryContext<DepKind = K>,
-        CTX: HashStableContextProvider<<CTX as DepContext>::StableHashingContext>,
-        K: DepKind,
-    {
-        let job = match JobOwner::try_start::<Q, _>(tcx, span, &key, lookup) {
-            TryGetJob::NotYetStarted(job) => job,
-            TryGetJob::Cycle(result) => return result,
-            #[cfg(parallel_compiler)]
-            TryGetJob::JobCompleted((v, index)) => {
-                tcx.dep_graph().read_index(index);
-                return v;
+/// Checks if the query is already computed and in the cache.
+/// It returns the shard index and a lock guard to the shard,
+/// which will be used if the query is not in the cache and we need
+/// to compute it.
+#[inline(always)]
+fn try_get_cached<CTX, C, R, OnHit, OnMiss>(
+    tcx: CTX,
+    state: &QueryState<CTX, C>,
+    key: C::Key,
+    // `on_hit` can be called while holding a lock to the query cache
+    on_hit: OnHit,
+    on_miss: OnMiss,
+) -> R
+where
+    C: QueryCache<CTX>,
+    CTX: QueryContext,
+    OnHit: FnOnce(&C::Value, DepNodeIndex) -> R,
+    OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX, C::Key, C::Sharded>) -> R,
+{
+    state.cache.lookup(
+        state,
+        QueryStateShard::<CTX, C::Key, C::Sharded>::get_cache,
+        key,
+        |value, index| {
+            if unlikely!(tcx.profiler().enabled()) {
+                tcx.profiler().query_cache_hit(index.into());
             }
-        };
+            #[cfg(debug_assertions)]
+            {
+                state.cache_hits.fetch_add(1, Ordering::Relaxed);
+            }
+            on_hit(value, index)
+        },
+        on_miss,
+    )
+}
 
-        // Fast path for when incr. comp. is off. `to_dep_node` is
-        // expensive for some `DepKind`s.
-        if !tcx.dep_graph().is_fully_enabled() {
-            let null_dep_node = DepNode::new_no_params(DepKind::NULL);
-            return force_query_with_job::<Q, _, _>(tcx, key, job, null_dep_node).0;
+#[inline(always)]
+fn try_execute_query<Q, CTX, K>(
+    tcx: CTX,
+    span: Span,
+    key: Q::Key,
+    lookup: QueryLookup<'_, CTX, Q::Key, <Q::Cache as QueryCache<CTX>>::Sharded>,
+) -> Q::Value
+where
+    Q: QueryDescription<CTX>,
+    CTX: QueryContext<DepKind = K>,
+    CTX: HashStableContextProvider<<CTX as DepContext>::StableHashingContext>,
+    K: DepKind,
+{
+    let job = match JobOwner::try_start::<Q, _>(tcx, span, &key, lookup) {
+        TryGetJob::NotYetStarted(job) => job,
+        TryGetJob::Cycle(result) => return result,
+        #[cfg(parallel_compiler)]
+        TryGetJob::JobCompleted((v, index)) => {
+            tcx.dep_graph().read_index(index);
+            return v;
         }
+    };
 
-        if Q::ANON {
-            let prof_timer = tcx.profiler().query_provider();
-
-            let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
-                tcx.start_query(job.id, diagnostics, |tcx| {
-                    tcx.dep_graph().with_anon_task(Q::DEP_KIND, || Q::compute(tcx, key))
-                })
-            });
+    // Fast path for when incr. comp. is off. `to_dep_node` is
+    // expensive for some `DepKind`s.
+    if !tcx.dep_graph().is_fully_enabled() {
+        let null_dep_node = DepNode::new_no_params(DepKind::NULL);
+        return force_query_with_job::<Q, _, _>(tcx, key, job, null_dep_node).0;
+    }
 
-            prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+    if Q::ANON {
+        let prof_timer = tcx.profiler().query_provider();
 
-            tcx.dep_graph().read_index(dep_node_index);
+        let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
+            tcx.start_query(job.id, diagnostics, |tcx| {
+                tcx.dep_graph().with_anon_task(Q::DEP_KIND, || Q::compute(tcx, key))
+            })
+        });
 
-            if unlikely!(!diagnostics.is_empty()) {
-                tcx.store_diagnostics_for_anon_node(dep_node_index, diagnostics);
-            }
+        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
-            job.complete(tcx, &result, dep_node_index);
+        tcx.dep_graph().read_index(dep_node_index);
 
-            return result;
+        if unlikely!(!diagnostics.is_empty()) {
+            tcx.store_diagnostics_for_anon_node(dep_node_index, diagnostics);
         }
 
-        let dep_node = Q::to_dep_node(tcx, &key);
-
-        if !Q::EVAL_ALWAYS {
-            // The diagnostics for this query will be
-            // promoted to the current session during
-            // `try_mark_green()`, so we can ignore them here.
-            let loaded = tcx.start_query(job.id, None, |tcx| {
-                let marked = tcx.dep_graph().try_mark_green_and_read(tcx, &dep_node);
-                marked.map(|(prev_dep_node_index, dep_node_index)| {
-                    (
-                        load_from_disk_and_cache_in_memory::<Q, _>(
-                            tcx,
-                            key.clone(),
-                            prev_dep_node_index,
-                            dep_node_index,
-                            &dep_node,
-                        ),
+        job.complete(tcx, &result, dep_node_index);
+
+        return result;
+    }
+
+    let dep_node = Q::to_dep_node(tcx, &key);
+
+    if !Q::EVAL_ALWAYS {
+        // The diagnostics for this query will be
+        // promoted to the current session during
+        // `try_mark_green()`, so we can ignore them here.
+        let loaded = tcx.start_query(job.id, None, |tcx| {
+            let marked = tcx.dep_graph().try_mark_green_and_read(tcx, &dep_node);
+            marked.map(|(prev_dep_node_index, dep_node_index)| {
+                (
+                    load_from_disk_and_cache_in_memory::<Q, _>(
+                        tcx,
+                        key.clone(),
+                        prev_dep_node_index,
                         dep_node_index,
-                    )
-                })
-            });
-            if let Some((result, dep_node_index)) = loaded {
-                job.complete(tcx, &result, dep_node_index);
-                return result;
-            }
+                        &dep_node,
+                    ),
+                    dep_node_index,
+                )
+            })
+        });
+        if let Some((result, dep_node_index)) = loaded {
+            job.complete(tcx, &result, dep_node_index);
+            return result;
         }
-
-        let (result, dep_node_index) = force_query_with_job::<Q, _, _>(tcx, key, job, dep_node);
-        tcx.dep_graph().read_index(dep_node_index);
-        result
     }
 
-    fn load_from_disk_and_cache_in_memory<Q, CTX>(
-        tcx: CTX,
-        key: Q::Key,
-        prev_dep_node_index: SerializedDepNodeIndex,
-        dep_node_index: DepNodeIndex,
-        dep_node: &DepNode<CTX::DepKind>,
-    ) -> Q::Value
-    where
-        CTX: QueryContext,
-        Q: QueryDescription<CTX>,
-    {
-        // Note this function can be called concurrently from the same query
-        // We must ensure that this is handled correctly.
-
-        debug_assert!(tcx.dep_graph().is_green(dep_node));
-
-        // First we try to load the result from the on-disk cache.
-        let result = if Q::cache_on_disk(tcx, key.clone(), None) {
-            let prof_timer = tcx.profiler().incr_cache_loading();
-            let result = Q::try_load_from_disk(tcx, prev_dep_node_index);
-            prof_timer.finish_with_query_invocation_id(dep_node_index.into());
-
-            // We always expect to find a cached result for things that
-            // can be forced from `DepNode`.
-            debug_assert!(
-                !dep_node.kind.can_reconstruct_query_key() || result.is_some(),
-                "missing on-disk cache entry for {:?}",
-                dep_node
-            );
-            result
-        } else {
-            // Some things are never cached on disk.
-            None
-        };
+    let (result, dep_node_index) = force_query_with_job::<Q, _, _>(tcx, key, job, dep_node);
+    tcx.dep_graph().read_index(dep_node_index);
+    result
+}
 
-        let result = if let Some(result) = result {
-            result
-        } else {
-            // We could not load a result from the on-disk cache, so
-            // recompute.
-            let prof_timer = tcx.profiler().query_provider();
+fn load_from_disk_and_cache_in_memory<Q, CTX>(
+    tcx: CTX,
+    key: Q::Key,
+    prev_dep_node_index: SerializedDepNodeIndex,
+    dep_node_index: DepNodeIndex,
+    dep_node: &DepNode<CTX::DepKind>,
+) -> Q::Value
+where
+    CTX: QueryContext,
+    Q: QueryDescription<CTX>,
+{
+    // Note this function can be called concurrently from the same query
+    // We must ensure that this is handled correctly.
 
-            // The dep-graph for this computation is already in-place.
-            let result = tcx.dep_graph().with_ignore(|| Q::compute(tcx, key));
+    debug_assert!(tcx.dep_graph().is_green(dep_node));
 
-            prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+    // First we try to load the result from the on-disk cache.
+    let result = if Q::cache_on_disk(tcx, key.clone(), None) {
+        let prof_timer = tcx.profiler().incr_cache_loading();
+        let result = Q::try_load_from_disk(tcx, prev_dep_node_index);
+        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
-            result
-        };
+        // We always expect to find a cached result for things that
+        // can be forced from `DepNode`.
+        debug_assert!(
+            !dep_node.kind.can_reconstruct_query_key() || result.is_some(),
+            "missing on-disk cache entry for {:?}",
+            dep_node
+        );
+        result
+    } else {
+        // Some things are never cached on disk.
+        None
+    };
 
-        // If `-Zincremental-verify-ich` is specified, re-hash results from
-        // the cache and make sure that they have the expected fingerprint.
-        if unlikely!(tcx.session().opts.debugging_opts.incremental_verify_ich) {
-            incremental_verify_ich::<Q, _>(tcx, &result, dep_node, dep_node_index);
-        }
+    let result = if let Some(result) = result {
+        result
+    } else {
+        // We could not load a result from the on-disk cache, so
+        // recompute.
+        let prof_timer = tcx.profiler().query_provider();
+
+        // The dep-graph for this computation is already in-place.
+        let result = tcx.dep_graph().with_ignore(|| Q::compute(tcx, key));
+
+        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
         result
+    };
+
+    // If `-Zincremental-verify-ich` is specified, re-hash results from
+    // the cache and make sure that they have the expected fingerprint.
+    if unlikely!(tcx.session().opts.debugging_opts.incremental_verify_ich) {
+        incremental_verify_ich::<Q, _>(tcx, &result, dep_node, dep_node_index);
     }
 
-    #[inline(never)]
-    #[cold]
-    fn incremental_verify_ich<Q, CTX>(
-        tcx: CTX,
-        result: &Q::Value,
-        dep_node: &DepNode<CTX::DepKind>,
-        dep_node_index: DepNodeIndex,
-    )
-    where
-        CTX: QueryContext,
-        Q: QueryDescription<CTX>,
-    {
-        assert!(
-            Some(tcx.dep_graph().fingerprint_of(dep_node_index))
-                == tcx.dep_graph().prev_fingerprint_of(dep_node),
-            "fingerprint for green query instance not loaded from cache: {:?}",
-            dep_node,
-        );
+    result
+}
 
-        debug!("BEGIN verify_ich({:?})", dep_node);
-        let mut hcx = tcx.create_stable_hashing_context();
+#[inline(never)]
+#[cold]
+fn incremental_verify_ich<Q, CTX>(
+    tcx: CTX,
+    result: &Q::Value,
+    dep_node: &DepNode<CTX::DepKind>,
+    dep_node_index: DepNodeIndex,
+) where
+    CTX: QueryContext,
+    Q: QueryDescription<CTX>,
+{
+    assert!(
+        Some(tcx.dep_graph().fingerprint_of(dep_node_index))
+            == tcx.dep_graph().prev_fingerprint_of(dep_node),
+        "fingerprint for green query instance not loaded from cache: {:?}",
+        dep_node,
+    );
 
-        let new_hash = Q::hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO);
-        debug!("END verify_ich({:?})", dep_node);
+    debug!("BEGIN verify_ich({:?})", dep_node);
+    let mut hcx = tcx.create_stable_hashing_context();
 
-        let old_hash = tcx.dep_graph().fingerprint_of(dep_node_index);
+    let new_hash = Q::hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO);
+    debug!("END verify_ich({:?})", dep_node);
 
-        assert!(new_hash == old_hash, "found unstable fingerprints for {:?}", dep_node,);
-    }
+    let old_hash = tcx.dep_graph().fingerprint_of(dep_node_index);
 
-    #[inline(always)]
-    fn force_query_with_job<Q, CTX, K>(
-        tcx: CTX,
-        key: Q::Key,
-        job: JobOwner<'_, CTX, Q::Cache>,
-        dep_node: DepNode<CTX::DepKind>,
-    ) -> (Q::Value, DepNodeIndex)
-    where
-        Q: QueryDescription<CTX>,
-        CTX: QueryContext<DepKind = K>,
-        CTX: HashStableContextProvider<<CTX as DepContext>::StableHashingContext>,
-        K: DepKind,
-    {
-        // If the following assertion triggers, it can have two reasons:
-        // 1. Something is wrong with DepNode creation, either here or
-        //    in `DepGraph::try_mark_green()`.
-        // 2. Two distinct query keys get mapped to the same `DepNode`
-        //    (see for example #48923).
-        assert!(
-            !tcx.dep_graph().dep_node_exists(&dep_node),
-            "forcing query with already existing `DepNode`\n\
+    assert!(new_hash == old_hash, "found unstable fingerprints for {:?}", dep_node,);
+}
+
+#[inline(always)]
+fn force_query_with_job<Q, CTX, K>(
+    tcx: CTX,
+    key: Q::Key,
+    job: JobOwner<'_, CTX, Q::Cache>,
+    dep_node: DepNode<CTX::DepKind>,
+) -> (Q::Value, DepNodeIndex)
+where
+    Q: QueryDescription<CTX>,
+    CTX: QueryContext<DepKind = K>,
+    CTX: HashStableContextProvider<<CTX as DepContext>::StableHashingContext>,
+    K: DepKind,
+{
+    // If the following assertion triggers, it can have two reasons:
+    // 1. Something is wrong with DepNode creation, either here or
+    //    in `DepGraph::try_mark_green()`.
+    // 2. Two distinct query keys get mapped to the same `DepNode`
+    //    (see for example #48923).
+    assert!(
+        !tcx.dep_graph().dep_node_exists(&dep_node),
+        "forcing query with already existing `DepNode`\n\
                  - query-key: {:?}\n\
                  - dep-node: {:?}",
-            key,
-            dep_node
-        );
-
-        let prof_timer = tcx.profiler().query_provider();
+        key,
+        dep_node
+    );
 
-        let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
-            tcx.start_query(job.id, diagnostics, |tcx| {
-                if Q::EVAL_ALWAYS {
-                    tcx.dep_graph().with_eval_always_task(
-                        dep_node,
-                        tcx,
-                        key,
-                        Q::compute,
-                        Q::hash_result,
-                    )
-                } else {
-                    tcx.dep_graph().with_task(dep_node, tcx, key, Q::compute, Q::hash_result)
-                }
-            })
-        });
+    let prof_timer = tcx.profiler().query_provider();
+
+    let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
+        tcx.start_query(job.id, diagnostics, |tcx| {
+            if Q::EVAL_ALWAYS {
+                tcx.dep_graph().with_eval_always_task(
+                    dep_node,
+                    tcx,
+                    key,
+                    Q::compute,
+                    Q::hash_result,
+                )
+            } else {
+                tcx.dep_graph().with_task(dep_node, tcx, key, Q::compute, Q::hash_result)
+            }
+        })
+    });
 
-        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+    prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
-        if unlikely!(!diagnostics.is_empty()) {
-            if dep_node.kind != DepKind::NULL {
-                tcx.store_diagnostics(dep_node_index, diagnostics);
-            }
+    if unlikely!(!diagnostics.is_empty()) {
+        if dep_node.kind != DepKind::NULL {
+            tcx.store_diagnostics(dep_node_index, diagnostics);
         }
+    }
 
-        job.complete(tcx, &result, dep_node_index);
+    job.complete(tcx, &result, dep_node_index);
 
-        (result, dep_node_index)
-    }
+    (result, dep_node_index)
+}
 
 pub trait QueryGetter: QueryContext {
-    fn get_query<Q: QueryDescription<Self>>(
-        self,
-        span: Span,
-        key: Q::Key,
-    ) -> Q::Value;
+    fn get_query<Q: QueryDescription<Self>>(self, span: Span, key: Q::Key) -> Q::Value;
 
     /// Ensure that either this query has all green inputs or been executed.
     /// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
@@ -655,11 +645,7 @@ impl<CTX, K> QueryGetter for CTX
     K: DepKind,
 {
     #[inline(never)]
-    fn get_query<Q: QueryDescription<Self>>(
-        self,
-        span: Span,
-        key: Q::Key,
-    ) -> Q::Value {
+    fn get_query<Q: QueryDescription<Self>>(self, span: Span, key: Q::Key) -> Q::Value {
         debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span);
 
         try_get_cached(