use rustc_data_structures::sharded::{get_shard_index_by_hash, Sharded};
use rustc_data_structures::sync::{Lock, LockGuard};
use rustc_data_structures::thin_vec::ThinVec;
-use rustc_errors::{Diagnostic, DiagnosticBuilder, FatalError};
+use rustc_errors::{DiagnosticBuilder, FatalError};
use rustc_span::{Span, DUMMY_SP};
use std::cell::Cell;
use std::collections::hash_map::Entry;
}
}
// NOTE(review): deletion hunk. `with_diagnostics` was a small helper that
// created a fresh `Lock<ThinVec<Diagnostic>>`, ran the given closure with
// `Some(&lock)`, and returned the closure's result paired with the collected
// diagnostics. This patch removes it; its two former call sites inline the
// `Lock::new(ThinVec::new())` / `into_inner()` pattern directly instead.
-fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
-where
- F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R,
-{
- let diagnostics = Lock::new(ThinVec::new());
- let result = f(Some(&diagnostics));
- (result, diagnostics.into_inner())
-}
-
impl<'tcx, D, K> Drop for JobOwner<'tcx, D, K>
where
D: Copy + Clone + Eq + Hash,
// NOTE(review): this is a patch hunk, not plain source — `-` lines are removed
// by the patch, `+` lines are added, unprefixed lines are context. The hunk
// restructures `execute_job`: the old three-way branch (anon / eval_always /
// try-load-from-disk), two arms of which delegated to `force_query_with_job`,
// is merged into one linear flow with a single `start_query` call.
// NOTE(review): the function's where-clause and the early fast-path code
// producing the `return (result, dep_node_index)` at the top appear to have
// been dropped from this chunk — the hunk is truncated; confirm against the
// full file before applying.
fn execute_job<CTX, K, V>(
tcx: CTX,
key: K,
- dep_node: Option<DepNode<CTX::DepKind>>,
+ mut dep_node_opt: Option<DepNode<CTX::DepKind>>,
query: &QueryVtable<CTX, K, V>,
job_id: QueryJobId<CTX::DepKind>,
compute: fn(CTX::DepContext, K) -> V,
return (result, dep_node_index);
}
// NOTE(review): old anon-query arm removed; anon handling moves inside the
// unified `start_query` closure added below.
- if query.anon {
- let prof_timer = tcx.dep_context().profiler().query_provider();
-
- let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
- tcx.start_query(job_id, diagnostics, || {
- dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
- compute(*tcx.dep_context(), key)
- })
- })
- });
// New early-return: for ordinary (non-anon, non-eval_always) queries, first
// try to mark green / load the cached result from disk before recomputing.
+ if !query.anon && !query.eval_always {
+ // `to_dep_node` is expensive for some `DepKind`s.
+ let dep_node =
+ dep_node_opt.get_or_insert_with(|| query.to_dep_node(*tcx.dep_context(), &key));
- prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+ // The diagnostics for this query will be promoted to the current session during
+ // `try_mark_green()`, so we can ignore them here.
+ if let Some(ret) = tcx.start_query(job_id, None, || {
+ try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query, compute)
+ }) {
+ return ret;
+ }
+ }
- let side_effects = QuerySideEffects { diagnostics };
// Diagnostics are now collected via an inlined lock instead of the deleted
// `with_diagnostics` helper.
+ let prof_timer = tcx.dep_context().profiler().query_provider();
+ let diagnostics = Lock::new(ThinVec::new());
- if unlikely!(!side_effects.is_empty()) {
- tcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
+ let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), || {
+ if query.anon {
+ return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
+ compute(*tcx.dep_context(), key)
+ });
+ }
}
- (result, dep_node_index)
- } else if query.eval_always {
// `to_dep_node` is expensive for some `DepKind`s.
- let dep_node = dep_node.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));
- force_query_with_job(tcx, key, job_id, dep_node, query, compute)
- } else {
- // `to_dep_node` is expensive for some `DepKind`s.
- let dep_node = dep_node.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));
- // The diagnostics for this query will be
- // promoted to the current session during
- // `try_mark_green()`, so we can ignore them here.
- let loaded = tcx.start_query(job_id, None, || {
- try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query, compute)
- });
- if let Some((result, dep_node_index)) = loaded {
- (result, dep_node_index)
// The eval_always / regular task split (formerly in `force_query_with_job`)
// is inlined here inside the `start_query` closure.
+ let dep_node = dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));
+
+ if query.eval_always {
+ tcx.dep_context().dep_graph().with_eval_always_task(
+ dep_node,
+ *tcx.dep_context(),
+ key,
+ compute,
+ query.hash_result,
+ )
+ } else {
+ tcx.dep_context().dep_graph().with_task(
+ dep_node,
+ *tcx.dep_context(),
+ key,
+ compute,
+ query.hash_result,
+ )
+ }
+ });
+
+ prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+
// Side effects are stored under the anon node or the real dep node depending
// on the query kind. NOTE(review): the old code also guarded on
// `dep_node.kind != DepKind::NULL` before `store_side_effects`; that guard is
// gone in the new code — presumably NULL-kind nodes can no longer reach this
// path, but confirm against the rest of the patch.
+ let diagnostics = diagnostics.into_inner();
+ let side_effects = QuerySideEffects { diagnostics };
+
+ if unlikely!(!side_effects.is_empty()) {
+ if query.anon {
+ tcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
} else {
- force_query_with_job(tcx, key, job_id, dep_node, query, compute)
+ tcx.store_side_effects(dep_node_index, side_effects);
}
}
+
+ (result, dep_node_index)
}
fn try_load_from_disk_and_cache_in_memory<CTX, K, V>(
}
}
// NOTE(review): deletion hunk. `force_query_with_job` executed a query as a
// dep-graph task (`with_eval_always_task` or `with_task` depending on
// `query.eval_always`), timed it with the self-profiler, and stored any
// collected diagnostics as side effects. The patch inlines this logic into
// `execute_job`. Note the removed version stored side effects only when
// `dep_node.kind != DepKind::NULL`; the inlined replacement has no such
// guard — verify that is intentional.
-fn force_query_with_job<CTX, K, V>(
- tcx: CTX,
- key: K,
- job_id: QueryJobId<CTX::DepKind>,
- dep_node: DepNode<CTX::DepKind>,
- query: &QueryVtable<CTX, K, V>,
- compute: fn(CTX::DepContext, K) -> V,
-) -> (V, DepNodeIndex)
-where
- CTX: QueryContext,
- K: Debug,
-{
- let prof_timer = tcx.dep_context().profiler().query_provider();
-
- let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
- tcx.start_query(job_id, diagnostics, || {
- if query.eval_always {
- tcx.dep_context().dep_graph().with_eval_always_task(
- dep_node,
- *tcx.dep_context(),
- key,
- compute,
- query.hash_result,
- )
- } else {
- tcx.dep_context().dep_graph().with_task(
- dep_node,
- *tcx.dep_context(),
- key,
- compute,
- query.hash_result,
- )
- }
- })
- });
-
- prof_timer.finish_with_query_invocation_id(dep_node_index.into());
-
- let side_effects = QuerySideEffects { diagnostics };
-
- if unlikely!(!side_effects.is_empty()) && dep_node.kind != DepKind::NULL {
- tcx.store_side_effects(dep_node_index, side_effects);
- }
-
- (result, dep_node_index)
-}
-
-
/// Ensure that either this query has all green inputs or been executed.
/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
/// Returns true if the query should still run.