X-Git-Url: https://git.lizzy.rs/?a=blobdiff_plain;f=compiler%2Frustc_query_system%2Fsrc%2Fquery%2Fplumbing.rs;h=b3b939eae88dce04c1e1efa8bb3a6bcc19a9910a;hb=6d225bb0804e333aa411acce45de4230845bcf2b;hp=53844dab9db59490992e073f0b62bf742df3f223;hpb=9ae98804e51178a12bc8166bf469abc5283e10fd;p=rust.git diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs index 53844dab9db..ffc413d15f5 100644 --- a/compiler/rustc_query_system/src/query/plumbing.rs +++ b/compiler/rustc_query_system/src/query/plumbing.rs @@ -2,10 +2,9 @@ //! generate the actual methods on tcx which find and execute the provider, //! manage the caches, and so forth. -use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex, DepNodeParams}; +use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex}; use crate::ich::StableHashingContext; use crate::query::caches::QueryCache; -use crate::query::config::QueryVTable; use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo}; use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame}; use crate::values::Value; @@ -131,7 +130,7 @@ fn mk_cycle( where Qcx: QueryContext + crate::query::HasDepContext, V: std::fmt::Debug + Value, - R: Clone, + R: Copy, { let error = report_cycle(qcx.dep_context().sess(), &cycle_error); let value = handle_cycle_error(*qcx.dep_context(), &cycle_error, error, handler); @@ -340,72 +339,74 @@ enum TryGetJob<'tcx, K, D> /// which will be used if the query is not in the cache and we need /// to compute it. #[inline] -pub fn try_get_cached( - tcx: Tcx, - cache: &C, - key: &C::Key, - // `on_hit` can be called while holding a lock to the query cache - on_hit: OnHit, -) -> Result +pub fn try_get_cached(tcx: Tcx, cache: &C, key: &C::Key) -> Option where C: QueryCache, Tcx: DepContext, - OnHit: FnOnce(&C::Stored) -> R, { - cache.lookup(&key, |value, index| { - if std::intrinsics::unlikely(tcx.profiler().enabled()) { - tcx.profiler().query_cache_hit(index.into()); + match cache.lookup(&key) { + Some((value, index)) => { + if std::intrinsics::unlikely(tcx.profiler().enabled()) { + tcx.profiler().query_cache_hit(index.into()); + } + tcx.dep_graph().read_index(index); + Some(value) } - tcx.dep_graph().read_index(index); - on_hit(value) - }) + None => None, + } } -fn try_execute_query( +fn try_execute_query( qcx: Qcx, - state: &QueryState, - cache: &C, + state: &QueryState, + cache: &Q::Cache, span: Span, - key: C::Key, + key: Q::Key, dep_node: Option>, - query: &QueryVTable, -) -> (C::Stored, Option) +) -> (Q::Stored, Option) where - C: QueryCache, - C::Key: Clone + DepNodeParams, - C::Value: Value, - C::Stored: Debug + std::borrow::Borrow, + Q: QueryConfig, Qcx: QueryContext, { - match JobOwner::<'_, C::Key, Qcx::DepKind>::try_start(&qcx, state, span, key.clone()) { + match JobOwner::<'_, Q::Key, Qcx::DepKind>::try_start(&qcx, state, span, key.clone()) { TryGetJob::NotYetStarted(job) => { - let (result, dep_node_index) = execute_job(qcx, key.clone(), dep_node, query, job.id); - if query.feedable { + let (result, dep_node_index) = + execute_job::(qcx, key.clone(), dep_node, job.id); + if Q::FEEDABLE { // We may have put a value inside the cache from inside the execution. // Verify that it has the same hash as what we have now, to ensure consistency. 
- let _ = cache.lookup(&key, |cached_result, _| { - let hasher = query.hash_result.expect("feedable forbids no_hash"); - let old_hash = qcx.dep_context().with_stable_hashing_context(|mut hcx| hasher(&mut hcx, cached_result.borrow())); - let new_hash = qcx.dep_context().with_stable_hashing_context(|mut hcx| hasher(&mut hcx, &result)); + if let Some((cached_result, _)) = cache.lookup(&key) { + let hasher = Q::HASH_RESULT.expect("feedable forbids no_hash"); + + let old_hash = qcx.dep_context().with_stable_hashing_context(|mut hcx| { + hasher(&mut hcx, cached_result.borrow()) + }); + let new_hash = qcx + .dep_context() + .with_stable_hashing_context(|mut hcx| hasher(&mut hcx, &result)); debug_assert_eq!( - old_hash, new_hash, + old_hash, + new_hash, "Computed query value for {:?}({:?}) is inconsistent with fed value,\ncomputed={:#?}\nfed={:#?}", - query.dep_kind, key, result, cached_result, + Q::DEP_KIND, + key, + result, + cached_result, ); - }); + } } let result = job.complete(cache, result, dep_node_index); (result, Some(dep_node_index)) } TryGetJob::Cycle(error) => { - let result = mk_cycle(qcx, error, query.handle_cycle_error, cache); + let result = mk_cycle(qcx, error, Q::HANDLE_CYCLE_ERROR, cache); (result, None) } #[cfg(parallel_compiler)] TryGetJob::JobCompleted(query_blocked_prof_timer) => { - let (v, index) = cache - .lookup(&key, |value, index| (value.clone(), index)) - .unwrap_or_else(|_| panic!("value must be in cache after waiting")); + let Some((v, index)) = cache.lookup(&key) else { + panic!("value must be in cache after waiting") + }; if std::intrinsics::unlikely(qcx.dep_context().profiler().enabled()) { qcx.dep_context().profiler().query_cache_hit(index.into()); @@ -417,16 +418,14 @@ fn try_execute_query( } } -fn execute_job( +fn execute_job( qcx: Qcx, - key: K, + key: Q::Key, mut dep_node_opt: Option>, - query: &QueryVTable, job_id: QueryJobId, -) -> (V, DepNodeIndex) +) -> (Q::Value, DepNodeIndex) where - K: Clone + DepNodeParams, - V: Debug, + Q: QueryConfig, Qcx: QueryContext, { let dep_graph = qcx.dep_context().dep_graph(); @@ -434,23 +433,23 @@ fn execute_job( // Fast path for when incr. comp. is off. if !dep_graph.is_fully_enabled() { let prof_timer = qcx.dep_context().profiler().query_provider(); - let result = qcx.start_query(job_id, query.depth_limit, None, || { - query.compute(*qcx.dep_context(), key) + let result = qcx.start_query(job_id, Q::DEPTH_LIMIT, None, || { + Q::compute(qcx, &key)(*qcx.dep_context(), key) }); let dep_node_index = dep_graph.next_virtual_depnode_index(); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); return (result, dep_node_index); } - if !query.anon && !query.eval_always { + if !Q::ANON && !Q::EVAL_ALWAYS { // `to_dep_node` is expensive for some `DepKind`s. let dep_node = - dep_node_opt.get_or_insert_with(|| query.to_dep_node(*qcx.dep_context(), &key)); + dep_node_opt.get_or_insert_with(|| Q::construct_dep_node(*qcx.dep_context(), &key)); // The diagnostics for this query will be promoted to the current session during // `try_mark_green()`, so we can ignore them here. 
if let Some(ret) = qcx.start_query(job_id, false, None, || { - try_load_from_disk_and_cache_in_memory(qcx, &key, &dep_node, query) + try_load_from_disk_and_cache_in_memory::(qcx, &key, &dep_node) }) { return ret; } @@ -460,18 +459,19 @@ fn execute_job( let diagnostics = Lock::new(ThinVec::new()); let (result, dep_node_index) = - qcx.start_query(job_id, query.depth_limit, Some(&diagnostics), || { - if query.anon { - return dep_graph.with_anon_task(*qcx.dep_context(), query.dep_kind, || { - query.compute(*qcx.dep_context(), key) + qcx.start_query(job_id, Q::DEPTH_LIMIT, Some(&diagnostics), || { + if Q::ANON { + return dep_graph.with_anon_task(*qcx.dep_context(), Q::DEP_KIND, || { + Q::compute(qcx, &key)(*qcx.dep_context(), key) }); } // `to_dep_node` is expensive for some `DepKind`s. let dep_node = - dep_node_opt.unwrap_or_else(|| query.to_dep_node(*qcx.dep_context(), &key)); + dep_node_opt.unwrap_or_else(|| Q::construct_dep_node(*qcx.dep_context(), &key)); - dep_graph.with_task(dep_node, *qcx.dep_context(), key, query.compute, query.hash_result) + let task = Q::compute(qcx, &key); + dep_graph.with_task(dep_node, *qcx.dep_context(), key, task, Q::HASH_RESULT) }); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); @@ -480,7 +480,7 @@ fn execute_job( let side_effects = QuerySideEffects { diagnostics }; if std::intrinsics::unlikely(!side_effects.is_empty()) { - if query.anon { + if Q::ANON { qcx.store_side_effects_for_anon_node(dep_node_index, side_effects); } else { qcx.store_side_effects(dep_node_index, side_effects); @@ -490,16 +490,14 @@ fn execute_job( (result, dep_node_index) } -fn try_load_from_disk_and_cache_in_memory( +fn try_load_from_disk_and_cache_in_memory( qcx: Qcx, - key: &K, + key: &Q::Key, dep_node: &DepNode, - query: &QueryVTable, -) -> Option<(V, DepNodeIndex)> +) -> Option<(Q::Value, DepNodeIndex)> where - K: Clone, + Q: QueryConfig, Qcx: QueryContext, - V: Debug, { // Note this function can be called concurrently from the same query // We must ensure that this is handled correctly. @@ -511,7 +509,7 @@ fn try_load_from_disk_and_cache_in_memory( // First we try to load the result from the on-disk cache. // Some things are never cached on disk. - if let Some(try_load_from_disk) = query.try_load_from_disk { + if let Some(try_load_from_disk) = Q::try_load_from_disk(qcx, &key) { let prof_timer = qcx.dep_context().profiler().incr_cache_loading(); // The call to `with_query_deserialization` enforces that no new `DepNodes` @@ -545,7 +543,7 @@ fn try_load_from_disk_and_cache_in_memory( if std::intrinsics::unlikely( try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich, ) { - incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query.hash_result); + incremental_verify_ich(*qcx.dep_context(), &result, dep_node, Q::HASH_RESULT); } return Some((result, dep_node_index)); @@ -555,8 +553,7 @@ fn try_load_from_disk_and_cache_in_memory( // can be forced from `DepNode`. debug_assert!( !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(), - "missing on-disk cache entry for {:?}", - dep_node + "missing on-disk cache entry for {dep_node:?}" ); } @@ -565,7 +562,7 @@ fn try_load_from_disk_and_cache_in_memory( let prof_timer = qcx.dep_context().profiler().query_provider(); // The dep-graph for this computation is already in-place. 
- let result = dep_graph.with_ignore(|| query.compute(*qcx.dep_context(), key.clone())); + let result = dep_graph.with_ignore(|| Q::compute(qcx, key)(*qcx.dep_context(), key.clone())); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); @@ -578,7 +575,7 @@ fn try_load_from_disk_and_cache_in_memory( // // See issue #82920 for an example of a miscompilation that would get turned into // an ICE by this check - incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query.hash_result); + incremental_verify_ich(*qcx.dep_context(), &result, dep_node, Q::HASH_RESULT); Some((result, dep_node_index)) } @@ -595,8 +592,7 @@ pub(crate) fn incremental_verify_ich( { assert!( tcx.dep_graph().is_green(dep_node), - "fingerprint for green query instance not loaded from cache: {:?}", - dep_node, + "fingerprint for green query instance not loaded from cache: {dep_node:?}", ); let new_hash = hash_result.map_or(Fingerprint::ZERO, |f| { @@ -675,16 +671,16 @@ fn incremental_verify_ich_failed(sess: &Session, dep_node: DebugArg<'_>, result: sess.emit_err(crate::error::Reentrant); } else { let run_cmd = if let Some(crate_name) = &sess.opts.crate_name { - format!("`cargo clean -p {}` or `cargo clean`", crate_name) + format!("`cargo clean -p {crate_name}` or `cargo clean`") } else { "`cargo clean`".to_string() }; sess.emit_err(crate::error::IncrementCompilation { run_cmd, - dep_node: format!("{:?}", dep_node), + dep_node: format!("{dep_node:?}"), }); - panic!("Found unstable fingerprints for {:?}: {:?}", dep_node, result); + panic!("Found unstable fingerprints for {dep_node:?}: {result:?}"); } INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.set(old_in_panic)); @@ -699,23 +695,19 @@ fn incremental_verify_ich_failed(sess: &Session, dep_node: DebugArg<'_>, result: /// /// Note: The optimization is only available during incr. comp. #[inline(never)] -fn ensure_must_run( - qcx: Qcx, - key: &K, - query: &QueryVTable, -) -> (bool, Option>) +fn ensure_must_run(qcx: Qcx, key: &Q::Key) -> (bool, Option>) where - K: crate::dep_graph::DepNodeParams, + Q: QueryConfig, Qcx: QueryContext, { - if query.eval_always { + if Q::EVAL_ALWAYS { return (true, None); } // Ensuring an anonymous query makes no sense - assert!(!query.anon); + assert!(!Q::ANON); - let dep_node = query.to_dep_node(*qcx.dep_context(), key); + let dep_node = Q::construct_dep_node(*qcx.dep_context(), key); let dep_graph = qcx.dep_context().dep_graph(); match dep_graph.try_mark_green(qcx, &dep_node) { @@ -746,13 +738,11 @@ pub fn get_query(qcx: Qcx, span: Span, key: Q::Key, mode: QueryMode) where D: DepKind, Q: QueryConfig, - Q::Key: DepNodeParams, Q::Value: Value, Qcx: QueryContext, { - let query = Q::make_vtable(qcx, &key); let dep_node = if let QueryMode::Ensure = mode { - let (must_run, dep_node) = ensure_must_run(qcx, &key, &query); + let (must_run, dep_node) = ensure_must_run::(qcx, &key); if !must_run { return None; } @@ -761,14 +751,13 @@ pub fn get_query(qcx: Qcx, span: Span, key: Q::Key, mode: QueryMode) None }; - let (result, dep_node_index) = try_execute_query( + let (result, dep_node_index) = try_execute_query::( qcx, Q::query_state(qcx), Q::query_cache(qcx), span, key, dep_node, - &query, ); if let Some(dep_node_index) = dep_node_index { qcx.dep_context().dep_graph().read_index(dep_node_index) @@ -780,27 +769,21 @@ pub fn force_query(qcx: Qcx, key: Q::Key, dep_node: DepNode, - Q::Key: DepNodeParams, Q::Value: Value, Qcx: QueryContext, { // We may be concurrently trying both execute and force a query. 
// Ensure that only one of them runs the query. let cache = Q::query_cache(qcx); - let cached = cache.lookup(&key, |_, index| { + if let Some((_, index)) = cache.lookup(&key) { if std::intrinsics::unlikely(qcx.dep_context().profiler().enabled()) { qcx.dep_context().profiler().query_cache_hit(index.into()); } - }); - - match cached { - Ok(()) => return, - Err(()) => {} + return; } - let query = Q::make_vtable(qcx, &key); let state = Q::query_state(qcx); - debug_assert!(!query.anon); + debug_assert!(!Q::ANON); - try_execute_query(qcx, state, cache, DUMMY_SP, key, Some(dep_node), &query); + try_execute_query::(qcx, state, cache, DUMMY_SP, key, Some(dep_node)); }
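
// ---------------------------------------------------------------------------
// Illustrative sketch of the two API shapes swapped by the diff above. This is
// NOT the real rustc_query_system code: every name below (QueryConfigSketch,
// CacheSketch, TypeOfSketch, get_query_sketch, lookup_with) is invented for
// illustration, and the real plumbing also handles dep-graph recording, job
// ownership, cycle errors and incremental loading. The sketch only shows:
//   * per-query behaviour moving from a runtime `QueryVTable`-style value
//     (`query.anon`, `query.compute`) to associated items on a
//     `QueryConfig`-style trait (`Q::ANON`, `Q::compute`), and
//   * the cache lookup changing from a closure-based callback to a plain
//     `Option`-returning method.

use std::collections::HashMap;
use std::hash::Hash;

/// Stand-in for the `QueryConfig` trait: flags become associated constants and
/// the provider becomes an associated function, so no vtable value has to be
/// threaded through `try_execute_query`/`execute_job`.
trait QueryConfigSketch {
    const ANON: bool;
    const EVAL_ALWAYS: bool;

    type Key: Hash + Eq + Clone;
    type Value: Clone;

    fn compute(key: &Self::Key) -> Self::Value;
}

/// Toy cache standing in for the sharded query caches.
struct CacheSketch<K, V> {
    map: HashMap<K, (V, u32)>, // value plus a stand-in for `DepNodeIndex`
}

impl<K: Hash + Eq, V: Clone> CacheSketch<K, V> {
    /// Old lookup shape: the caller's closure runs while the cache is
    /// borrowed, and a miss is reported as `Err(())`.
    #[allow(dead_code)]
    fn lookup_with<R>(&self, key: &K, on_hit: impl FnOnce(&V, u32) -> R) -> Result<R, ()> {
        match self.map.get(key) {
            Some((value, index)) => Ok(on_hit(value, *index)),
            None => Err(()),
        }
    }

    /// New lookup shape: return the hit and let the caller decide what to do.
    fn lookup(&self, key: &K) -> Option<(V, u32)> {
        self.map.get(key).map(|(value, index)| (value.clone(), *index))
    }
}

/// A toy query written against the trait-based style.
struct TypeOfSketch;

impl QueryConfigSketch for TypeOfSketch {
    const ANON: bool = false;
    const EVAL_ALWAYS: bool = false;

    type Key = u32;
    type Value = String;

    fn compute(key: &u32) -> String {
        format!("type of def #{key}")
    }
}

/// Very roughly the control flow of `get_query` above, minus all dep-graph
/// and incremental-compilation work.
fn get_query_sketch<Q: QueryConfigSketch>(
    cache: &mut CacheSketch<Q::Key, Q::Value>,
    key: Q::Key,
) -> Q::Value {
    // The new lookup style from the diff: `if let Some(..) = cache.lookup(..)`.
    if let Some((value, _index)) = cache.lookup(&key) {
        return value;
    }
    // The real code consults `Q::ANON`, `Q::EVAL_ALWAYS`, `Q::DEPTH_LIMIT` etc.
    // here to drive dep-graph handling; the sketch just runs the provider.
    let _flags = (Q::ANON, Q::EVAL_ALWAYS);
    let value = Q::compute(&key);
    cache.map.insert(key, (value.clone(), 0));
    value
}

fn main() {
    let mut cache = CacheSketch { map: HashMap::new() };
    let first = get_query_sketch::<TypeOfSketch>(&mut cache, 7);
    let second = get_query_sketch::<TypeOfSketch>(&mut cache, 7); // cache hit
    assert_eq!(first, second);
    println!("{first}");
}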