It was only needed by `find_cycle_in_stack()` in job.rs, yet it had to be forwarded through dozens of types.
fn try_collect_active_jobs(
&self,
- ) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self>>> {
+ ) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self::DepKind, Self::Query>>>
+ {
self.queries.try_collect_active_jobs()
}
type Cache = query_storage!([$($modifiers)*][$($K)*, $V]);
#[inline(always)]
- fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<TyCtxt<$tcx>, Self::Cache> {
+ fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<crate::dep_graph::DepKind, <TyCtxt<$tcx> as QueryContext>::Query, Self::Cache> {
&tcx.queries.$name
}
fallback_extern_providers: Box<Providers>,
$($(#[$attr])* $name: QueryState<
- TyCtxt<$tcx>,
+ crate::dep_graph::DepKind,
+ <TyCtxt<$tcx> as QueryContext>::Query,
<queries::$name<$tcx> as QueryAccessors<TyCtxt<'tcx>>>::Cache,
>,)*
}
pub(crate) fn try_collect_active_jobs(
&self
- ) -> Option<FxHashMap<QueryJobId<crate::dep_graph::DepKind>, QueryJobInfo<TyCtxt<'tcx>>>> {
+ ) -> Option<FxHashMap<QueryJobId<crate::dep_graph::DepKind>, QueryJobInfo<crate::dep_graph::DepKind, <TyCtxt<$tcx> as QueryContext>::Query>>> {
let mut jobs = FxHashMap::default();
$(
use rustc_data_structures::profiling::SelfProfiler;
use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE};
use rustc_hir::definitions::DefPathData;
-use rustc_query_system::query::QueryCache;
-use rustc_query_system::query::QueryState;
+use rustc_query_system::query::{QueryCache, QueryContext, QueryState};
use std::fmt::Debug;
use std::io::Write;
pub(super) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
tcx: TyCtxt<'tcx>,
query_name: &'static str,
- query_state: &QueryState<TyCtxt<'tcx>, C>,
+ query_state: &QueryState<crate::dep_graph::DepKind, <TyCtxt<'tcx> as QueryContext>::Query, C>,
string_cache: &mut QueryKeyStringCache,
) where
C: QueryCache,
use crate::ty::query::queries;
use crate::ty::TyCtxt;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
-use rustc_query_system::query::QueryCache;
-use rustc_query_system::query::QueryState;
-use rustc_query_system::query::{QueryAccessors, QueryContext};
+use rustc_query_system::query::{QueryAccessors, QueryCache, QueryContext, QueryState};
use std::any::type_name;
+use std::hash::Hash;
use std::mem;
#[cfg(debug_assertions)]
use std::sync::atomic::Ordering;
local_def_id_keys: Option<usize>,
}
-fn stats<CTX: QueryContext, C: QueryCache>(
- name: &'static str,
- map: &QueryState<CTX, C>,
-) -> QueryStats {
+fn stats<D, Q, C>(name: &'static str, map: &QueryState<D, Q, C>) -> QueryStats
+where
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
+ C: QueryCache,
+{
let mut stats = QueryStats {
name,
#[cfg(debug_assertions)]
$($(
queries.push(stats::<
- TyCtxt<'_>,
+ crate::dep_graph::DepKind,
+ <TyCtxt<'_> as QueryContext>::Query,
<queries::$name<'_> as QueryAccessors<TyCtxt<'_>>>::Cache,
>(
stringify!($name),
use crate::dep_graph::DepNodeIndex;
use crate::query::plumbing::{QueryLookup, QueryState};
-use crate::query::QueryContext;
use rustc_arena::TypedArena;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sharded::Sharded;
use rustc_data_structures::sync::WorkerLocal;
use std::default::Default;
+use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
}
pub trait QueryCache: QueryStorage {
- type Key: Hash;
+ type Key: Hash + Eq + Clone + Debug;
type Sharded: Default;
/// Checks if the query is already computed and in the cache.
/// It returns the shard index and a lock guard to the shard,
/// which will be used if the query is not in the cache and we need
/// to compute it.
- fn lookup<CTX: QueryContext, R, OnHit, OnMiss>(
+ fn lookup<D, Q, R, OnHit, OnMiss>(
&self,
- state: &QueryState<CTX, Self>,
+ state: &QueryState<D, Q, Self>,
key: Self::Key,
// `on_hit` can be called while holding a lock to the query state shard.
on_hit: OnHit,
) -> R
where
OnHit: FnOnce(&Self::Stored, DepNodeIndex) -> R,
- OnMiss: FnOnce(Self::Key, QueryLookup<'_, CTX, Self::Key, Self::Sharded>) -> R;
+ OnMiss: FnOnce(Self::Key, QueryLookup<'_, D, Q, Self::Key, Self::Sharded>) -> R;
fn complete(
&self,
}
}
-impl<K: Eq + Hash, V: Clone> QueryCache for DefaultCache<K, V> {
+impl<K, V> QueryCache for DefaultCache<K, V>
+where
+ K: Eq + Hash + Clone + Debug,
+ V: Clone,
+{
type Key = K;
type Sharded = FxHashMap<K, (V, DepNodeIndex)>;
#[inline(always)]
- fn lookup<CTX: QueryContext, R, OnHit, OnMiss>(
+ fn lookup<D, Q, R, OnHit, OnMiss>(
&self,
- state: &QueryState<CTX, Self>,
+ state: &QueryState<D, Q, Self>,
key: K,
on_hit: OnHit,
on_miss: OnMiss,
) -> R
where
OnHit: FnOnce(&V, DepNodeIndex) -> R,
- OnMiss: FnOnce(K, QueryLookup<'_, CTX, K, Self::Sharded>) -> R,
+ OnMiss: FnOnce(K, QueryLookup<'_, D, Q, K, Self::Sharded>) -> R,
{
let mut lookup = state.get_lookup(&key);
let lock = &mut *lookup.lock;
}
}
-impl<'tcx, K: Eq + Hash, V: 'tcx> QueryCache for ArenaCache<'tcx, K, V> {
+impl<'tcx, K, V: 'tcx> QueryCache for ArenaCache<'tcx, K, V>
+where
+ K: Eq + Hash + Clone + Debug,
+{
type Key = K;
type Sharded = FxHashMap<K, &'tcx (V, DepNodeIndex)>;
#[inline(always)]
- fn lookup<CTX: QueryContext, R, OnHit, OnMiss>(
+ fn lookup<D, Q, R, OnHit, OnMiss>(
&self,
- state: &QueryState<CTX, Self>,
+ state: &QueryState<D, Q, Self>,
key: K,
on_hit: OnHit,
on_miss: OnMiss,
) -> R
where
OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R,
- OnMiss: FnOnce(K, QueryLookup<'_, CTX, K, Self::Sharded>) -> R,
+ OnMiss: FnOnce(K, QueryLookup<'_, D, Q, K, Self::Sharded>) -> R,
{
let mut lookup = state.get_lookup(&key);
let lock = &mut *lookup.lock;
type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>;
// Don't use this method to access query results, instead use the methods on TyCtxt
- fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX, Self::Cache>;
+ fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX::DepKind, CTX::Query, Self::Cache>;
fn to_dep_node(tcx: CTX, key: &Self::Key) -> DepNode<CTX::DepKind>
where
-use crate::dep_graph::{DepContext, DepKind};
use crate::query::plumbing::CycleError;
-use crate::query::QueryContext;
use rustc_data_structures::fx::FxHashMap;
use rustc_span::Span;
use std::convert::TryFrom;
+use std::hash::Hash;
use std::marker::PhantomData;
use std::num::NonZeroU32;
#[cfg(parallel_compiler)]
use {
+ super::QueryContext,
parking_lot::{Condvar, Mutex},
rustc_data_structures::fx::FxHashSet,
rustc_data_structures::stable_hasher::{HashStable, StableHasher},
pub query: Q,
}
-type QueryMap<CTX> = FxHashMap<QueryJobId<<CTX as DepContext>::DepKind>, QueryJobInfo<CTX>>;
+pub(crate) type QueryMap<D, Q> = FxHashMap<QueryJobId<D>, QueryJobInfo<D, Q>>;
/// A value uniquely identifiying an active query job within a shard in the query cache.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
/// A value uniquely identifiying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
-pub struct QueryJobId<K> {
+pub struct QueryJobId<D> {
/// Which job within a shard is this
pub job: QueryShardJobId,
/// In which shard is this job
pub shard: u16,
- /// What kind of query this job is
- pub kind: K,
+ /// What kind of query this job is.
+ pub kind: D,
}
-impl<K: DepKind> QueryJobId<K> {
- pub fn new(job: QueryShardJobId, shard: usize, kind: K) -> Self {
+impl<D> QueryJobId<D>
+where
+ D: Copy + Clone + Eq + Hash,
+{
+ pub fn new(job: QueryShardJobId, shard: usize, kind: D) -> Self {
QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind }
}
- fn query<CTX: QueryContext<DepKind = K>>(self, map: &QueryMap<CTX>) -> CTX::Query {
+ fn query<Q: Clone>(self, map: &QueryMap<D, Q>) -> Q {
map.get(&self).unwrap().info.query.clone()
}
#[cfg(parallel_compiler)]
- fn span<CTX: QueryContext<DepKind = K>>(self, map: &QueryMap<CTX>) -> Span {
+ fn span<Q: Clone>(self, map: &QueryMap<D, Q>) -> Span {
map.get(&self).unwrap().job.span
}
#[cfg(parallel_compiler)]
- fn parent<CTX: QueryContext<DepKind = K>>(self, map: &QueryMap<CTX>) -> Option<QueryJobId<K>> {
+ fn parent<Q: Clone>(self, map: &QueryMap<D, Q>) -> Option<QueryJobId<D>> {
map.get(&self).unwrap().job.parent
}
#[cfg(parallel_compiler)]
- fn latch<'a, CTX: QueryContext<DepKind = K>>(
- self,
- map: &'a QueryMap<CTX>,
- ) -> Option<&'a QueryLatch<CTX>> {
+ fn latch<'a, Q: Clone>(self, map: &'a QueryMap<D, Q>) -> Option<&'a QueryLatch<D, Q>> {
map.get(&self).unwrap().job.latch.as_ref()
}
}
-pub struct QueryJobInfo<CTX: QueryContext> {
- pub info: QueryInfo<CTX::Query>,
- pub job: QueryJob<CTX>,
+pub struct QueryJobInfo<D, Q> {
+ pub info: QueryInfo<Q>,
+ pub job: QueryJob<D, Q>,
}
/// Represents an active query job.
#[derive(Clone)]
-pub struct QueryJob<CTX: QueryContext> {
+pub struct QueryJob<D, Q> {
pub id: QueryShardJobId,
/// The span corresponding to the reason for which this query was required.
pub span: Span,
/// The parent query job which created this job and is implicitly waiting on it.
- pub parent: Option<QueryJobId<CTX::DepKind>>,
+ pub parent: Option<QueryJobId<D>>,
/// The latch that is used to wait on this job.
#[cfg(parallel_compiler)]
- latch: Option<QueryLatch<CTX>>,
+ latch: Option<QueryLatch<D, Q>>,
- dummy: PhantomData<QueryLatch<CTX>>,
+ dummy: PhantomData<QueryLatch<D, Q>>,
}
-impl<CTX: QueryContext> QueryJob<CTX> {
+impl<D, Q> QueryJob<D, Q>
+where
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
+{
/// Creates a new query job.
- pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<CTX::DepKind>>) -> Self {
+ pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<D>>) -> Self {
QueryJob {
id,
span,
}
#[cfg(parallel_compiler)]
- pub(super) fn latch(&mut self, _id: QueryJobId<CTX::DepKind>) -> QueryLatch<CTX> {
+ pub(super) fn latch(&mut self, _id: QueryJobId<D>) -> QueryLatch<D, Q> {
if self.latch.is_none() {
self.latch = Some(QueryLatch::new());
}
}
#[cfg(not(parallel_compiler))]
- pub(super) fn latch(&mut self, id: QueryJobId<CTX::DepKind>) -> QueryLatch<CTX> {
+ pub(super) fn latch(&mut self, id: QueryJobId<D>) -> QueryLatch<D, Q> {
QueryLatch { id, dummy: PhantomData }
}
#[cfg(not(parallel_compiler))]
#[derive(Clone)]
-pub(super) struct QueryLatch<CTX: QueryContext> {
- id: QueryJobId<CTX::DepKind>,
- dummy: PhantomData<CTX>,
+pub(super) struct QueryLatch<D, Q> {
+ id: QueryJobId<D>,
+ dummy: PhantomData<Q>,
}
#[cfg(not(parallel_compiler))]
-impl<CTX: QueryContext> QueryLatch<CTX> {
- pub(super) fn find_cycle_in_stack(&self, tcx: CTX, span: Span) -> CycleError<CTX::Query> {
- let query_map = tcx.try_collect_active_jobs().unwrap();
-
- // Get the current executing query (waiter) and find the waitee amongst its parents
- let mut current_job = tcx.current_query_job();
+impl<D, Q> QueryLatch<D, Q>
+where
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
+{
+ pub(super) fn find_cycle_in_stack(
+ &self,
+ query_map: QueryMap<D, Q>,
+ current_job: &Option<QueryJobId<D>>,
+ span: Span,
+ ) -> CycleError<Q> {
+ // Find the waitee amongst `current_job`'s parents
let mut cycle = Vec::new();
+ let mut current_job = Option::clone(current_job);
while let Some(job) = current_job {
let info = query_map.get(&job).unwrap();
}
#[cfg(parallel_compiler)]
-struct QueryWaiter<CTX: QueryContext> {
- query: Option<QueryJobId<CTX::DepKind>>,
+struct QueryWaiter<D, Q> {
+ query: Option<QueryJobId<D>>,
condvar: Condvar,
span: Span,
- cycle: Lock<Option<CycleError<CTX::Query>>>,
+ cycle: Lock<Option<CycleError<Q>>>,
}
#[cfg(parallel_compiler)]
-impl<CTX: QueryContext> QueryWaiter<CTX> {
+impl<D, Q> QueryWaiter<D, Q> {
fn notify(&self, registry: &rayon_core::Registry) {
rayon_core::mark_unblocked(registry);
self.condvar.notify_one();
}
#[cfg(parallel_compiler)]
-struct QueryLatchInfo<CTX: QueryContext> {
+struct QueryLatchInfo<D, Q> {
complete: bool,
- waiters: Vec<Lrc<QueryWaiter<CTX>>>,
+ waiters: Vec<Lrc<QueryWaiter<D, Q>>>,
}
#[cfg(parallel_compiler)]
#[derive(Clone)]
-pub(super) struct QueryLatch<CTX: QueryContext> {
- info: Lrc<Mutex<QueryLatchInfo<CTX>>>,
+pub(super) struct QueryLatch<D, Q> {
+ info: Lrc<Mutex<QueryLatchInfo<D, Q>>>,
}
#[cfg(parallel_compiler)]
-impl<CTX: QueryContext> QueryLatch<CTX> {
+impl<D: Eq + Hash, Q: Clone> QueryLatch<D, Q> {
fn new() -> Self {
QueryLatch {
info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
}
#[cfg(parallel_compiler)]
-impl<CTX: QueryContext> QueryLatch<CTX> {
+impl<D, Q> QueryLatch<D, Q> {
/// Awaits for the query job to complete.
- pub(super) fn wait_on(&self, tcx: CTX, span: Span) -> Result<(), CycleError<CTX::Query>> {
- let query = tcx.current_query_job();
+ pub(super) fn wait_on(
+ &self,
+ query: Option<QueryJobId<D>>,
+ span: Span,
+ ) -> Result<(), CycleError<Q>> {
let waiter =
Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
self.wait_on_inner(&waiter);
Some(cycle) => Err(cycle),
}
}
-}
-#[cfg(parallel_compiler)]
-impl<CTX: QueryContext> QueryLatch<CTX> {
/// Awaits the caller on this latch by blocking the current thread.
- fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<CTX>>) {
+ fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D, Q>>) {
let mut info = self.info.lock();
if !info.complete {
// We push the waiter on to the `waiters` list. It can be accessed inside
/// Removes a single waiter from the list of waiters.
/// This is used to break query cycles.
- fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<CTX>> {
+ fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D, Q>> {
let mut info = self.info.lock();
debug_assert!(!info.complete);
// Remove the waiter from the list of waiters
/// A resumable waiter of a query. The usize is the index into waiters in the query's latch
#[cfg(parallel_compiler)]
-type Waiter<K> = (QueryJobId<K>, usize);
+type Waiter<D> = (QueryJobId<D>, usize);
/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited.
/// required information to resume the waiter.
/// If all `visit` calls returns None, this function also returns None.
#[cfg(parallel_compiler)]
-fn visit_waiters<CTX: QueryContext, F>(
- query_map: &QueryMap<CTX>,
- query: QueryJobId<CTX::DepKind>,
+fn visit_waiters<D, Q, F>(
+ query_map: &QueryMap<D, Q>,
+ query: QueryJobId<D>,
mut visit: F,
-) -> Option<Option<Waiter<CTX::DepKind>>>
+) -> Option<Option<Waiter<D>>>
where
- F: FnMut(Span, QueryJobId<CTX::DepKind>) -> Option<Option<Waiter<CTX::DepKind>>>,
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
+ F: FnMut(Span, QueryJobId<D>) -> Option<Option<Waiter<D>>>,
{
// Visit the parent query which is a non-resumable waiter since it's on the same stack
if let Some(parent) = query.parent(query_map) {
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
-fn cycle_check<CTX: QueryContext>(
- query_map: &QueryMap<CTX>,
- query: QueryJobId<CTX::DepKind>,
+fn cycle_check<D, Q>(
+ query_map: &QueryMap<D, Q>,
+ query: QueryJobId<D>,
span: Span,
- stack: &mut Vec<(Span, QueryJobId<CTX::DepKind>)>,
- visited: &mut FxHashSet<QueryJobId<CTX::DepKind>>,
-) -> Option<Option<Waiter<CTX::DepKind>>> {
+ stack: &mut Vec<(Span, QueryJobId<D>)>,
+ visited: &mut FxHashSet<QueryJobId<D>>,
+) -> Option<Option<Waiter<D>>>
+where
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
+{
if !visited.insert(query) {
return if let Some(p) = stack.iter().position(|q| q.1 == query) {
// We detected a query cycle, fix up the initial span and return Some
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
-fn connected_to_root<CTX: QueryContext>(
- query_map: &QueryMap<CTX>,
- query: QueryJobId<CTX::DepKind>,
- visited: &mut FxHashSet<QueryJobId<CTX::DepKind>>,
-) -> bool {
+fn connected_to_root<D, Q>(
+ query_map: &QueryMap<D, Q>,
+ query: QueryJobId<D>,
+ visited: &mut FxHashSet<QueryJobId<D>>,
+) -> bool
+where
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
+{
// We already visited this or we're deliberately ignoring it
if !visited.insert(query) {
return false;
// Deterministically pick an query from a list
#[cfg(parallel_compiler)]
-fn pick_query<'a, CTX, T, F>(query_map: &QueryMap<CTX>, tcx: CTX, queries: &'a [T], f: F) -> &'a T
+fn pick_query<'a, CTX, T, F>(
+ query_map: &QueryMap<CTX::DepKind, CTX::Query>,
+ tcx: CTX,
+ queries: &'a [T],
+ f: F,
+) -> &'a T
where
CTX: QueryContext,
F: Fn(&T) -> (Span, QueryJobId<CTX::DepKind>),
/// the function returns false.
#[cfg(parallel_compiler)]
fn remove_cycle<CTX: QueryContext>(
- query_map: &QueryMap<CTX>,
+ query_map: &QueryMap<CTX::DepKind, CTX::Query>,
jobs: &mut Vec<QueryJobId<CTX::DepKind>>,
- wakelist: &mut Vec<Lrc<QueryWaiter<CTX>>>,
+ wakelist: &mut Vec<Lrc<QueryWaiter<CTX::DepKind, CTX::Query>>>,
tcx: CTX,
) -> bool {
let mut visited = FxHashSet::default();
pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};
use crate::dep_graph::{DepContext, DepGraph};
+use crate::query::job::QueryMap;
-use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stable_hasher::HashStable;
use rustc_data_structures::sync::Lock;
use rustc_data_structures::thin_vec::ThinVec;
/// Get the query information from the TLS context.
fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>>;
- fn try_collect_active_jobs(
- &self,
- ) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self>>>;
+ fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind, Self::Query>>;
/// Executes a job by changing the `ImplicitCtxt` to point to the
/// new query job while it executes. It returns the diagnostics
use crate::query::caches::QueryCache;
use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt};
use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId};
-use crate::query::QueryContext;
+use crate::query::{QueryContext, QueryMap};
#[cfg(not(parallel_compiler))]
use rustc_data_structures::cold_path;
use rustc_span::source_map::DUMMY_SP;
use rustc_span::Span;
use std::collections::hash_map::Entry;
-use std::convert::TryFrom;
-use std::fmt::Debug;
use std::hash::{Hash, Hasher};
use std::mem;
use std::num::NonZeroU32;
#[cfg(debug_assertions)]
use std::sync::atomic::{AtomicUsize, Ordering};
-pub(super) struct QueryStateShard<CTX: QueryContext, K, C> {
+pub(super) struct QueryStateShard<D, Q, K, C> {
pub(super) cache: C,
- active: FxHashMap<K, QueryResult<CTX>>,
+ active: FxHashMap<K, QueryResult<D, Q>>,
/// Used to generate unique ids for active jobs.
jobs: u32,
}
-impl<CTX: QueryContext, K, C: Default> Default for QueryStateShard<CTX, K, C> {
- fn default() -> QueryStateShard<CTX, K, C> {
+impl<D, Q, K, C: Default> Default for QueryStateShard<D, Q, K, C> {
+ fn default() -> QueryStateShard<D, Q, K, C> {
QueryStateShard { cache: Default::default(), active: Default::default(), jobs: 0 }
}
}
-pub struct QueryState<CTX: QueryContext, C: QueryCache> {
+pub struct QueryState<D, Q, C: QueryCache> {
cache: C,
- shards: Sharded<QueryStateShard<CTX, C::Key, C::Sharded>>,
+ shards: Sharded<QueryStateShard<D, Q, C::Key, C::Sharded>>,
#[cfg(debug_assertions)]
pub cache_hits: AtomicUsize,
}
-impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> {
+impl<D, Q, C: QueryCache> QueryState<D, Q, C> {
#[inline]
pub(super) fn get_lookup<'tcx>(
&'tcx self,
key: &C::Key,
- ) -> QueryLookup<'tcx, CTX, C::Key, C::Sharded> {
+ ) -> QueryLookup<'tcx, D, Q, C::Key, C::Sharded> {
// We compute the key's hash once and then use it for both the
// shard lookup and the hashmap lookup. This relies on the fact
// that both of them use `FxHasher`.
}
/// Indicates the state of a query for a given key in a query map.
-enum QueryResult<CTX: QueryContext> {
+enum QueryResult<D, Q> {
/// An already executing query. The query job can be used to await for its completion.
- Started(QueryJob<CTX>),
+ Started(QueryJob<D, Q>),
/// The query panicked. Queries trying to wait on this will raise a fatal error which will
/// silently panic.
Poisoned,
}
-impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> {
+impl<D, Q, C> QueryState<D, Q, C>
+where
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
+ C: QueryCache,
+{
#[inline(always)]
pub fn iter_results<R>(
&self,
pub fn try_collect_active_jobs(
&self,
- kind: CTX::DepKind,
- make_query: fn(C::Key) -> CTX::Query,
- jobs: &mut FxHashMap<QueryJobId<CTX::DepKind>, QueryJobInfo<CTX>>,
- ) -> Option<()>
- where
- C::Key: Clone,
- {
+ kind: D,
+ make_query: fn(C::Key) -> Q,
+ jobs: &mut QueryMap<D, Q>,
+ ) -> Option<()> {
// We use try_lock_shards here since we are called from the
// deadlock handler, and this shouldn't be locked.
let shards = self.shards.try_lock_shards()?;
jobs.extend(shards.flat_map(|(shard_id, shard)| {
shard.active.iter().filter_map(move |(k, v)| {
if let QueryResult::Started(ref job) = *v {
- let id =
- QueryJobId { job: job.id, shard: u16::try_from(shard_id).unwrap(), kind };
+ let id = QueryJobId::new(job.id, shard_id, kind);
let info = QueryInfo { span: job.span, query: make_query(k.clone()) };
Some((id, QueryJobInfo { info, job: job.clone() }))
} else {
}
}
-impl<CTX: QueryContext, C: QueryCache> Default for QueryState<CTX, C> {
- fn default() -> QueryState<CTX, C> {
+impl<D, Q, C: QueryCache> Default for QueryState<D, Q, C> {
+ fn default() -> QueryState<D, Q, C> {
QueryState {
cache: C::default(),
shards: Default::default(),
}
/// Values used when checking a query cache which can be reused on a cache-miss to execute the query.
-pub struct QueryLookup<'tcx, CTX: QueryContext, K, C> {
+pub struct QueryLookup<'tcx, D, Q, K, C> {
pub(super) key_hash: u64,
shard: usize,
- pub(super) lock: LockGuard<'tcx, QueryStateShard<CTX, K, C>>,
+ pub(super) lock: LockGuard<'tcx, QueryStateShard<D, Q, K, C>>,
}
/// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped.
-struct JobOwner<'tcx, CTX: QueryContext, C>
+struct JobOwner<'tcx, D, Q, C>
where
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
C: QueryCache,
- C::Key: Eq + Hash + Clone + Debug,
{
- state: &'tcx QueryState<CTX, C>,
+ state: &'tcx QueryState<D, Q, C>,
key: C::Key,
- id: QueryJobId<CTX::DepKind>,
+ id: QueryJobId<D>,
}
-impl<'tcx, CTX: QueryContext, C> JobOwner<'tcx, CTX, C>
+impl<'tcx, D, Q, C> JobOwner<'tcx, D, Q, C>
where
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
C: QueryCache,
- C::Key: Eq + Hash + Clone + Debug,
{
/// Either gets a `JobOwner` corresponding the query, allowing us to
/// start executing the query, or returns with the result of the query.
/// This function is inlined because that results in a noticeable speed-up
/// for some compile-time benchmarks.
#[inline(always)]
- fn try_start<'a, 'b>(
+ fn try_start<'a, 'b, CTX>(
tcx: CTX,
- state: &'b QueryState<CTX, C>,
+ state: &'b QueryState<CTX::DepKind, CTX::Query, C>,
span: Span,
key: &C::Key,
- mut lookup: QueryLookup<'a, CTX, C::Key, C::Sharded>,
+ mut lookup: QueryLookup<'a, CTX::DepKind, CTX::Query, C::Key, C::Sharded>,
query: &QueryVtable<CTX, C::Key, C::Value>,
- ) -> TryGetJob<'b, CTX, C>
+ ) -> TryGetJob<'b, CTX::DepKind, CTX::Query, C>
where
CTX: QueryContext,
{
// so we just return the error.
#[cfg(not(parallel_compiler))]
return TryGetJob::Cycle(cold_path(|| {
- let value = query.handle_cycle_error(tcx, latch.find_cycle_in_stack(tcx, span));
+ let error: CycleError<CTX::Query> = latch.find_cycle_in_stack(
+ tcx.try_collect_active_jobs().unwrap(),
+ &tcx.current_query_job(),
+ span,
+ );
+ let value = query.handle_cycle_error(tcx, error);
state.cache.store_nocache(value)
}));
// thread.
#[cfg(parallel_compiler)]
{
- let result = latch.wait_on(tcx, span);
+ let result = latch.wait_on(tcx.current_query_job(), span);
if let Err(cycle) = result {
let value = query.handle_cycle_error(tcx, cycle);
(result, diagnostics.into_inner())
}
-impl<'tcx, CTX: QueryContext, C: QueryCache> Drop for JobOwner<'tcx, CTX, C>
+impl<'tcx, D, Q, C> Drop for JobOwner<'tcx, D, Q, C>
where
- C::Key: Eq + Hash + Clone + Debug,
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
+ C: QueryCache,
{
#[inline(never)]
#[cold]
}
/// The result of `try_start`.
-enum TryGetJob<'tcx, CTX: QueryContext, C: QueryCache>
+enum TryGetJob<'tcx, D, Q, C>
where
- C::Key: Eq + Hash + Clone + Debug,
+ D: Copy + Clone + Eq + Hash,
+ Q: Clone,
+ C: QueryCache,
{
/// The query is not yet started. Contains a guard to the cache eventually used to start it.
- NotYetStarted(JobOwner<'tcx, CTX, C>),
+ NotYetStarted(JobOwner<'tcx, D, Q, C>),
/// The query was already completed.
/// Returns the result of the query and its dep-node index
#[inline(always)]
fn try_get_cached<CTX, C, R, OnHit, OnMiss>(
tcx: CTX,
- state: &QueryState<CTX, C>,
+ state: &QueryState<CTX::DepKind, CTX::Query, C>,
key: C::Key,
// `on_hit` can be called while holding a lock to the query cache
on_hit: OnHit,
C: QueryCache,
CTX: QueryContext,
OnHit: FnOnce(&C::Stored, DepNodeIndex) -> R,
- OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX, C::Key, C::Sharded>) -> R,
+ OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX::DepKind, CTX::Query, C::Key, C::Sharded>) -> R,
{
state.cache.lookup(
state,
#[inline(always)]
fn try_execute_query<CTX, C>(
tcx: CTX,
- state: &QueryState<CTX, C>,
+ state: &QueryState<CTX::DepKind, CTX::Query, C>,
span: Span,
key: C::Key,
- lookup: QueryLookup<'_, CTX, C::Key, C::Sharded>,
+ lookup: QueryLookup<'_, CTX::DepKind, CTX::Query, C::Key, C::Sharded>,
query: &QueryVtable<CTX, C::Key, C::Value>,
) -> C::Stored
where
C: QueryCache,
- C::Key: Eq + Clone + Debug + crate::dep_graph::DepNodeParams<CTX>,
- C::Stored: Clone,
+ C::Key: crate::dep_graph::DepNodeParams<CTX>,
CTX: QueryContext,
{
- let job = match JobOwner::try_start(tcx, state, span, &key, lookup, query) {
+ let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
+ tcx, state, span, &key, lookup, query,
+ ) {
TryGetJob::NotYetStarted(job) => job,
TryGetJob::Cycle(result) => return result,
#[cfg(parallel_compiler)]
fn force_query_with_job<C, CTX>(
tcx: CTX,
key: C::Key,
- job: JobOwner<'_, CTX, C>,
+ job: JobOwner<'_, CTX::DepKind, CTX::Query, C>,
dep_node: DepNode<CTX::DepKind>,
query: &QueryVtable<CTX, C::Key, C::Value>,
) -> (C::Stored, DepNodeIndex)
where
C: QueryCache,
- C::Key: Eq + Clone + Debug,
- C::Stored: Clone,
CTX: QueryContext,
{
// If the following assertion triggers, it can have two reasons:
#[inline(never)]
fn get_query_impl<CTX, C>(
tcx: CTX,
- state: &QueryState<CTX, C>,
+ state: &QueryState<CTX::DepKind, CTX::Query, C>,
span: Span,
key: C::Key,
query: &QueryVtable<CTX, C::Key, C::Value>,
where
CTX: QueryContext,
C: QueryCache,
- C::Key: Eq + Clone + crate::dep_graph::DepNodeParams<CTX>,
- C::Stored: Clone,
+ C::Key: crate::dep_graph::DepNodeParams<CTX>,
{
try_get_cached(
tcx,
#[inline(never)]
fn ensure_query_impl<CTX, C>(
tcx: CTX,
- state: &QueryState<CTX, C>,
+ state: &QueryState<CTX::DepKind, CTX::Query, C>,
key: C::Key,
query: &QueryVtable<CTX, C::Key, C::Value>,
) where
C: QueryCache,
- C::Key: Eq + Clone + crate::dep_graph::DepNodeParams<CTX>,
+ C::Key: crate::dep_graph::DepNodeParams<CTX>,
CTX: QueryContext,
{
if query.eval_always {
#[inline(never)]
fn force_query_impl<CTX, C>(
tcx: CTX,
- state: &QueryState<CTX, C>,
+ state: &QueryState<CTX::DepKind, CTX::Query, C>,
key: C::Key,
span: Span,
dep_node: DepNode<CTX::DepKind>,
query: &QueryVtable<CTX, C::Key, C::Value>,
) where
C: QueryCache,
- C::Key: Eq + Clone + crate::dep_graph::DepNodeParams<CTX>,
+ C::Key: crate::dep_graph::DepNodeParams<CTX>,
CTX: QueryContext,
{
// We may be concurrently trying both execute and force a query.
// Cache hit, do nothing
},
|key, lookup| {
- let job = match JobOwner::try_start(tcx, state, span, &key, lookup, query) {
+ let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
+ tcx, state, span, &key, lookup, query,
+ ) {
TryGetJob::NotYetStarted(job) => job,
TryGetJob::Cycle(_) => return,
#[cfg(parallel_compiler)]