--- /dev/null
+use crate::dep_graph::DepNodeIndex;
+use crate::ty::query::config::QueryAccessors;
+use crate::ty::query::plumbing::{QueryLookup, QueryState, QueryStateShard};
+use crate::ty::TyCtxt;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sharded::Sharded;
+use std::default::Default;
+use std::hash::Hash;
+
+/// Picks the `QueryCache` implementation used for queries with key type `K`
+/// and value type `V`. Implemented by marker types (e.g.
+/// `DefaultCacheSelector`) and reached through the `Key::CacheSelector`
+/// associated type, so each query key type chooses its cache strategy.
+pub(crate) trait CacheSelector<K, V> {
+ type Cache: QueryCache<K, V>;
+}
+
+pub(crate) trait QueryCache<K, V>: Default {
+ /// Per-shard storage type; one instance lives inside each
+ /// `QueryStateShard` behind that shard's lock.
+ type Sharded: Default;
+
+ /// Checks if the query is already computed and in the cache.
+ /// It returns the shard index and a lock guard to the shard,
+ /// which will be used if the query is not in the cache and we need
+ /// to compute it.
+ fn lookup<'tcx, R, GetCache, OnHit, OnMiss, Q>(
+ &self,
+ state: &'tcx QueryState<'tcx, Q>,
+ get_cache: GetCache,
+ key: K,
+ // `on_hit` can be called while holding a lock to the query state shard.
+ on_hit: OnHit,
+ on_miss: OnMiss,
+ ) -> R
+ where
+ Q: QueryAccessors<'tcx>,
+ GetCache: for<'a> Fn(&'a mut QueryStateShard<'tcx, Q>) -> &'a mut Self::Sharded,
+ OnHit: FnOnce(&V, DepNodeIndex) -> R,
+ OnMiss: FnOnce(K, QueryLookup<'tcx, Q>) -> R;
+
+ /// Records a freshly computed `value` (and its dep-graph `index`) for
+ /// `key` in the given shard's storage. The caller holds the shard lock.
+ // NOTE(review): `'tcx` is not declared on this method — presumably this
+ // crate enables `in_band_lifetimes`; confirm against the crate root.
+ fn complete(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ lock_sharded_storage: &mut Self::Sharded,
+ key: K,
+ value: V,
+ index: DepNodeIndex,
+ );
+
+ /// Hands `f` an iterator over every cached
+ /// `(key, value, dep-node index)` entry across all shards.
+ fn iter<R, L>(
+ &self,
+ shards: &Sharded<L>,
+ get_shard: impl Fn(&mut L) -> &mut Self::Sharded,
+ f: impl for<'a> FnOnce(Box<dyn Iterator<Item = (&'a K, &'a V, DepNodeIndex)> + 'a>) -> R,
+ ) -> R;
+}
+
+/// The cache selector used by default for query key types: maps every
+/// hashable key / clonable value pair to the `FxHashMap`-backed
+/// `DefaultCache`.
+pub struct DefaultCacheSelector;
+
+impl<K: Eq + Hash, V: Clone> CacheSelector<K, V> for DefaultCacheSelector {
+ type Cache = DefaultCache;
+}
+
+/// Plain hash-map query cache; the per-shard storage is an `FxHashMap`
+/// from key to `(value, dep-node index)`. The type itself is stateless —
+/// all data lives in the sharded storage.
+#[derive(Default)]
+pub struct DefaultCache;
+
+impl<K: Eq + Hash, V: Clone> QueryCache<K, V> for DefaultCache {
+ type Sharded = FxHashMap<K, (V, DepNodeIndex)>;
+
+ #[inline(always)]
+ fn lookup<'tcx, R, GetCache, OnHit, OnMiss, Q>(
+ &self,
+ state: &'tcx QueryState<'tcx, Q>,
+ get_cache: GetCache,
+ key: K,
+ on_hit: OnHit,
+ on_miss: OnMiss,
+ ) -> R
+ where
+ Q: QueryAccessors<'tcx>,
+ GetCache: for<'a> Fn(&'a mut QueryStateShard<'tcx, Q>) -> &'a mut Self::Sharded,
+ OnHit: FnOnce(&V, DepNodeIndex) -> R,
+ OnMiss: FnOnce(K, QueryLookup<'tcx, Q>) -> R,
+ {
+ // `get_lookup` hashes the key once (to pick the shard) and returns
+ // both the hash and the locked shard.
+ let mut lookup = state.get_lookup(&key);
+ let lock = &mut *lookup.lock;
+
+ // Reuse the precomputed hash via the raw-entry API rather than
+ // hashing the key a second time for the map lookup.
+ let result = get_cache(lock).raw_entry().from_key_hashed_nocheck(lookup.key_hash, &key);
+
+ // Hit: invoke `on_hit` while still holding the shard lock.
+ // Miss: hand the key and the still-locked shard to `on_miss` so the
+ // caller can start computing the query.
+ if let Some((_, value)) = result { on_hit(&value.0, value.1) } else { on_miss(key, lookup) }
+ }
+
+ #[inline]
+ fn complete(
+ &self,
+ _: TyCtxt<'tcx>,
+ lock_sharded_storage: &mut Self::Sharded,
+ key: K,
+ value: V,
+ index: DepNodeIndex,
+ ) {
+ // The default cache has no use for `tcx`; just record the result.
+ lock_sharded_storage.insert(key, (value, index));
+ }
+
+ fn iter<R, L>(
+ &self,
+ shards: &Sharded<L>,
+ get_shard: impl Fn(&mut L) -> &mut Self::Sharded,
+ f: impl for<'a> FnOnce(Box<dyn Iterator<Item = (&'a K, &'a V, DepNodeIndex)> + 'a>) -> R,
+ ) -> R {
+ // Lock every shard for the duration of `f` so the iterator observes
+ // a consistent snapshot of the whole cache.
+ let mut shards = shards.lock_shards();
+ let mut shards: Vec<_> = shards.iter_mut().map(|shard| get_shard(shard)).collect();
+ let results = shards.iter_mut().flat_map(|shard| shard.iter()).map(|(k, v)| (k, &v.0, v.1));
+ f(Box::new(results))
+ }
+}
use crate::dep_graph::SerializedDepNodeIndex;
use crate::dep_graph::{DepKind, DepNode};
+use crate::ty::query::caches::QueryCache;
use crate::ty::query::plumbing::CycleError;
use crate::ty::query::queries;
-use crate::ty::query::{Query, QueryCache};
+use crate::ty::query::{Query, QueryState};
use crate::ty::TyCtxt;
use rustc_data_structures::profiling::ProfileCategory;
use rustc_hir::def_id::{CrateNum, DefId};
use crate::ich::StableHashingContext;
use rustc_data_structures::fingerprint::Fingerprint;
-use rustc_data_structures::sharded::Sharded;
use std::borrow::Cow;
use std::fmt::Debug;
use std::hash::Hash;
const ANON: bool;
const EVAL_ALWAYS: bool;
+ type Cache: QueryCache<Self::Key, Self::Value>;
+
fn query(key: Self::Key) -> Query<'tcx>;
// Don't use this method to access query results, instead use the methods on TyCtxt
- fn query_cache<'a>(tcx: TyCtxt<'tcx>) -> &'a Sharded<QueryCache<'tcx, Self>>;
+ fn query_state<'a>(tcx: TyCtxt<'tcx>) -> &'a QueryState<'tcx, Self>;
fn to_dep_node(tcx: TyCtxt<'tcx>, key: &Self::Key) -> DepNode;
}
}
-impl<'tcx, M: QueryAccessors<'tcx, Key = DefId>> QueryDescription<'tcx> for M {
+impl<'tcx, M: QueryAccessors<'tcx, Key = DefId>> QueryDescription<'tcx> for M
+where
+ <M as QueryAccessors<'tcx>>::Cache: QueryCache<DefId, <M as QueryConfig<'tcx>>::Value>,
+{
default fn describe(tcx: TyCtxt<'_>, def_id: DefId) -> Cow<'static, str> {
if !tcx.sess.verbose() {
format!("processing `{}`", tcx.def_path_str(def_id)).into()
use crate::mir;
use crate::traits;
use crate::ty::fast_reject::SimplifiedType;
+use crate::ty::query::caches::DefaultCacheSelector;
use crate::ty::subst::SubstsRef;
use crate::ty::{self, Ty, TyCtxt};
use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE};
/// The `Key` trait controls what types can legally be used as the key
/// for a query.
-pub(super) trait Key {
+pub trait Key {
+ /// Marker type (e.g. `DefaultCacheSelector`) choosing which `QueryCache`
+ /// implementation queries keyed by this type will use.
+ type CacheSelector;
+
/// Given an instance of this key, what crate is it referring to?
/// This is used to find the provider.
fn query_crate(&self) -> CrateNum;
}
impl<'tcx> Key for ty::InstanceDef<'tcx> {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
}
impl<'tcx> Key for ty::Instance<'tcx> {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
}
impl<'tcx> Key for mir::interpret::GlobalId<'tcx> {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
self.instance.query_crate()
}
}
impl<'tcx> Key for mir::interpret::LitToConstInput<'tcx> {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
}
impl Key for CrateNum {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
*self
}
}
impl Key for DefIndex {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
}
impl Key for DefId {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
self.krate
}
}
impl Key for (DefId, DefId) {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
self.0.krate
}
}
impl Key for (CrateNum, DefId) {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
self.0
}
}
impl Key for (DefId, SimplifiedType) {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
self.0.krate
}
}
impl<'tcx> Key for SubstsRef<'tcx> {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
}
impl<'tcx> Key for (DefId, SubstsRef<'tcx>) {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
self.0.krate
}
}
impl<'tcx> Key for (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>) {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
self.1.def_id().krate
}
}
impl<'tcx> Key for (&'tcx ty::Const<'tcx>, mir::Field) {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
}
impl<'tcx> Key for ty::PolyTraitRef<'tcx> {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
self.def_id().krate
}
}
impl<'tcx> Key for &'tcx ty::Const<'tcx> {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
}
impl<'tcx> Key for Ty<'tcx> {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
}
impl<'tcx> Key for ty::ParamEnv<'tcx> {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
}
impl<'tcx, T: Key> Key for ty::ParamEnvAnd<'tcx, T> {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
self.value.query_crate()
}
}
impl<'tcx> Key for traits::Environment<'tcx> {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
}
impl Key for Symbol {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
/// Canonical query goals correspond to abstract trait operations that
/// are not tied to any crate in particular.
impl<'tcx, T> Key for Canonical<'tcx, T> {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
}
impl Key for (Symbol, u32, u32) {
+ type CacheSelector = DefaultCacheSelector;
+
fn query_crate(&self) -> CrateNum {
LOCAL_CRATE
}
mod values;
use self::values::Value;
+mod caches;
+use self::caches::CacheSelector;
+
mod config;
use self::config::QueryAccessors;
pub use self::config::QueryConfig;
.prof
.extra_verbose_generic_activity("encode_query_results_for", ::std::any::type_name::<Q>());
- let shards = Q::query_cache(tcx).lock_shards();
- assert!(shards.iter().all(|shard| shard.active.is_empty()));
- for (key, entry) in shards.iter().flat_map(|shard| shard.results.iter()) {
- if Q::cache_on_disk(tcx, key.clone(), Some(&entry.value)) {
- let dep_node = SerializedDepNodeIndex::new(entry.index.index());
-
- // Record position of the cache entry.
- query_result_index.push((dep_node, AbsoluteBytePos::new(encoder.position())));
-
- // Encode the type check tables with the `SerializedDepNodeIndex`
- // as tag.
- encoder.encode_tagged(dep_node, &entry.value)?;
- }
- }
+ let state = Q::query_state(tcx);
+ assert!(state.all_inactive());
+
+ state.iter_results(|results| {
+ for (key, value, dep_node) in results {
+ if Q::cache_on_disk(tcx, key.clone(), Some(&value)) {
+ let dep_node = SerializedDepNodeIndex::new(dep_node.index());
+
+ // Record position of the cache entry.
+ query_result_index.push((dep_node, AbsoluteBytePos::new(encoder.position())));
- Ok(())
+ // Encode the type check tables with the `SerializedDepNodeIndex`
+ // as tag.
+ encoder.encode_tagged(dep_node, &value)?;
+ }
+ }
+ Ok(())
+ })
}
//! manage the caches, and so forth.
use crate::dep_graph::{DepKind, DepNode, DepNodeIndex, SerializedDepNodeIndex};
-use crate::ty::query::config::{QueryConfig, QueryDescription};
+use crate::ty::query::caches::QueryCache;
+use crate::ty::query::config::{QueryAccessors, QueryDescription};
use crate::ty::query::job::{QueryInfo, QueryJob, QueryJobId, QueryShardJobId};
use crate::ty::query::Query;
use crate::ty::tls;
use std::num::NonZeroU32;
use std::ptr;
-pub struct QueryCache<'tcx, D: QueryConfig<'tcx> + ?Sized> {
- pub(super) results: FxHashMap<D::Key, QueryValue<D::Value>>,
+pub(crate) struct QueryStateShard<'tcx, D: QueryAccessors<'tcx> + ?Sized> {
+ pub(super) cache: <<D as QueryAccessors<'tcx>>::Cache as QueryCache<D::Key, D::Value>>::Sharded,
pub(super) active: FxHashMap<D::Key, QueryResult<'tcx>>,
/// Used to generate unique ids for active jobs.
pub(super) jobs: u32,
+}
+impl<'tcx, Q: QueryAccessors<'tcx>> QueryStateShard<'tcx, Q> {
+ fn get_cache(
+ &mut self,
+ ) -> &mut <<Q as QueryAccessors<'tcx>>::Cache as QueryCache<Q::Key, Q::Value>>::Sharded {
+ &mut self.cache
+ }
+}
+
+impl<'tcx, Q: QueryAccessors<'tcx>> Default for QueryStateShard<'tcx, Q> {
+ fn default() -> QueryStateShard<'tcx, Q> {
+ QueryStateShard { cache: Default::default(), active: Default::default(), jobs: 0 }
+ }
+}
+
+pub(crate) struct QueryState<'tcx, D: QueryAccessors<'tcx> + ?Sized> {
+ pub(super) cache: D::Cache,
+ pub(super) shards: Sharded<QueryStateShard<'tcx, D>>,
#[cfg(debug_assertions)]
pub(super) cache_hits: usize,
}
-pub(super) struct QueryValue<T> {
- pub(super) value: T,
- pub(super) index: DepNodeIndex,
-}
+impl<'tcx, Q: QueryAccessors<'tcx>> QueryState<'tcx, Q> {
+ pub(super) fn get_lookup<K: Hash>(&'tcx self, key: &K) -> QueryLookup<'tcx, Q> {
+ // We compute the key's hash once and then use it for both the
+ // shard lookup and the hashmap lookup. This relies on the fact
+ // that both of them use `FxHasher`.
+ let mut hasher = FxHasher::default();
+ key.hash(&mut hasher);
+ let key_hash = hasher.finish();
-impl<T> QueryValue<T> {
- pub(super) fn new(value: T, dep_node_index: DepNodeIndex) -> QueryValue<T> {
- QueryValue { value, index: dep_node_index }
+ let shard = self.shards.get_shard_index_by_hash(key_hash);
+ let lock = self.shards.get_shard_by_index(shard).lock();
+ QueryLookup { key_hash, shard, lock }
}
}
Poisoned,
}
-impl<'tcx, M: QueryConfig<'tcx>> Default for QueryCache<'tcx, M> {
- fn default() -> QueryCache<'tcx, M> {
- QueryCache {
- results: FxHashMap::default(),
- active: FxHashMap::default(),
- jobs: 0,
+impl<'tcx, M: QueryAccessors<'tcx>> QueryState<'tcx, M> {
+ pub fn iter_results<R>(
+ &self,
+ f: impl for<'a> FnOnce(
+ Box<dyn Iterator<Item = (&'a M::Key, &'a M::Value, DepNodeIndex)> + 'a>,
+ ) -> R,
+ ) -> R {
+ self.cache.iter(&self.shards, |shard| &mut shard.cache, f)
+ }
+ pub fn all_inactive(&self) -> bool {
+ let shards = self.shards.lock_shards();
+ shards.iter().all(|shard| shard.active.is_empty())
+ }
+}
+
+impl<'tcx, M: QueryAccessors<'tcx>> Default for QueryState<'tcx, M> {
+ fn default() -> QueryState<'tcx, M> {
+ QueryState {
+ cache: M::Cache::default(),
+ shards: Default::default(),
#[cfg(debug_assertions)]
cache_hits: 0,
}
}
/// Values used when checking a query cache which can be reused on a cache-miss to execute the query.
-pub(super) struct QueryLookup<'tcx, Q: QueryDescription<'tcx>> {
- shard: usize,
- lock: LockGuard<'tcx, QueryCache<'tcx, Q>>,
+pub(crate) struct QueryLookup<'tcx, Q: QueryAccessors<'tcx>> {
+ pub(super) key_hash: u64,
+ pub(super) shard: usize,
+ pub(super) lock: LockGuard<'tcx, QueryStateShard<'tcx, Q>>,
}
/// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped.
-pub(super) struct JobOwner<'a, 'tcx, Q: QueryDescription<'tcx>> {
- cache: &'a Sharded<QueryCache<'tcx, Q>>,
+pub(super) struct JobOwner<'tcx, Q: QueryDescription<'tcx>> {
+ tcx: TyCtxt<'tcx>,
key: Q::Key,
id: QueryJobId,
}
-impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
+impl<'tcx, Q: QueryDescription<'tcx>> JobOwner<'tcx, Q> {
/// Either gets a `JobOwner` corresponding the query, allowing us to
/// start executing the query, or returns with the result of the query.
/// This function assumes that `try_get_cached` is already called and returned `lookup`.
span: Span,
key: &Q::Key,
mut lookup: QueryLookup<'tcx, Q>,
- ) -> TryGetJob<'a, 'tcx, Q> {
+ ) -> TryGetJob<'tcx, Q> {
let lock = &mut *lookup.lock;
let (latch, mut _query_blocked_prof_timer) = match lock.active.entry((*key).clone()) {
entry.insert(QueryResult::Started(job));
- let owner =
- JobOwner { cache: Q::query_cache(tcx), id: global_id, key: (*key).clone() };
+ let owner = JobOwner { tcx, id: global_id, key: (*key).clone() };
return TryGetJob::NotYetStarted(owner);
}
};
pub(super) fn complete(self, result: &Q::Value, dep_node_index: DepNodeIndex) {
// We can move out of `self` here because we `mem::forget` it below
let key = unsafe { ptr::read(&self.key) };
- let cache = self.cache;
+ let tcx = self.tcx;
// Forget ourself so our destructor won't poison the query
mem::forget(self);
- let value = QueryValue::new(result.clone(), dep_node_index);
let job = {
- let mut lock = cache.get_shard_by_value(&key).lock();
+ let state = Q::query_state(tcx);
+ let result = result.clone();
+ let mut lock = state.shards.get_shard_by_value(&key).lock();
let job = match lock.active.remove(&key).unwrap() {
QueryResult::Started(job) => job,
QueryResult::Poisoned => panic!(),
};
- lock.results.insert(key, value);
+ state.cache.complete(tcx, &mut lock.cache, key, result, dep_node_index);
job
};
(result, diagnostics.into_inner())
}
-impl<'a, 'tcx, Q: QueryDescription<'tcx>> Drop for JobOwner<'a, 'tcx, Q> {
+impl<'tcx, Q: QueryDescription<'tcx>> Drop for JobOwner<'tcx, Q> {
#[inline(never)]
#[cold]
fn drop(&mut self) {
// Poison the query so jobs waiting on it panic.
- let shard = self.cache.get_shard_by_value(&self.key);
+ let state = Q::query_state(self.tcx);
+ let shard = state.shards.get_shard_by_value(&self.key);
let job = {
let mut shard = shard.lock();
let job = match shard.active.remove(&self.key).unwrap() {
pub(super) cycle: Vec<QueryInfo<'tcx>>,
}
-/// The result of `try_get_lock`.
-pub(super) enum TryGetJob<'a, 'tcx, D: QueryDescription<'tcx>> {
+/// The result of `try_start`.
+pub(super) enum TryGetJob<'tcx, D: QueryDescription<'tcx>> {
/// The query is not yet started. Contains a guard to the cache eventually used to start it.
- NotYetStarted(JobOwner<'a, 'tcx, D>),
+ NotYetStarted(JobOwner<'tcx, D>),
/// The query was already completed.
/// Returns the result of the query and its dep-node index
OnHit: FnOnce(&Q::Value, DepNodeIndex) -> R,
OnMiss: FnOnce(Q::Key, QueryLookup<'tcx, Q>) -> R,
{
- let cache = Q::query_cache(self);
-
- // We compute the key's hash once and then use it for both the
- // shard lookup and the hashmap lookup. This relies on the fact
- // that both of them use `FxHasher`.
- let mut state = FxHasher::default();
- key.hash(&mut state);
- let key_hash = state.finish();
-
- let shard = cache.get_shard_index_by_hash(key_hash);
- let mut lock_guard = cache.get_shard_by_index(shard).lock();
- let lock = &mut *lock_guard;
+ let state = Q::query_state(self);
- let result = lock.results.raw_entry().from_key_hashed_nocheck(key_hash, &key);
-
- if let Some((_, value)) = result {
- if unlikely!(self.prof.enabled()) {
- self.prof.query_cache_hit(value.index.into());
- }
-
- on_hit(&value.value, value.index)
- } else {
- on_miss(key, QueryLookup { lock: lock_guard, shard })
- }
+ state.cache.lookup(
+ state,
+ QueryStateShard::<Q>::get_cache,
+ key,
+ |value, index| {
+ if unlikely!(self.prof.enabled()) {
+ self.prof.query_cache_hit(index.into());
+ }
+ on_hit(value, index)
+ },
+ on_miss,
+ )
}
#[inline(never)]
self.dep_graph.read_index(index);
value.clone()
},
- |key, lookup| self.try_execute_query(span, key, lookup),
+ |key, lookup| self.try_execute_query::<Q>(span, key, lookup),
)
}
fn force_query_with_job<Q: QueryDescription<'tcx>>(
self,
key: Q::Key,
- job: JobOwner<'_, 'tcx, Q>,
+ job: JobOwner<'tcx, Q>,
dep_node: DepNode,
) -> (Q::Value, DepNodeIndex) {
// If the following assertion triggers, it can have two reasons:
[$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*) => {
use std::mem;
- use rustc_data_structures::sharded::Sharded;
use crate::{
rustc_data_structures::stable_hasher::HashStable,
rustc_data_structures::stable_hasher::StableHasher,
$(
// We use try_lock_shards here since we are called from the
// deadlock handler, and this shouldn't be locked.
- let shards = self.$name.try_lock_shards()?;
+ let shards = self.$name.shards.try_lock_shards()?;
let shards = shards.iter().enumerate();
jobs.extend(shards.flat_map(|(shard_id, shard)| {
shard.active.iter().filter_map(move |(k, v)| {
- if let QueryResult::Started(ref job) = *v {
+ if let QueryResult::Started(ref job) = *v {
let id = QueryJobId {
job: job.id,
shard: u16::try_from(shard_id).unwrap(),
query: queries::$name::query(k.clone())
};
Some((id, QueryJobInfo { info, job: job.clone() }))
- } else {
- None
- }
+ } else {
+ None
+ }
})
}));
)*
entry_count: usize,
}
- fn stats<'tcx, Q: QueryConfig<'tcx>>(
+ fn stats<'tcx, Q: QueryAccessors<'tcx>>(
name: &'static str,
- map: &Sharded<QueryCache<'tcx, Q>>,
+ map: &QueryState<'tcx, Q>,
) -> QueryStats {
- let map = map.lock_shards();
QueryStats {
name,
#[cfg(debug_assertions)]
- cache_hits: map.iter().map(|shard| shard.cache_hits).sum(),
+ cache_hits: map.cache_hits,
#[cfg(not(debug_assertions))]
cache_hits: 0,
key_size: mem::size_of::<Q::Key>(),
key_type: type_name::<Q::Key>(),
value_size: mem::size_of::<Q::Value>(),
value_type: type_name::<Q::Value>(),
- entry_count: map.iter().map(|shard| shard.results.len()).sum(),
+ entry_count: map.iter_results(|results| results.count()),
}
}
$(impl<$tcx> QueryConfig<$tcx> for queries::$name<$tcx> {
type Key = $K;
type Value = $V;
-
const NAME: &'static str = stringify!($name);
const CATEGORY: ProfileCategory = $category;
}
const ANON: bool = is_anon!([$($modifiers)*]);
const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]);
+ type Cache = <<$K as Key>::CacheSelector as CacheSelector<$K, $V>>::Cache;
+
#[inline(always)]
fn query(key: Self::Key) -> Query<'tcx> {
Query::$name(key)
}
#[inline(always)]
- fn query_cache<'a>(tcx: TyCtxt<$tcx>) -> &'a Sharded<QueryCache<$tcx, Self>> {
+ fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<$tcx, Self> {
&tcx.queries.$name
}
providers: IndexVec<CrateNum, Providers<$tcx>>,
fallback_extern_providers: Box<Providers<$tcx>>,
- $($(#[$attr])* $name: Sharded<QueryCache<$tcx, queries::$name<$tcx>>>,)*
+ $($(#[$attr])* $name: QueryState<$tcx, queries::$name<$tcx>>,)*
}
};
}
use crate::hir::map::definitions::DefPathData;
use crate::ty::context::TyCtxt;
-use crate::ty::query::config::QueryConfig;
-use crate::ty::query::plumbing::QueryCache;
+use crate::ty::query::config::QueryAccessors;
+use crate::ty::query::plumbing::QueryState;
use measureme::{StringComponent, StringId};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::SelfProfiler;
-use rustc_data_structures::sharded::Sharded;
use rustc_hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX, LOCAL_CRATE};
use std::fmt::Debug;
use std::io::Write;
pub(super) fn alloc_self_profile_query_strings_for_query_cache<'tcx, Q>(
tcx: TyCtxt<'tcx>,
query_name: &'static str,
- query_cache: &Sharded<QueryCache<'tcx, Q>>,
+ query_state: &QueryState<'tcx, Q>,
string_cache: &mut QueryKeyStringCache,
) where
- Q: QueryConfig<'tcx>,
+ Q: QueryAccessors<'tcx>,
{
tcx.prof.with_profiler(|profiler| {
let event_id_builder = profiler.event_id_builder();
// need to invoke queries itself, we cannot keep the query caches
// locked while doing so. Instead we copy out the
// `(query_key, dep_node_index)` pairs and release the lock again.
- let query_keys_and_indices = {
- let shards = query_cache.lock_shards();
- let len = shards.iter().map(|shard| shard.results.len()).sum();
-
- let mut query_keys_and_indices = Vec::with_capacity(len);
-
- for shard in &shards {
- query_keys_and_indices.extend(
- shard.results.iter().map(|(q_key, q_val)| (q_key.clone(), q_val.index)),
- );
- }
-
- query_keys_and_indices
- };
+ let query_keys_and_indices: Vec<_> = query_state
+ .iter_results(|results| results.map(|(k, _, i)| (k.clone(), i)).collect());
// Now actually allocate the strings. If allocating the strings
// generates new entries in the query cache, we'll miss them but
let query_name = profiler.get_or_alloc_cached_string(query_name);
let event_id = event_id_builder.from_label(query_name).to_string_id();
- let shards = query_cache.lock_shards();
+ query_state.iter_results(|results| {
+ let query_invocation_ids: Vec<_> = results.map(|v| v.2.into()).collect();
- for shard in shards.iter() {
- let query_invocation_ids = shard
- .results
- .values()
- .map(|v| v.index)
- .map(|dep_node_index| dep_node_index.into());
-
- profiler
- .bulk_map_query_invocation_id_to_single_string(query_invocation_ids, event_id);
- }
+ profiler.bulk_map_query_invocation_id_to_single_string(
+ query_invocation_ids.into_iter(),
+ event_id,
+ );
+ });
}
});
}