use syntax_pos::{BytePos, Span, DUMMY_SP, FileMap};
use syntax_pos::hygiene::{Mark, SyntaxContext, ExpnInfo};
use ty;
-use ty::maps::job::QueryResult;
use ty::codec::{self as ty_codec, TyDecoder, TyEncoder};
use ty::context::TyCtxt;
use util::common::time;
// const eval is special, it only encodes successfully evaluated constants
use ty::maps::QueryConfig;
- for (key, entry) in const_eval::query_map(tcx).borrow().map.iter() {
+ let map = const_eval::query_map(tcx).borrow();
+ assert!(map.active.is_empty());
+ for (key, entry) in map.results.iter() {
use ty::maps::config::QueryDescription;
if const_eval::cache_on_disk(key.clone()) {
- let entry = match *entry {
- QueryResult::Complete(ref v) => v,
- _ => panic!("incomplete query"),
- };
if let Ok(ref value) = entry.value {
let dep_node = SerializedDepNodeIndex::new(entry.index.index());
time(tcx.sess, desc, || {
- for (key, entry) in Q::query_map(tcx).borrow().map.iter() {
+ let map = Q::query_map(tcx).borrow();
+ assert!(map.active.is_empty());
+ for (key, entry) in map.results.iter() {
if Q::cache_on_disk(key.clone()) {
- let entry = match *entry {
- QueryResult::Complete(ref v) => v,
- _ => panic!("incomplete query"),
- };
let dep_node = SerializedDepNodeIndex::new(entry.index.index());
// Record position of the cache entry
use syntax::codemap::DUMMY_SP;
// Per-query storage. This patch splits the former single `map` (which keyed
// every entry as a `QueryResult`) into two disjoint tables:
//   - `results`: queries that have finished successfully, holding the cached
//     value plus its dep-node index (`QueryValue`),
//   - `active`: queries currently executing (`QueryResult::Started`) or whose
//     executing thread panicked (`QueryResult::Poisoned`).
// A key is expected to live in at most one of the two maps at a time.
pub struct QueryMap<'tcx, D: QueryConfig<'tcx> + ?Sized> {
- pub(super) map: FxHashMap<D::Key, QueryResult<'tcx, QueryValue<D::Value>>>,
+ pub(super) results: FxHashMap<D::Key, QueryValue<D::Value>>,
+ pub(super) active: FxHashMap<D::Key, QueryResult<'tcx>>,
}
pub(super) struct QueryValue<T> {
impl<'tcx, M: QueryConfig<'tcx>> QueryMap<'tcx, M> {
// Creates an empty query map: no cached results and no in-flight jobs.
// The old single `map` field is replaced by the `results`/`active` pair,
// both initialized empty (`FxHashMap()` is the pre-2018 tuple-struct
// constructor used by this codebase).
pub(super) fn new() -> QueryMap<'tcx, M> {
QueryMap {
- map: FxHashMap(),
+ results: FxHashMap(),
+ active: FxHashMap(),
}
}
}
let map = Q::query_map(tcx);
loop {
let mut lock = map.borrow_mut();
- let job = match lock.map.entry((*key).clone()) {
+ if let Some(value) = lock.results.get(key) {
+ profq_msg!(tcx, ProfileQueriesMsg::CacheHit);
+ let result = Ok((value.value.clone(), value.index));
+ return TryGetJob::JobCompleted(result);
+ }
+ let job = match lock.active.entry((*key).clone()) {
Entry::Occupied(entry) => {
match *entry.get() {
QueryResult::Started(ref job) => job.clone(),
- QueryResult::Complete(ref value) => {
- profq_msg!(tcx, ProfileQueriesMsg::CacheHit);
- let result = Ok((value.value.clone(), value.index));
- return TryGetJob::JobCompleted(result);
- },
QueryResult::Poisoned => FatalError.raise(),
}
}
mem::forget(self);
let value = QueryValue::new(result.clone(), dep_node_index);
- map.borrow_mut().map.insert(key, QueryResult::Complete(value));
+ {
+ let mut lock = map.borrow_mut();
+ lock.active.remove(&key);
+ lock.results.insert(key, value);
+ }
job.signal_complete();
}
impl<'a, 'tcx, Q: QueryDescription<'tcx>> Drop for JobOwner<'a, 'tcx, Q> {
fn drop(&mut self) {
// Poison the query so jobs waiting on it panic
- self.map.borrow_mut().map.insert(self.key.clone(), QueryResult::Poisoned);
+ self.map.borrow_mut().active.insert(self.key.clone(), QueryResult::Poisoned);
// Also signal the completion of the job, so waiters
// will continue execution
self.job.signal_complete();