+++ /dev/null
-To learn more about how dependency tracking works in rustc, see the [rustc
-dev guide].
-
-[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/query.html
+++ /dev/null
-//! Code for debugging the dep-graph.
-
-use super::dep_node::DepNode;
-use std::error::Error;
-
-/// A dep-node filter goes from a user-defined string to a query over
-/// nodes. Right now the format is like this:
-///
-/// x & y & z
-///
-/// where the format-string of the dep-node must contain `x`, `y`, and
-/// `z`.
-#[derive(Debug)]
-pub struct DepNodeFilter {
- text: String,
-}
-
-impl DepNodeFilter {
- pub fn new(text: &str) -> Self {
- DepNodeFilter { text: text.trim().to_string() }
- }
-
- /// Returns `true` if all nodes always pass the filter.
- pub fn accepts_all(&self) -> bool {
- self.text.is_empty()
- }
-
- /// Tests whether `node` meets the filter, returning true if so.
- pub fn test(&self, node: &DepNode) -> bool {
- let debug_str = format!("{:?}", node);
- self.text.split('&').map(|s| s.trim()).all(|f| debug_str.contains(f))
- }
-}
-
-/// A filter like `F -> G` where `F` and `G` are valid dep-node
-/// filters. This can be used to test the source/target independently.
-pub struct EdgeFilter {
- pub source: DepNodeFilter,
- pub target: DepNodeFilter,
-}
-
-impl EdgeFilter {
- pub fn new(test: &str) -> Result<EdgeFilter, Box<dyn Error>> {
- let parts: Vec<_> = test.split("->").collect();
- if parts.len() != 2 {
- Err(format!("expected a filter like `a&b -> c&d`, not `{}`", test).into())
- } else {
- Ok(EdgeFilter {
- source: DepNodeFilter::new(parts[0]),
- target: DepNodeFilter::new(parts[1]),
- })
- }
- }
-
- pub fn test(&self, source: &DepNode, target: &DepNode) -> bool {
- self.source.test(source) && self.target.test(target)
- }
-}
+++ /dev/null
-use crate::ty::{self, TyCtxt};
-use parking_lot::{Condvar, Mutex};
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_data_structures::profiling::QueryInvocationId;
-use rustc_data_structures::sharded::{self, Sharded};
-use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
-use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
-use rustc_errors::Diagnostic;
-use rustc_hir::def_id::DefId;
-use rustc_index::vec::{Idx, IndexVec};
-use smallvec::SmallVec;
-use std::collections::hash_map::Entry;
-use std::env;
-use std::hash::Hash;
-use std::mem;
-use std::sync::atomic::Ordering::Relaxed;
-
-use crate::ich::{Fingerprint, StableHashingContext, StableHashingContextProvider};
-
-use super::debug::EdgeFilter;
-use super::dep_node::{DepKind, DepNode, WorkProductId};
-use super::prev::PreviousDepGraph;
-use super::query::DepGraphQuery;
-use super::safe::DepGraphSafe;
-use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
-
-#[derive(Clone)]
-pub struct DepGraph {
- data: Option<Lrc<DepGraphData>>,
-
- /// This field is used for assigning DepNodeIndices when running in
- /// non-incremental mode. Even in non-incremental mode we make sure that
- /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
- /// ID is used for self-profiling.
- virtual_dep_node_index: Lrc<AtomicU32>,
-}
-
-rustc_index::newtype_index! {
- pub struct DepNodeIndex { .. }
-}
-
-impl DepNodeIndex {
- pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
-}
-
-impl std::convert::From<DepNodeIndex> for QueryInvocationId {
- #[inline]
- fn from(dep_node_index: DepNodeIndex) -> Self {
- QueryInvocationId(dep_node_index.as_u32())
- }
-}
-
-#[derive(PartialEq)]
-pub enum DepNodeColor {
- Red,
- Green(DepNodeIndex),
-}
-
-impl DepNodeColor {
- pub fn is_green(self) -> bool {
- match self {
- DepNodeColor::Red => false,
- DepNodeColor::Green(_) => true,
- }
- }
-}
-
-struct DepGraphData {
- /// The new encoding of the dependency graph, optimized for red/green
- /// tracking. The `current` field is the dependency graph of only the
- /// current compilation session: we don't merge the previous dep-graph into
- /// the current one anymore.
- current: CurrentDepGraph,
-
- /// The dep-graph from the previous compilation session. It contains all
- /// nodes and edges as well as all fingerprints of nodes that have them.
- previous: PreviousDepGraph,
-
- colors: DepNodeColorMap,
-
- /// A set of loaded diagnostics that is in the process of being emitted.
- emitting_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,
-
- /// Used to wait for diagnostics to be emitted.
- emitting_diagnostics_cond_var: Condvar,
-
- /// When we load, there may be `.o` files, cached MIR, or other such
- /// things available to us. If we find that they are not dirty, we
- /// load the paths to the files storing those work-products into
- /// this map. We can later look them up and extract that data.
- previous_work_products: FxHashMap<WorkProductId, WorkProduct>,
-
- dep_node_debug: Lock<FxHashMap<DepNode, String>>,
-}
-
-pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Option<Fingerprint>
-where
- R: for<'a> HashStable<StableHashingContext<'a>>,
-{
- let mut stable_hasher = StableHasher::new();
- result.hash_stable(hcx, &mut stable_hasher);
-
- Some(stable_hasher.finish())
-}
-
-impl DepGraph {
- pub fn new(
- prev_graph: PreviousDepGraph,
- prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
- ) -> DepGraph {
- let prev_graph_node_count = prev_graph.node_count();
-
- DepGraph {
- data: Some(Lrc::new(DepGraphData {
- previous_work_products: prev_work_products,
- dep_node_debug: Default::default(),
- current: CurrentDepGraph::new(prev_graph_node_count),
- emitting_diagnostics: Default::default(),
- emitting_diagnostics_cond_var: Condvar::new(),
- previous: prev_graph,
- colors: DepNodeColorMap::new(prev_graph_node_count),
- })),
- virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
- }
- }
-
- pub fn new_disabled() -> DepGraph {
- DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
- }
-
- /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
- #[inline]
- pub fn is_fully_enabled(&self) -> bool {
- self.data.is_some()
- }
-
- pub fn query(&self) -> DepGraphQuery {
- let data = self.data.as_ref().unwrap().current.data.lock();
- let nodes: Vec<_> = data.iter().map(|n| n.node).collect();
- let mut edges = Vec::new();
- for (from, edge_targets) in data.iter().map(|d| (d.node, &d.edges)) {
- for &edge_target in edge_targets.iter() {
- let to = data[edge_target].node;
- edges.push((from, to));
- }
- }
-
- DepGraphQuery::new(&nodes[..], &edges[..])
- }
-
- pub fn assert_ignored(&self) {
- if let Some(..) = self.data {
- ty::tls::with_context_opt(|icx| {
- let icx = if let Some(icx) = icx { icx } else { return };
- assert!(icx.task_deps.is_none(), "expected no task dependency tracking");
- })
- }
- }
-
- pub fn with_ignore<OP, R>(&self, op: OP) -> R
- where
- OP: FnOnce() -> R,
- {
- ty::tls::with_context(|icx| {
- let icx = ty::tls::ImplicitCtxt { task_deps: None, ..icx.clone() };
-
- ty::tls::enter_context(&icx, |_| op())
- })
- }
-
- /// Starts a new dep-graph task. Dep-graph tasks are specified
- /// using a free function (`task`) and **not** a closure -- this
- /// is intentional because we want to exercise tight control over
- /// what state they have access to. In particular, we want to
- /// prevent implicit 'leaks' of tracked state into the task (which
- /// could then be read without generating correct edges in the
- /// dep-graph -- see the [rustc dev guide] for more details on
- /// the dep-graph). To this end, the task function gets exactly two
- /// pieces of state: the context `cx` and an argument `arg`. Both
- /// of these bits of state must be of some type that implements
- /// `DepGraphSafe` and hence does not leak.
- ///
- /// The choice of two arguments is not fundamental. One argument
- /// would work just as well, since multiple values can be
- /// collected using tuples. However, using two arguments works out
- /// to be quite convenient, since it is common to need a context
- /// (`cx`) and some argument (e.g., a `DefId` identifying what
- /// item to process).
- ///
- /// For cases where you need some other number of arguments:
- ///
- /// - If you only need one argument, just use `()` for the `arg`
- /// parameter.
- /// - If you need 3+ arguments, use a tuple for the
- /// `arg` parameter.
- ///
- /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
- pub fn with_task<'a, C, A, R>(
- &self,
- key: DepNode,
- cx: C,
- arg: A,
- task: fn(C, A) -> R,
- hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
- ) -> (R, DepNodeIndex)
- where
- C: DepGraphSafe + StableHashingContextProvider<'a>,
- {
- self.with_task_impl(
- key,
- cx,
- arg,
- false,
- task,
- |_key| {
- Some(TaskDeps {
- #[cfg(debug_assertions)]
- node: Some(_key),
- reads: SmallVec::new(),
- read_set: Default::default(),
- })
- },
- |data, key, fingerprint, task| data.complete_task(key, task.unwrap(), fingerprint),
- hash_result,
- )
- }
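The signature above is doing real work: because `task` is a plain `fn` pointer rather than a generic closure, it cannot capture tracked state from its environment. A minimal standalone sketch of that property (invented names, not rustc code):

```rust
struct Ctx {
    tracked_value: u32,
}

// Model of the `with_task` shape: tasks are free functions over explicitly
// passed state. The real `with_task_impl` would install `TaskDeps` here so
// that reads performed inside `task` get recorded as dep-graph edges.
fn run_task<C, A, R>(cx: C, arg: A, task: fn(C, A) -> R) -> R {
    task(cx, arg)
}

fn type_check(cx: &Ctx, item_count: u32) -> u32 {
    cx.tracked_value + item_count
}

fn main() {
    let cx = Ctx { tracked_value: 40 };
    // A closure that captured `cx` would not coerce to `fn(C, A) -> R`,
    // which is exactly the implicit "leak" the real API rules out.
    let result = run_task(&cx, 2, type_check);
    assert_eq!(result, 42);
}
```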
-
- fn with_task_impl<'a, C, A, R>(
- &self,
- key: DepNode,
- cx: C,
- arg: A,
- no_tcx: bool,
- task: fn(C, A) -> R,
- create_task: fn(DepNode) -> Option<TaskDeps>,
- finish_task_and_alloc_depnode: fn(
- &CurrentDepGraph,
- DepNode,
- Fingerprint,
- Option<TaskDeps>,
- ) -> DepNodeIndex,
- hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
- ) -> (R, DepNodeIndex)
- where
- C: DepGraphSafe + StableHashingContextProvider<'a>,
- {
- if let Some(ref data) = self.data {
- let task_deps = create_task(key).map(Lock::new);
-
- // In incremental mode, hash the result of the task. We don't
- // do anything with the hash yet, but we are computing it
- // anyway so that
- // - we make sure that the infrastructure works and
- // - we can get an idea of the runtime cost.
- let mut hcx = cx.get_stable_hashing_context();
-
- let result = if no_tcx {
- task(cx, arg)
- } else {
- ty::tls::with_context(|icx| {
- let icx =
- ty::tls::ImplicitCtxt { task_deps: task_deps.as_ref(), ..icx.clone() };
-
- ty::tls::enter_context(&icx, |_| task(cx, arg))
- })
- };
-
- let current_fingerprint = hash_result(&mut hcx, &result);
-
- let dep_node_index = finish_task_and_alloc_depnode(
- &data.current,
- key,
- current_fingerprint.unwrap_or(Fingerprint::ZERO),
- task_deps.map(|lock| lock.into_inner()),
- );
-
- let print_status = cfg!(debug_assertions) && hcx.sess().opts.debugging_opts.dep_tasks;
-
- // Determine the color of the new DepNode.
- if let Some(prev_index) = data.previous.node_to_index_opt(&key) {
- let prev_fingerprint = data.previous.fingerprint_by_index(prev_index);
-
- let color = if let Some(current_fingerprint) = current_fingerprint {
- if current_fingerprint == prev_fingerprint {
- if print_status {
- eprintln!("[task::green] {:?}", key);
- }
- DepNodeColor::Green(dep_node_index)
- } else {
- if print_status {
- eprintln!("[task::red] {:?}", key);
- }
- DepNodeColor::Red
- }
- } else {
- if print_status {
- eprintln!("[task::unknown] {:?}", key);
- }
- // Mark the node as Red if we can't hash the result
- DepNodeColor::Red
- };
-
- debug_assert!(
- data.colors.get(prev_index).is_none(),
- "DepGraph::with_task() - Duplicate DepNodeColor \
- insertion for {:?}",
- key
- );
-
- data.colors.insert(prev_index, color);
- } else {
- if print_status {
- eprintln!("[task::new] {:?}", key);
- }
- }
-
- (result, dep_node_index)
- } else {
- (task(cx, arg), self.next_virtual_depnode_index())
- }
- }
-
- /// Executes something within an "anonymous" task, that is, a task the
- /// `DepNode` of which is determined by the list of inputs it read from.
- pub fn with_anon_task<OP, R>(&self, dep_kind: DepKind, op: OP) -> (R, DepNodeIndex)
- where
- OP: FnOnce() -> R,
- {
- if let Some(ref data) = self.data {
- let (result, task_deps) = ty::tls::with_context(|icx| {
- let task_deps = Lock::new(TaskDeps::default());
-
- let r = {
- let icx = ty::tls::ImplicitCtxt { task_deps: Some(&task_deps), ..icx.clone() };
-
- ty::tls::enter_context(&icx, |_| op())
- };
-
- (r, task_deps.into_inner())
- });
- let dep_node_index = data.current.complete_anon_task(dep_kind, task_deps);
- (result, dep_node_index)
- } else {
- (op(), self.next_virtual_depnode_index())
- }
- }
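As a rough standalone model of the mechanism (invented names; the real `complete_anon_task` further down hashes `DepNodeIndex`es with a `StableHasher` and mixes in `anon_id_seed` via `Fingerprint::combine`):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// An anonymous node's identity is derived from the dependencies the task
// read, mixed with a per-session seed so ids never collide across sessions.
fn anon_node_id(session_seed: u64, reads: &[u32]) -> u64 {
    let mut hasher = DefaultHasher::new();
    reads.hash(&mut hasher);
    session_seed ^ hasher.finish() // stand-in for `Fingerprint::combine`
}

fn main() {
    // Two anonymous tasks with identical reads coalesce to the same id...
    assert_eq!(anon_node_id(7, &[1, 2, 3]), anon_node_id(7, &[1, 2, 3]));
    // ...while a different seed keeps sessions from shadowing each other.
    assert_ne!(anon_node_id(7, &[1, 2, 3]), anon_node_id(8, &[1, 2, 3]));
}
```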
-
- /// Executes something within an "eval-always" task which is a task
- /// that runs whenever anything changes.
- pub fn with_eval_always_task<'a, C, A, R>(
- &self,
- key: DepNode,
- cx: C,
- arg: A,
- task: fn(C, A) -> R,
- hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
- ) -> (R, DepNodeIndex)
- where
- C: DepGraphSafe + StableHashingContextProvider<'a>,
- {
- self.with_task_impl(
- key,
- cx,
- arg,
- false,
- task,
- |_| None,
- |data, key, fingerprint, _| data.alloc_node(key, smallvec![], fingerprint),
- hash_result,
- )
- }
-
- #[inline]
- pub fn read(&self, v: DepNode) {
- if let Some(ref data) = self.data {
- let map = data.current.node_to_node_index.get_shard_by_value(&v).lock();
- if let Some(dep_node_index) = map.get(&v).copied() {
- std::mem::drop(map);
- data.read_index(dep_node_index);
- } else {
- bug!("DepKind {:?} should be pre-allocated but isn't.", v.kind)
- }
- }
- }
-
- #[inline]
- pub fn read_index(&self, dep_node_index: DepNodeIndex) {
- if let Some(ref data) = self.data {
- data.read_index(dep_node_index);
- }
- }
-
- #[inline]
- pub fn dep_node_index_of(&self, dep_node: &DepNode) -> DepNodeIndex {
- self.data
- .as_ref()
- .unwrap()
- .current
- .node_to_node_index
- .get_shard_by_value(dep_node)
- .lock()
- .get(dep_node)
- .cloned()
- .unwrap()
- }
-
- #[inline]
- pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
- if let Some(ref data) = self.data {
- data.current
- .node_to_node_index
- .get_shard_by_value(&dep_node)
- .lock()
- .contains_key(dep_node)
- } else {
- false
- }
- }
-
- #[inline]
- pub fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint {
- let data = self.data.as_ref().expect("dep graph enabled").current.data.lock();
- data[dep_node_index].fingerprint
- }
-
- pub fn prev_fingerprint_of(&self, dep_node: &DepNode) -> Option<Fingerprint> {
- self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
- }
-
- #[inline]
- pub fn prev_dep_node_index_of(&self, dep_node: &DepNode) -> SerializedDepNodeIndex {
- self.data.as_ref().unwrap().previous.node_to_index(dep_node)
- }
-
- /// Checks whether a previous work product exists for `v` and, if
- /// so, returns the path that leads to it. Used to skip doing work.
- pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
- self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
- }
-
- /// Access the map of work-products created during the cached run. Only
- /// used during saving of the dep-graph.
- pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
- &self.data.as_ref().unwrap().previous_work_products
- }
-
- #[inline(always)]
- pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)
- where
- F: FnOnce() -> String,
- {
- let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;
-
- if dep_node_debug.borrow().contains_key(&dep_node) {
- return;
- }
- let debug_str = debug_str_gen();
- dep_node_debug.borrow_mut().insert(dep_node, debug_str);
- }
-
- pub(super) fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
- self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
- }
-
- pub fn edge_deduplication_data(&self) -> Option<(u64, u64)> {
- if cfg!(debug_assertions) {
- let current_dep_graph = &self.data.as_ref().unwrap().current;
-
- Some((
- current_dep_graph.total_read_count.load(Relaxed),
- current_dep_graph.total_duplicate_read_count.load(Relaxed),
- ))
- } else {
- None
- }
- }
-
- pub fn serialize(&self) -> SerializedDepGraph {
- let data = self.data.as_ref().unwrap().current.data.lock();
-
- let fingerprints: IndexVec<SerializedDepNodeIndex, _> =
- data.iter().map(|d| d.fingerprint).collect();
- let nodes: IndexVec<SerializedDepNodeIndex, _> = data.iter().map(|d| d.node).collect();
-
- let total_edge_count: usize = data.iter().map(|d| d.edges.len()).sum();
-
- let mut edge_list_indices = IndexVec::with_capacity(nodes.len());
- let mut edge_list_data = Vec::with_capacity(total_edge_count);
-
- for (current_dep_node_index, edges) in data.iter_enumerated().map(|(i, d)| (i, &d.edges)) {
- let start = edge_list_data.len() as u32;
- // This should really just be a memcpy :/
- edge_list_data.extend(edges.iter().map(|i| SerializedDepNodeIndex::new(i.index())));
- let end = edge_list_data.len() as u32;
-
- debug_assert_eq!(current_dep_node_index.index(), edge_list_indices.len());
- edge_list_indices.push((start, end));
- }
-
- debug_assert!(edge_list_data.len() <= u32::MAX as usize);
- debug_assert_eq!(edge_list_data.len(), total_edge_count);
-
- SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data }
- }
-
- pub fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
- if let Some(ref data) = self.data {
- if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
- return data.colors.get(prev_index);
- } else {
- // This is a node that did not exist in the previous compilation
- // session, so we consider it to be red.
- return Some(DepNodeColor::Red);
- }
- }
-
- None
- }
-
- /// Try to read a node index for the node `dep_node`.
- /// A node will have an index when it has already been marked green or when we can mark
- /// it green. This function will mark the current task as a reader of the specified node
- /// if a node index can be found for that node.
- pub fn try_mark_green_and_read(
- &self,
- tcx: TyCtxt<'_>,
- dep_node: &DepNode,
- ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
- self.try_mark_green(tcx, dep_node).map(|(prev_index, dep_node_index)| {
- debug_assert!(self.is_green(&dep_node));
- self.read_index(dep_node_index);
- (prev_index, dep_node_index)
- })
- }
-
- pub fn try_mark_green(
- &self,
- tcx: TyCtxt<'_>,
- dep_node: &DepNode,
- ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
- debug_assert!(!dep_node.kind.is_eval_always());
-
- // Return None if the dep graph is disabled
- let data = self.data.as_ref()?;
-
- // Return None if the dep node didn't exist in the previous session
- let prev_index = data.previous.node_to_index_opt(dep_node)?;
-
- match data.colors.get(prev_index) {
- Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
- Some(DepNodeColor::Red) => None,
- None => {
- // This DepNode and the corresponding query invocation existed
- // in the previous compilation session too, so we can try to
- // mark it as green by recursively marking all of its
- // dependencies green.
- self.try_mark_previous_green(tcx, data, prev_index, &dep_node)
- .map(|dep_node_index| (prev_index, dep_node_index))
- }
- }
- }
-
- /// Try to mark a dep-node which existed in the previous compilation session as green.
- fn try_mark_previous_green<'tcx>(
- &self,
- tcx: TyCtxt<'tcx>,
- data: &DepGraphData,
- prev_dep_node_index: SerializedDepNodeIndex,
- dep_node: &DepNode,
- ) -> Option<DepNodeIndex> {
- debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);
-
- #[cfg(not(parallel_compiler))]
- {
- debug_assert!(
- !data
- .current
- .node_to_node_index
- .get_shard_by_value(dep_node)
- .lock()
- .contains_key(dep_node)
- );
- debug_assert!(data.colors.get(prev_dep_node_index).is_none());
- }
-
- // We never try to mark eval_always nodes as green
- debug_assert!(!dep_node.kind.is_eval_always());
-
- debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);
-
- let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);
-
- let mut current_deps = SmallVec::new();
-
- for &dep_dep_node_index in prev_deps {
- let dep_dep_node_color = data.colors.get(dep_dep_node_index);
-
- match dep_dep_node_color {
- Some(DepNodeColor::Green(node_index)) => {
- // This dependency has been marked as green before, we are
- // still fine and can continue with checking the other
- // dependencies.
- debug!(
- "try_mark_previous_green({:?}) --- found dependency {:?} to \
- be immediately green",
- dep_node,
- data.previous.index_to_node(dep_dep_node_index)
- );
- current_deps.push(node_index);
- }
- Some(DepNodeColor::Red) => {
- // We found a dependency whose value has changed
- // compared to the previous compilation session. We
- // cannot mark the DepNode as green, and we don't need
- // to bother checking any of the other dependencies.
- debug!(
- "try_mark_previous_green({:?}) - END - dependency {:?} was \
- immediately red",
- dep_node,
- data.previous.index_to_node(dep_dep_node_index)
- );
- return None;
- }
- None => {
- let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index);
-
- // We don't know the state of this dependency. If it isn't
- // an eval_always node, let's try to mark it green recursively.
- if !dep_dep_node.kind.is_eval_always() {
- debug!(
- "try_mark_previous_green({:?}) --- state of dependency {:?} \
- is unknown, trying to mark it green",
- dep_node, dep_dep_node
- );
-
- let node_index = self.try_mark_previous_green(
- tcx,
- data,
- dep_dep_node_index,
- dep_dep_node,
- );
- if let Some(node_index) = node_index {
- debug!(
- "try_mark_previous_green({:?}) --- managed to MARK \
- dependency {:?} as green",
- dep_node, dep_dep_node
- );
- current_deps.push(node_index);
- continue;
- }
- } else {
- // FIXME: This match is just a workaround for incremental bugs and should
- // be removed. https://github.com/rust-lang/rust/issues/62649 is one such
- // bug that must be fixed before removing this.
- match dep_dep_node.kind {
- DepKind::hir_owner
- | DepKind::hir_owner_nodes
- | DepKind::CrateMetadata => {
- if let Some(def_id) = dep_dep_node.extract_def_id(tcx) {
- if def_id_corresponds_to_hir_dep_node(tcx, def_id) {
- if dep_dep_node.kind == DepKind::CrateMetadata {
- // The `DefPath` has a corresponding node,
- // and that node should have been marked
- // either red or green in `data.colors`.
- bug!(
- "DepNode {:?} should have been \
- pre-marked as red or green but wasn't.",
- dep_dep_node
- );
- }
- } else {
- // This `DefPath` does not have a
- // corresponding `DepNode` (e.g. a
- // struct field), and the `DefPath`
- // collided with the `DefPath` of a
- // proper item that existed in the
- // previous compilation session.
- //
- // Since the given `DefPath` does not
- // denote the item that previously
- // existed, we just fail to mark green.
- return None;
- }
- } else {
- // If the node does not exist anymore, we
- // just fail to mark green.
- return None;
- }
- }
- _ => {
- // For other kinds of nodes it's OK to be
- // forced.
- }
- }
- }
-
- // We failed to mark it green, so we try to force the query.
- debug!(
- "try_mark_previous_green({:?}) --- trying to force \
- dependency {:?}",
- dep_node, dep_dep_node
- );
- if crate::ty::query::force_from_dep_node(tcx, dep_dep_node) {
- let dep_dep_node_color = data.colors.get(dep_dep_node_index);
-
- match dep_dep_node_color {
- Some(DepNodeColor::Green(node_index)) => {
- debug!(
- "try_mark_previous_green({:?}) --- managed to \
- FORCE dependency {:?} to green",
- dep_node, dep_dep_node
- );
- current_deps.push(node_index);
- }
- Some(DepNodeColor::Red) => {
- debug!(
- "try_mark_previous_green({:?}) - END - \
- dependency {:?} was red after forcing",
- dep_node, dep_dep_node
- );
- return None;
- }
- None => {
- if !tcx.sess.has_errors_or_delayed_span_bugs() {
- bug!(
- "try_mark_previous_green() - Forcing the DepNode \
- should have set its color"
- )
- } else {
- // If the query we just forced has resulted in
- // some kind of compilation error, we cannot rely on
- // the dep-node color having been properly updated.
- // This means that the query system has reached an
- // invalid state. We let the compiler continue (by
- // returning `None`) so it can emit error messages
- // and wind down, but rely on the fact that this
- // invalid state will not be persisted to the
- // incremental compilation cache because of
- // compilation errors being present.
- debug!(
- "try_mark_previous_green({:?}) - END - \
- dependency {:?} resulted in compilation error",
- dep_node, dep_dep_node
- );
- return None;
- }
- }
- }
- } else {
- // The DepNode could not be forced.
- debug!(
- "try_mark_previous_green({:?}) - END - dependency {:?} \
- could not be forced",
- dep_node, dep_dep_node
- );
- return None;
- }
- }
- }
- }
-
- // If we got here without hitting a `return`, that means that all
- // dependencies of this DepNode could be marked as green. Therefore we
- // can also mark this DepNode as green.
-
- // There may be multiple threads trying to mark the same dep node green concurrently
-
- let dep_node_index = {
- // Copy the fingerprint from the previous graph,
- // so we don't have to recompute it
- let fingerprint = data.previous.fingerprint_by_index(prev_dep_node_index);
-
- // We allocate an entry for the node in the current dependency graph and
- // add all the appropriate edges imported from the previous graph.
- data.current.intern_node(*dep_node, current_deps, fingerprint)
- };
-
- // ... emitting any stored diagnostic ...
-
- // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
- // Maybe store a list on disk and encode this fact in the DepNodeState
- let diagnostics = tcx.queries.on_disk_cache.load_diagnostics(tcx, prev_dep_node_index);
-
- #[cfg(not(parallel_compiler))]
- debug_assert!(
- data.colors.get(prev_dep_node_index).is_none(),
- "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
- insertion for {:?}",
- dep_node
- );
-
- if unlikely!(!diagnostics.is_empty()) {
- self.emit_diagnostics(tcx, data, dep_node_index, prev_dep_node_index, diagnostics);
- }
-
- // ... and finally storing a "Green" entry in the color map.
- // Multiple threads can all write the same color here
- data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
-
- debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);
- Some(dep_node_index)
- }
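Stripped of forcing, eval-always handling, diagnostics, and concurrency, the marking logic above reduces to a small recursive rule: a previous-session node may be marked green only if its own fingerprint is unchanged and every dependency can itself be marked green. A toy, self-contained version for intuition (invented names; the real code forces unknown dependencies instead of coloring them red directly):

```rust
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Debug)]
enum Color {
    Red,
    Green,
}

struct PrevGraph {
    // Dependencies of each node, as recorded in the previous session.
    deps: HashMap<&'static str, Vec<&'static str>>,
    // Nodes whose recomputed fingerprints differ in this session.
    changed: Vec<&'static str>,
}

fn try_mark_green(
    g: &PrevGraph,
    node: &'static str,
    colors: &mut HashMap<&'static str, Color>,
) -> bool {
    if let Some(&color) = colors.get(node) {
        return color == Color::Green; // already colored in this session
    }
    let deps = g.deps.get(node).cloned().unwrap_or_default();
    // Green iff the node itself is unchanged and every dependency is green.
    let green = !g.changed.iter().any(|&c| c == node)
        && deps.iter().all(|&d| try_mark_green(g, d, colors));
    colors.insert(node, if green { Color::Green } else { Color::Red });
    green
}

fn main() {
    let g = PrevGraph {
        deps: HashMap::from([("typeck", vec!["hir"]), ("hir", vec![])]),
        changed: vec!["hir"],
    };
    let mut colors = HashMap::new();
    // `hir` changed, so `typeck` cannot be marked green either.
    assert!(!try_mark_green(&g, "typeck", &mut colors));
    assert_eq!(colors["typeck"], Color::Red);
}
```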
-
- /// Atomically emits some loaded diagnostics.
- /// This may be called concurrently on multiple threads for the same dep node.
- #[cold]
- #[inline(never)]
- fn emit_diagnostics<'tcx>(
- &self,
- tcx: TyCtxt<'tcx>,
- data: &DepGraphData,
- dep_node_index: DepNodeIndex,
- prev_dep_node_index: SerializedDepNodeIndex,
- diagnostics: Vec<Diagnostic>,
- ) {
- let mut emitting = data.emitting_diagnostics.lock();
-
- if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
- // The node is already green so diagnostics must have been emitted already
- return;
- }
-
- if emitting.insert(dep_node_index) {
- // We were the first to insert the node in the set so this thread
- // must emit the diagnostics and signal other potentially waiting
- // threads after.
- mem::drop(emitting);
-
- // Promote the previous diagnostics to the current session.
- tcx.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics.clone().into());
-
- let handle = tcx.sess.diagnostic();
-
- for diagnostic in diagnostics {
- handle.emit_diagnostic(&diagnostic);
- }
-
- // Mark the node as green now that diagnostics are emitted
- data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
-
- // Remove the node from the set
- data.emitting_diagnostics.lock().remove(&dep_node_index);
-
- // Wake up waiters
- data.emitting_diagnostics_cond_var.notify_all();
- } else {
- // We must wait for the other thread to finish emitting the diagnostic
-
- loop {
- data.emitting_diagnostics_cond_var.wait(&mut emitting);
- if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index))
- {
- break;
- }
- }
- }
- }
-
- // Returns true if the given node has been marked as green during the
- // current compilation session. Used in various assertions
- pub fn is_green(&self, dep_node: &DepNode) -> bool {
- self.node_color(dep_node).map(|c| c.is_green()).unwrap_or(false)
- }
-
- // This method loads all on-disk cacheable query results into memory, so
- // they can be written out to the new cache file again. Most query results
- // will already be in memory but in the case where we marked something as
- // green but then did not need the value, that value will never have been
- // loaded from disk.
- //
- // This method will only load queries that will end up in the disk cache.
- // Other queries will not be executed.
- pub fn exec_cache_promotions(&self, tcx: TyCtxt<'_>) {
- let _prof_timer = tcx.prof.generic_activity("incr_comp_query_cache_promotion");
-
- let data = self.data.as_ref().unwrap();
- for prev_index in data.colors.values.indices() {
- match data.colors.get(prev_index) {
- Some(DepNodeColor::Green(_)) => {
- let dep_node = data.previous.index_to_node(prev_index);
- dep_node.try_load_from_on_disk_cache(tcx);
- }
- None | Some(DepNodeColor::Red) => {
- // We can skip red nodes because a node can only be marked
- // as red if the query result was recomputed and thus is
- // already in memory.
- }
- }
- }
- }
-
- fn next_virtual_depnode_index(&self) -> DepNodeIndex {
- let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
- DepNodeIndex::from_u32(index)
- }
-}
-
-fn def_id_corresponds_to_hir_dep_node(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
- let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap();
- def_id.index == hir_id.owner.local_def_index
-}
-
-/// A "work product" is an intermediate result that we save into the
- /// incremental directory for later re-use. The primary examples are
-/// the object files that we save for each partition at code
-/// generation time.
-///
-/// Each work product is associated with a dep-node, representing the
-/// process that produced the work-product. If that dep-node is found
-/// to be dirty when we load up, then we will delete the work-product
-/// at load time. If the work-product is found to be clean, then we
-/// will keep a record in the `previous_work_products` list.
-///
-/// In addition, work products have an associated hash. This hash is
-/// an extra hash that can be used to decide if the work-product from
-/// a previous compilation can be re-used (in addition to the dirty
-/// edges check).
-///
-/// As the primary example, consider the object files we generate for
-/// each partition. In the first run, we create partitions based on
-/// the symbols that need to be compiled. For each partition P, we
-/// hash the symbols in P and create a `WorkProduct` record associated
- /// with `DepNode::CodegenUnit(P)`; the hash is computed from the set
- /// of symbols in P.
-///
-/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
-/// judged to be clean (which means none of the things we read to
-/// generate the partition were found to be dirty), it will be loaded
-/// into previous work products. We will then regenerate the set of
-/// symbols in the partition P and hash them (note that new symbols
-/// may be added -- for example, new monomorphizations -- even if
-/// nothing in P changed!). We will compare that hash against the
-/// previous hash. If it matches up, we can reuse the object file.
-#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
-pub struct WorkProduct {
- pub cgu_name: String,
- /// Saved files associated with this CGU.
- pub saved_files: Vec<(WorkProductFileKind, String)>,
-}
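A sketch of the reuse decision described above, with invented helper names standing in for the real fingerprinting machinery:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Hash a partition's symbol set (stands in for the real work-product hash).
fn symbol_hash(symbols: &[&str]) -> u64 {
    let mut hasher = DefaultHasher::new();
    for symbol in symbols {
        symbol.hash(&mut hasher);
    }
    hasher.finish()
}

fn can_reuse_object_file(prev_hash: u64, current_symbols: &[&str]) -> bool {
    // New symbols (e.g. new monomorphizations) may appear even if nothing in
    // the partition changed, so the recomputed hash is what decides reuse.
    symbol_hash(current_symbols) == prev_hash
}

fn main() {
    let prev = symbol_hash(&["foo", "bar"]);
    assert!(can_reuse_object_file(prev, &["foo", "bar"]));
    assert!(!can_reuse_object_file(prev, &["foo", "bar", "baz"]));
}
```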
-
-#[derive(Clone, Copy, Debug, RustcEncodable, RustcDecodable, PartialEq)]
-pub enum WorkProductFileKind {
- Object,
- Bytecode,
- BytecodeCompressed,
-}
-
-#[derive(Clone)]
-struct DepNodeData {
- node: DepNode,
- edges: EdgesVec,
- fingerprint: Fingerprint,
-}
-
-/// `CurrentDepGraph` stores the dependency graph for the current session.
-/// It will be populated as we run queries or tasks.
-///
-/// The nodes in it are identified by an index (`DepNodeIndex`).
-/// The data for each node is stored in its `DepNodeData`, found in the `data` field.
-///
-/// We never remove nodes from the graph: they are only added.
-///
-/// This struct uses two locks internally. The `data` and `node_to_node_index` fields are
-/// locked separately. Operations that take a `DepNodeIndex` typically just access
-/// the data field.
-///
-/// The only operation that must manipulate both locks is adding new nodes, in which case
-/// we first acquire the `node_to_node_index` lock and then, once a new node is to be inserted,
- /// acquire the lock on `data`.
-pub(super) struct CurrentDepGraph {
- data: Lock<IndexVec<DepNodeIndex, DepNodeData>>,
- node_to_node_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,
-
- /// Used to trap when a specific edge is added to the graph.
- /// This is used for debug purposes and is only active with `debug_assertions`.
- #[allow(dead_code)]
- forbidden_edge: Option<EdgeFilter>,
-
- /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
- /// their edges. This has the beneficial side-effect that multiple anonymous
- /// nodes can be coalesced into one without changing the semantics of the
- /// dependency graph. However, the merging of nodes can lead to a subtle
- /// problem during red-green marking: The color of an anonymous node from
- /// the current session might "shadow" the color of the node with the same
- /// ID from the previous session. In order to side-step this problem, we make
- /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
- /// This is implemented by mixing a session-key into the ID fingerprint of
- /// each anon node. The session-key is just a random number generated when
- /// the `DepGraph` is created.
- anon_id_seed: Fingerprint,
-
- /// These are simple counters used for profiling and
- /// debugging; they are only active with `debug_assertions`.
- total_read_count: AtomicU64,
- total_duplicate_read_count: AtomicU64,
-}
-
-impl CurrentDepGraph {
- fn new(prev_graph_node_count: usize) -> CurrentDepGraph {
- use std::time::{SystemTime, UNIX_EPOCH};
-
- let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
- let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
- let mut stable_hasher = StableHasher::new();
- nanos.hash(&mut stable_hasher);
-
- let forbidden_edge = if cfg!(debug_assertions) {
- match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
- Ok(s) => match EdgeFilter::new(&s) {
- Ok(f) => Some(f),
- Err(err) => bug!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
- },
- Err(_) => None,
- }
- } else {
- None
- };
-
- // Pre-allocate the dep node structures. We over-allocate a little so
- // that we hopefully don't have to re-allocate during this compilation
- // session. The over-allocation is 2% plus a small constant to account
- // for the fact that in very small crates 2% might not be enough.
- let new_node_count_estimate = (prev_graph_node_count * 102) / 100 + 200;
-
- CurrentDepGraph {
- data: Lock::new(IndexVec::with_capacity(new_node_count_estimate)),
- node_to_node_index: Sharded::new(|| {
- FxHashMap::with_capacity_and_hasher(
- new_node_count_estimate / sharded::SHARDS,
- Default::default(),
- )
- }),
- anon_id_seed: stable_hasher.finish(),
- forbidden_edge,
- total_read_count: AtomicU64::new(0),
- total_duplicate_read_count: AtomicU64::new(0),
- }
- }
-
- fn complete_task(
- &self,
- node: DepNode,
- task_deps: TaskDeps,
- fingerprint: Fingerprint,
- ) -> DepNodeIndex {
- self.alloc_node(node, task_deps.reads, fingerprint)
- }
-
- fn complete_anon_task(&self, kind: DepKind, task_deps: TaskDeps) -> DepNodeIndex {
- debug_assert!(!kind.is_eval_always());
-
- let mut hasher = StableHasher::new();
-
- // The dep node indices are hashed here instead of hashing the dep nodes of the
- // dependencies. These indices may refer to different nodes per session, but this isn't
- // a problem here because we ensure that the final dep node hash is unique per session by
- // combining it with the per-session random number `anon_id_seed`. This hash only needs
- // to map the dependencies to a single value on a per-session basis.
- task_deps.reads.hash(&mut hasher);
-
- let target_dep_node = DepNode {
- kind,
-
- // Fingerprint::combine() is faster than sending Fingerprint
- // through the StableHasher (at least as long as StableHasher
- // is so slow).
- hash: self.anon_id_seed.combine(hasher.finish()),
- };
-
- self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO)
- }
-
- fn alloc_node(
- &self,
- dep_node: DepNode,
- edges: EdgesVec,
- fingerprint: Fingerprint,
- ) -> DepNodeIndex {
- debug_assert!(
- !self.node_to_node_index.get_shard_by_value(&dep_node).lock().contains_key(&dep_node)
- );
- self.intern_node(dep_node, edges, fingerprint)
- }
-
- fn intern_node(
- &self,
- dep_node: DepNode,
- edges: EdgesVec,
- fingerprint: Fingerprint,
- ) -> DepNodeIndex {
- match self.node_to_node_index.get_shard_by_value(&dep_node).lock().entry(dep_node) {
- Entry::Occupied(entry) => *entry.get(),
- Entry::Vacant(entry) => {
- let mut data = self.data.lock();
- let dep_node_index = DepNodeIndex::new(data.len());
- data.push(DepNodeData { node: dep_node, edges, fingerprint });
- entry.insert(dep_node_index);
- dep_node_index
- }
- }
- }
-}
-
-impl DepGraphData {
- #[inline(never)]
- fn read_index(&self, source: DepNodeIndex) {
- ty::tls::with_context_opt(|icx| {
- let icx = if let Some(icx) = icx { icx } else { return };
- if let Some(task_deps) = icx.task_deps {
- let mut task_deps = task_deps.lock();
- let task_deps = &mut *task_deps;
- if cfg!(debug_assertions) {
- self.current.total_read_count.fetch_add(1, Relaxed);
- }
-
- // As long as we only have a low number of reads we can avoid doing a hash
- // insert and potentially allocating/reallocating the hashmap
- let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
- task_deps.reads.iter().all(|other| *other != source)
- } else {
- task_deps.read_set.insert(source)
- };
- if new_read {
- task_deps.reads.push(source);
- if task_deps.reads.len() == TASK_DEPS_READS_CAP {
- // Fill `read_set` with what we have so far so we can use the hashset next
- // time
- task_deps.read_set.extend(task_deps.reads.iter().copied());
- }
-
- #[cfg(debug_assertions)]
- {
- if let Some(target) = task_deps.node {
- let data = self.current.data.lock();
- if let Some(ref forbidden_edge) = self.current.forbidden_edge {
- let source = data[source].node;
- if forbidden_edge.test(&source, &target) {
- bug!("forbidden edge {:?} -> {:?} created", source, target)
- }
- }
- }
- }
- } else if cfg!(debug_assertions) {
- self.current.total_duplicate_read_count.fetch_add(1, Relaxed);
- }
- }
- })
- }
-}
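The dedup strategy in `read_index` is a generally useful hybrid: linear scan while the read list is short, then switch to a hash set once it reaches the cap. A standalone sketch with a plain `Vec` standing in for the `SmallVec`:

```rust
use std::collections::HashSet;

const READS_CAP: usize = 8; // mirrors `TASK_DEPS_READS_CAP` below

#[derive(Default)]
struct Reads {
    list: Vec<u32>,    // stands in for the `SmallVec` in `TaskDeps`
    set: HashSet<u32>, // only populated once `list` reaches the cap
}

impl Reads {
    /// Returns `true` if `source` had not been recorded before.
    fn push_if_new(&mut self, source: u32) -> bool {
        let is_new = if self.list.len() < READS_CAP {
            // Few reads: a linear scan beats hashing and avoids allocating.
            self.list.iter().all(|&other| other != source)
        } else {
            self.set.insert(source)
        };
        if is_new {
            self.list.push(source);
            if self.list.len() == READS_CAP {
                // Seed the set with everything so far; later pushes use it.
                self.set.extend(self.list.iter().copied());
            }
        }
        is_new
    }
}

fn main() {
    let mut reads = Reads::default();
    assert!(reads.push_if_new(3));
    assert!(!reads.push_if_new(3)); // duplicate, whichever path runs
}
```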
-
- /// The capacity of the `SmallVec` used for the `reads` field.
-const TASK_DEPS_READS_CAP: usize = 8;
-type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>;
-#[derive(Default)]
-pub struct TaskDeps {
- #[cfg(debug_assertions)]
- node: Option<DepNode>,
- reads: EdgesVec,
- read_set: FxHashSet<DepNodeIndex>,
-}
-
- // A data structure that stores `Option<DepNodeColor>` values as a contiguous
- // array, using one `u32` per entry.
-struct DepNodeColorMap {
- values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
-}
-
-const COMPRESSED_NONE: u32 = 0;
-const COMPRESSED_RED: u32 = 1;
-const COMPRESSED_FIRST_GREEN: u32 = 2;
-
-impl DepNodeColorMap {
- fn new(size: usize) -> DepNodeColorMap {
- DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
- }
-
- fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
- match self.values[index].load(Ordering::Acquire) {
- COMPRESSED_NONE => None,
- COMPRESSED_RED => Some(DepNodeColor::Red),
- value => {
- Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
- }
- }
- }
-
- fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
- self.values[index].store(
- match color {
- DepNodeColor::Red => COMPRESSED_RED,
- DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
- },
- Ordering::Release,
- )
- }
-}
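A standalone model of the packing scheme, mirroring the three constants above: `0` means not yet colored, `1` means red, and any value `n >= 2` means green with node index `n - 2`. Packing the whole `Option<DepNodeColor>` into one `u32` is what lets the real map use lock-free `AtomicU32` loads and stores instead of a lock:

```rust
const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;

#[derive(Debug, PartialEq)]
enum Color {
    Red,
    Green(u32), // the u32 stands in for `DepNodeIndex`
}

fn encode(color: &Color) -> u32 {
    match color {
        Color::Red => COMPRESSED_RED,
        Color::Green(index) => index + COMPRESSED_FIRST_GREEN,
    }
}

fn decode(value: u32) -> Option<Color> {
    match value {
        COMPRESSED_NONE => None,
        COMPRESSED_RED => Some(Color::Red),
        value => Some(Color::Green(value - COMPRESSED_FIRST_GREEN)),
    }
}

fn main() {
    assert_eq!(decode(encode(&Color::Green(5))), Some(Color::Green(5)));
    assert_eq!(decode(COMPRESSED_NONE), None);
}
```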
+++ /dev/null
-pub mod debug;
-mod dep_node;
-mod graph;
-mod prev;
-mod query;
-mod safe;
-mod serialized;
-
-pub(crate) use self::dep_node::DepNodeParams;
-pub use self::dep_node::{label_strs, DepConstructor, DepKind, DepNode, WorkProductId};
-pub use self::graph::WorkProductFileKind;
-pub use self::graph::{hash_result, DepGraph, DepNodeColor, DepNodeIndex, TaskDeps, WorkProduct};
-pub use self::prev::PreviousDepGraph;
-pub use self::query::DepGraphQuery;
-pub use self::safe::AssertDepGraphSafe;
-pub use self::safe::DepGraphSafe;
-pub use self::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
+++ /dev/null
-use super::dep_node::DepNode;
-use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
-use crate::ich::Fingerprint;
-use rustc_data_structures::fx::FxHashMap;
-
-#[derive(Debug, RustcEncodable, RustcDecodable, Default)]
-pub struct PreviousDepGraph {
- data: SerializedDepGraph,
- index: FxHashMap<DepNode, SerializedDepNodeIndex>,
-}
-
-impl PreviousDepGraph {
- pub fn new(data: SerializedDepGraph) -> PreviousDepGraph {
- let index: FxHashMap<_, _> =
- data.nodes.iter_enumerated().map(|(idx, &dep_node)| (dep_node, idx)).collect();
- PreviousDepGraph { data, index }
- }
-
- #[inline]
- pub fn edge_targets_from(
- &self,
- dep_node_index: SerializedDepNodeIndex,
- ) -> &[SerializedDepNodeIndex] {
- self.data.edge_targets_from(dep_node_index)
- }
-
- #[inline]
- pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode {
- self.data.nodes[dep_node_index]
- }
-
- #[inline]
- pub fn node_to_index(&self, dep_node: &DepNode) -> SerializedDepNodeIndex {
- self.index[dep_node]
- }
-
- #[inline]
- pub fn node_to_index_opt(&self, dep_node: &DepNode) -> Option<SerializedDepNodeIndex> {
- self.index.get(dep_node).cloned()
- }
-
- #[inline]
- pub fn fingerprint_of(&self, dep_node: &DepNode) -> Option<Fingerprint> {
- self.index.get(dep_node).map(|&node_index| self.data.fingerprints[node_index])
- }
-
- #[inline]
- pub fn fingerprint_by_index(&self, dep_node_index: SerializedDepNodeIndex) -> Fingerprint {
- self.data.fingerprints[dep_node_index]
- }
-
- pub fn node_count(&self) -> usize {
- self.index.len()
- }
-}
+++ /dev/null
-use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::graph::implementation::{
- Direction, Graph, NodeIndex, INCOMING, OUTGOING,
-};
-
-use super::DepNode;
-
-pub struct DepGraphQuery {
- pub graph: Graph<DepNode, ()>,
- pub indices: FxHashMap<DepNode, NodeIndex>,
-}
-
-impl DepGraphQuery {
- pub fn new(nodes: &[DepNode], edges: &[(DepNode, DepNode)]) -> DepGraphQuery {
- let mut graph = Graph::with_capacity(nodes.len(), edges.len());
- let mut indices = FxHashMap::default();
- for node in nodes {
- indices.insert(node.clone(), graph.add_node(node.clone()));
- }
-
- for &(ref source, ref target) in edges {
- let source = indices[source];
- let target = indices[target];
- graph.add_edge(source, target, ());
- }
-
- DepGraphQuery { graph, indices }
- }
-
- pub fn contains_node(&self, node: &DepNode) -> bool {
- self.indices.contains_key(&node)
- }
-
- pub fn nodes(&self) -> Vec<&DepNode> {
- self.graph.all_nodes().iter().map(|n| &n.data).collect()
- }
-
- pub fn edges(&self) -> Vec<(&DepNode, &DepNode)> {
- self.graph
- .all_edges()
- .iter()
- .map(|edge| (edge.source(), edge.target()))
- .map(|(s, t)| (self.graph.node_data(s), self.graph.node_data(t)))
- .collect()
- }
-
- fn reachable_nodes(&self, node: &DepNode, direction: Direction) -> Vec<&DepNode> {
- if let Some(&index) = self.indices.get(node) {
- self.graph.depth_traverse(index, direction).map(|s| self.graph.node_data(s)).collect()
- } else {
- vec![]
- }
- }
-
- /// All nodes reachable from `node`. In other words, things that
- /// will have to be recomputed if `node` changes.
- pub fn transitive_successors(&self, node: &DepNode) -> Vec<&DepNode> {
- self.reachable_nodes(node, OUTGOING)
- }
-
- /// All nodes that can reach `node`.
- pub fn transitive_predecessors(&self, node: &DepNode) -> Vec<&DepNode> {
- self.reachable_nodes(node, INCOMING)
- }
-
- /// Just the outgoing edges from `node`.
- pub fn immediate_successors(&self, node: &DepNode) -> Vec<&DepNode> {
- if let Some(&index) = self.indices.get(&node) {
- self.graph.successor_nodes(index).map(|s| self.graph.node_data(s)).collect()
- } else {
- vec![]
- }
- }
-}
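`reachable_nodes` delegates the traversal to the graph library's `depth_traverse`. As a standalone model of what `transitive_successors` computes, assuming a plain adjacency map:

```rust
use std::collections::{HashMap, HashSet};

// Everything reachable from `start` (including `start` itself), via an
// iterative depth-first traversal.
fn transitive_successors<'a>(
    edges: &HashMap<&'a str, Vec<&'a str>>,
    start: &'a str,
) -> HashSet<&'a str> {
    let mut seen = HashSet::new();
    let mut stack = vec![start];
    while let Some(node) = stack.pop() {
        if seen.insert(node) {
            if let Some(succs) = edges.get(node) {
                stack.extend(succs.iter().copied());
            }
        }
    }
    seen
}

fn main() {
    let edges = HashMap::from([("hir", vec!["typeck"]), ("typeck", vec!["mir_built"])]);
    // Everything downstream of `hir` must be recomputed if `hir` changes.
    assert!(transitive_successors(&edges, "hir").contains("mir_built"));
}
```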
+++ /dev/null
-//! The `DepGraphSafe` trait
-
-use crate::ty::TyCtxt;
-
-use rustc_ast::ast::NodeId;
-use rustc_hir::def_id::DefId;
-use rustc_hir::BodyId;
-
-/// The `DepGraphSafe` trait is used to specify what kinds of values
-/// are safe to "leak" into a task. The idea is that this should be
-/// only be implemented for things like the tcx as well as various id
-/// types, which will create reads in the dep-graph whenever the trait
-/// loads anything that might depend on the input program.
-pub trait DepGraphSafe {}
-
-/// A `BodyId` on its own doesn't give access to any particular state.
-/// You must fetch the state from the various maps or generate
-/// on-demand queries, all of which create reads.
-impl DepGraphSafe for BodyId {}
-
-/// A `NodeId` on its own doesn't give access to any particular state.
-/// You must fetch the state from the various maps or generate
-/// on-demand queries, all of which create reads.
-impl DepGraphSafe for NodeId {}
-
-/// A `DefId` on its own doesn't give access to any particular state.
-/// You must fetch the state from the various maps or generate
-/// on-demand queries, all of which create reads.
-impl DepGraphSafe for DefId {}
-
-/// The type context itself can be used to access all kinds of tracked
-/// state, but those accesses should always generate read events.
-impl<'tcx> DepGraphSafe for TyCtxt<'tcx> {}
-
-/// Tuples make it easy to build up state.
-impl<A, B> DepGraphSafe for (A, B)
-where
- A: DepGraphSafe,
- B: DepGraphSafe,
-{
-}
-
-/// Shared ref to dep-graph-safe stuff should still be dep-graph-safe.
-impl<'a, A> DepGraphSafe for &'a A where A: DepGraphSafe {}
-
-/// Mut ref to dep-graph-safe stuff should still be dep-graph-safe.
-impl<'a, A> DepGraphSafe for &'a mut A where A: DepGraphSafe {}
-
-/// No data here! :)
-impl DepGraphSafe for () {}
-
-/// A convenient override that lets you pass arbitrary state into a
-/// task. Every use should be accompanied by a comment explaining why
-/// it makes sense (or how it could be refactored away in the future).
-pub struct AssertDepGraphSafe<T>(pub T);
-
-impl<T> DepGraphSafe for AssertDepGraphSafe<T> {}
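This is the classic marker-trait pattern: nothing can cross into a task unless its type has been explicitly blessed, and blessings compose through tuples and references. A minimal self-contained model (invented trait and function names):

```rust
// Only explicitly blessed types, and combinations built from them, may
// cross into a task.
trait TaskSafe {}

impl TaskSafe for u32 {}
impl TaskSafe for () {}
impl<A: TaskSafe, B: TaskSafe> TaskSafe for (A, B) {}
impl<'a, A: TaskSafe> TaskSafe for &'a A {}

fn spawn_task<C: TaskSafe>(_cx: C) {
    // The real `with_task` would also install dependency tracking here.
}

fn main() {
    spawn_task((1_u32, ())); // fine: built from blessed pieces
    // spawn_task(String::new()); // rejected: `String` carries no guarantee
}
```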
+++ /dev/null
-//! The data that we will serialize and deserialize.
-
-use crate::dep_graph::DepNode;
-use crate::ich::Fingerprint;
-use rustc_index::vec::IndexVec;
-
-rustc_index::newtype_index! {
- pub struct SerializedDepNodeIndex { .. }
-}
-
-/// Data for use when recompiling the **current crate**.
-#[derive(Debug, RustcEncodable, RustcDecodable, Default)]
-pub struct SerializedDepGraph {
- /// The set of all DepNodes in the graph
- pub nodes: IndexVec<SerializedDepNodeIndex, DepNode>,
- /// The set of all Fingerprints in the graph. Each Fingerprint corresponds to
- /// the DepNode at the same index in the nodes vector.
- pub fingerprints: IndexVec<SerializedDepNodeIndex, Fingerprint>,
- /// For each DepNode, stores the list of edges originating from that
- /// DepNode. Encoded as a [start, end) pair indexing into edge_list_data,
- /// which holds the actual DepNodeIndices of the target nodes.
- pub edge_list_indices: IndexVec<SerializedDepNodeIndex, (u32, u32)>,
- /// A flattened list of all edge targets in the graph. Edge sources are
- /// implicit in edge_list_indices.
- pub edge_list_data: Vec<SerializedDepNodeIndex>,
-}
-
-impl SerializedDepGraph {
- #[inline]
- pub fn edge_targets_from(&self, source: SerializedDepNodeIndex) -> &[SerializedDepNodeIndex] {
- let targets = self.edge_list_indices[source];
- &self.edge_list_data[targets.0 as usize..targets.1 as usize]
- }
-}
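A worked example of the `[start, end)` encoding, using plain vectors in place of the `IndexVec` types. Each node's targets are stored contiguously, so `edge_targets_from` is a single slice operation with no per-node allocation:

```rust
fn main() {
    // Graph: node 0 -> {1, 2}, node 1 -> {2}, node 2 -> {}.
    let edge_list_indices: Vec<(u32, u32)> = vec![(0, 2), (2, 3), (3, 3)];
    let edge_list_data: Vec<u32> = vec![1, 2, 2];

    // Mirrors `edge_targets_from`: look up the range, then slice.
    let targets_of = |source: usize| {
        let (start, end) = edge_list_indices[source];
        &edge_list_data[start as usize..end as usize]
    };

    assert_eq!(targets_of(0), &[1, 2][..]);
    assert_eq!(targets_of(1), &[2][..]);
    assert!(targets_of(2).is_empty());
}
```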
--- /dev/null
+To learn more about how dependency tracking works in rustc, see the [rustc
+dev guide].
+
+[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/query.html
--- /dev/null
+//! Code for debugging the dep-graph.
+
+use super::dep_node::DepNode;
+use std::error::Error;
+
+/// A dep-node filter goes from a user-defined string to a query over
+/// nodes. Right now the format is like this:
+///
+/// x & y & z
+///
+/// where the format-string of the dep-node must contain `x`, `y`, and
+/// `z`.
+#[derive(Debug)]
+pub struct DepNodeFilter {
+ text: String,
+}
+
+impl DepNodeFilter {
+ pub fn new(text: &str) -> Self {
+ DepNodeFilter { text: text.trim().to_string() }
+ }
+
+ /// Returns `true` if all nodes always pass the filter.
+ pub fn accepts_all(&self) -> bool {
+ self.text.is_empty()
+ }
+
+ /// Tests whether `node` meets the filter, returning true if so.
+ pub fn test(&self, node: &DepNode) -> bool {
+ let debug_str = format!("{:?}", node);
+ self.text.split('&').map(|s| s.trim()).all(|f| debug_str.contains(f))
+ }
+}
+
+/// A filter like `F -> G` where `F` and `G` are valid dep-node
+/// filters. This can be used to test the source/target independently.
+pub struct EdgeFilter {
+ pub source: DepNodeFilter,
+ pub target: DepNodeFilter,
+}
+
+impl EdgeFilter {
+ pub fn new(test: &str) -> Result<EdgeFilter, Box<dyn Error>> {
+ let parts: Vec<_> = test.split("->").collect();
+ if parts.len() != 2 {
+ Err(format!("expected a filter like `a&b -> c&d`, not `{}`", test).into())
+ } else {
+ Ok(EdgeFilter {
+ source: DepNodeFilter::new(parts[0]),
+ target: DepNodeFilter::new(parts[1]),
+ })
+ }
+ }
+
+ pub fn test(&self, source: &DepNode, target: &DepNode) -> bool {
+ self.source.test(source) && self.target.test(target)
+ }
+}
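A standalone model of the filter semantics, using plain strings instead of `DepNode`s: a node filter is a conjunction of substring checks against the node's debug string, and an edge filter is just a source filter paired with a target filter:

```rust
fn matches(filter: &str, debug_str: &str) -> bool {
    // `x & y & z` matches when every trimmed fragment occurs in the string.
    filter.split('&').map(str::trim).all(|frag| debug_str.contains(frag))
}

fn main() {
    let node = "typeck(foo::bar)";
    assert!(matches("typeck & bar", node));
    assert!(!matches("typeck & baz", node));

    // An edge filter like `typeck -> mir_built` tests source and target
    // independently with two node filters.
    let (source_filter, target_filter) = ("typeck", "mir_built");
    assert!(matches(source_filter, node));
    assert!(matches(target_filter, "mir_built(foo::bar)"));
}
```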
--- /dev/null
+//! This module defines the `DepNode` type which the compiler uses to represent
+//! nodes in the dependency graph. A `DepNode` consists of a `DepKind` (which
+//! specifies the kind of thing it represents, like a piece of HIR, MIR, etc)
+//! and a `Fingerprint`, a 128-bit hash value whose exact meaning
+//! depends on the node's `DepKind`. Together, the kind and the fingerprint
+//! fully identify a dependency node, even across multiple compilation sessions.
+//! In other words, the value of the fingerprint does not depend on anything
+//! that is specific to a given compilation session, like an unpredictable
+//! interning key (e.g., NodeId, DefId, Symbol) or the numeric value of a
+//! pointer. The concept behind this could be compared to how git commit hashes
+//! uniquely identify a given commit. This approach has a few advantages:
+//!
+//! * A `DepNode` can simply be serialized to disk and loaded in another session
+//! without the need to do any "rebasing" (like we have to do for Spans and
+//! NodeIds) or "retracing" like we had to do for `DefId` in earlier
+//! implementations of the dependency graph.
+//! * A `Fingerprint` is just a bunch of bits, which allows `DepNode` to
+//! implement `Copy`, `Sync`, `Send`, `Freeze`, etc.
+//! * Since we just have a bit pattern, `DepNode` can be mapped from disk into
+//! memory without any post-processing (e.g., "abomination-style" pointer
+//! reconstruction).
+//! * Because a `DepNode` is self-contained, we can instantiate `DepNodes` that
+//! refer to things that do not exist anymore. In previous implementations
+//! `DepNode` contained a `DefId`. A `DepNode` referring to something that
+//! had been removed between the previous and the current compilation session
+//! could not be instantiated because the current compilation session
+//! contained no `DefId` for the thing that had been removed.
+//!
+//! `DepNode` definition happens in the `define_dep_nodes!()` macro. This macro
+//! defines the `DepKind` enum and a corresponding `DepConstructor` type. The
+//! `DepConstructor` type links a `DepKind` to the parameters that are needed at
+//! runtime in order to construct a valid `DepNode` fingerprint.
+//!
+//! Because the macro sees what parameters a given `DepKind` requires, it can
+//! "infer" some properties for each kind of `DepNode`:
+//!
+//! * Whether a `DepNode` of a given kind has any parameters at all. Some
+//! `DepNode`s could represent global concepts with only one value.
+//! * Whether it is possible, in principle, to reconstruct a query key from a
+//! given `DepNode`. Many `DepKind`s only require a single `DefId` parameter,
+//! in which case it is possible to map the node's fingerprint back to the
+//! `DefId` it was computed from. In other cases, too much information gets
+//! lost during fingerprint computation.
+//!
+//! The `DepConstructor` type, together with `DepNode::new()`, ensures that only
+//! valid `DepNode` instances can be constructed. For example, the API does not
+//! allow for constructing parameterless `DepNode`s with anything other
+//! than a zeroed out fingerprint. More generally speaking, it relieves the
+//! user of the `DepNode` API of having to know how to compute the expected
+//! fingerprint for a given set of node parameters.
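A simplified standalone model of this two-part identity (invented names; a `u128` stands in for `Fingerprint`):

```rust
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum Kind {
    Hir,
    TypeCheck,
}

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Node {
    kind: Kind,
    hash: u128, // stands in for `Fingerprint`
}

fn main() {
    // Because the hash is derived from stable data (e.g. a def-path hash),
    // the same item yields the same `Node` in every compilation session.
    let this_session = Node { kind: Kind::TypeCheck, hash: 0xDEAD_BEEF };
    let prev_session = Node { kind: Kind::TypeCheck, hash: 0xDEAD_BEEF };
    assert_eq!(this_session, prev_session);
}
```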
+
+use crate::hir::map::DefPathHash;
+use crate::ich::{Fingerprint, StableHashingContext};
+use crate::mir;
+use crate::mir::interpret::{GlobalId, LitToConstInput};
+use crate::traits;
+use crate::traits::query::{
+ CanonicalPredicateGoal, CanonicalProjectionGoal, CanonicalTyGoal,
+ CanonicalTypeOpAscribeUserTypeGoal, CanonicalTypeOpEqGoal, CanonicalTypeOpNormalizeGoal,
+ CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpSubtypeGoal,
+};
+use crate::ty::subst::SubstsRef;
+use crate::ty::{self, ParamEnvAnd, Ty, TyCtxt};
+
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX};
+use rustc_hir::HirId;
+use rustc_span::symbol::Symbol;
+use std::fmt;
+use std::hash::Hash;
+
+// erase!() just makes tokens go away. It's used to specify which macro argument
+// is repeated (i.e., which sub-expression of the macro we are in) when we don't
+// need to actually use any of the arguments.
+macro_rules! erase {
+ ($x:tt) => {{}};
+}
+
+macro_rules! is_anon_attr {
+ (anon) => {
+ true
+ };
+ ($attr:ident) => {
+ false
+ };
+}
+
+macro_rules! is_eval_always_attr {
+ (eval_always) => {
+ true
+ };
+ ($attr:ident) => {
+ false
+ };
+}
+
+macro_rules! contains_anon_attr {
+ ($($attr:ident $(($($attr_args:tt)*))* ),*) => ({$(is_anon_attr!($attr) | )* false});
+}
+
+macro_rules! contains_eval_always_attr {
+ ($($attr:ident $(($($attr_args:tt)*))* ),*) => ({$(is_eval_always_attr!($attr) | )* false});
+}
+
+macro_rules! define_dep_nodes {
+ (<$tcx:tt>
+ $(
+ [$($attrs:tt)*]
+ $variant:ident $(( $tuple_arg_ty:ty $(,)? ))*
+ ,)*
+ ) => (
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash,
+ RustcEncodable, RustcDecodable)]
+ #[allow(non_camel_case_types)]
+ pub enum DepKind {
+ $($variant),*
+ }
+
+ impl DepKind {
+ #[allow(unreachable_code)]
+ pub fn can_reconstruct_query_key<$tcx>(&self) -> bool {
+ match *self {
+ $(
+ DepKind :: $variant => {
+ if contains_anon_attr!($($attrs)*) {
+ return false;
+ }
+
+ // tuple args
+ $({
+ return <$tuple_arg_ty as DepNodeParams>
+ ::CAN_RECONSTRUCT_QUERY_KEY;
+ })*
+
+ true
+ }
+ )*
+ }
+ }
+
+ pub fn is_anon(&self) -> bool {
+ match *self {
+ $(
+ DepKind :: $variant => { contains_anon_attr!($($attrs)*) }
+ )*
+ }
+ }
+
+ pub fn is_eval_always(&self) -> bool {
+ match *self {
+ $(
+ DepKind :: $variant => { contains_eval_always_attr!($($attrs)*) }
+ )*
+ }
+ }
+
+ #[allow(unreachable_code)]
+ pub fn has_params(&self) -> bool {
+ match *self {
+ $(
+ DepKind :: $variant => {
+ // tuple args
+ $({
+ erase!($tuple_arg_ty);
+ return true;
+ })*
+
+ false
+ }
+ )*
+ }
+ }
+ }
+
+ pub struct DepConstructor;
+
+ #[allow(non_camel_case_types)]
+ impl DepConstructor {
+ $(
+ #[inline(always)]
+ #[allow(unreachable_code, non_snake_case)]
+ pub fn $variant(_tcx: TyCtxt<'_>, $(arg: $tuple_arg_ty)*) -> DepNode {
+ // tuple args
+ $({
+ erase!($tuple_arg_ty);
+ let hash = DepNodeParams::to_fingerprint(&arg, _tcx);
+ let dep_node = DepNode {
+ kind: DepKind::$variant,
+ hash
+ };
+
+ #[cfg(debug_assertions)]
+ {
+ if !dep_node.kind.can_reconstruct_query_key() &&
+ (_tcx.sess.opts.debugging_opts.incremental_info ||
+ _tcx.sess.opts.debugging_opts.query_dep_graph)
+ {
+ _tcx.dep_graph.register_dep_node_debug_str(dep_node, || {
+ arg.to_debug_str(_tcx)
+ });
+ }
+ }
+
+ return dep_node;
+ })*
+
+ DepNode {
+ kind: DepKind::$variant,
+ hash: Fingerprint::ZERO,
+ }
+ }
+ )*
+ }
+
+ #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash,
+ RustcEncodable, RustcDecodable)]
+ pub struct DepNode {
+ pub kind: DepKind,
+ pub hash: Fingerprint,
+ }
+
+ impl DepNode {
+ /// Construct a DepNode from the given DepKind and DefPathHash. This
+ /// method will assert that the given DepKind actually requires a
+ /// single DefId/DefPathHash parameter.
+ pub fn from_def_path_hash(def_path_hash: DefPathHash,
+ kind: DepKind)
+ -> DepNode {
+ debug_assert!(kind.can_reconstruct_query_key() && kind.has_params());
+ DepNode {
+ kind,
+ hash: def_path_hash.0,
+ }
+ }
+
+ /// Creates a new, parameterless DepNode. This method will assert
+ /// that the DepNode corresponding to the given DepKind actually
+ /// does not require any parameters.
+ pub fn new_no_params(kind: DepKind) -> DepNode {
+ debug_assert!(!kind.has_params());
+ DepNode {
+ kind,
+ hash: Fingerprint::ZERO,
+ }
+ }
+
+ /// Extracts the DefId corresponding to this DepNode. This will work
+ /// if two conditions are met:
+ ///
+ /// 1. The Fingerprint of the DepNode actually is a DefPathHash, and
+ /// 2. the item that the DefPath refers to exists in the current tcx.
+ ///
+ /// Condition (1) is determined by the DepKind variant of the
+ /// DepNode. Condition (2) might not be fulfilled if a DepNode
+ /// refers to something from the previous compilation session that
+ /// has been removed.
+ pub fn extract_def_id(&self, tcx: TyCtxt<'_>) -> Option<DefId> {
+ if self.kind.can_reconstruct_query_key() {
+ let def_path_hash = DefPathHash(self.hash);
+ tcx.def_path_hash_to_def_id.as_ref()?
+ .get(&def_path_hash).cloned()
+ } else {
+ None
+ }
+ }
+
+ /// Used in testing
+ pub fn from_label_string(label: &str,
+ def_path_hash: DefPathHash)
+ -> Result<DepNode, ()> {
+ let kind = match label {
+ $(
+ stringify!($variant) => DepKind::$variant,
+ )*
+ _ => return Err(()),
+ };
+
+ if !kind.can_reconstruct_query_key() {
+ return Err(());
+ }
+
+ if kind.has_params() {
+ Ok(DepNode::from_def_path_hash(def_path_hash, kind))
+ } else {
+ Ok(DepNode::new_no_params(kind))
+ }
+ }
+
+ /// Used in testing
+ pub fn has_label_string(label: &str) -> bool {
+ match label {
+ $(
+ stringify!($variant) => true,
+ )*
+ _ => false,
+ }
+ }
+ }
+
+ /// Contains variant => str representations for constructing
+ /// DepNode groups for tests.
+ #[allow(dead_code, non_upper_case_globals)]
+ pub mod label_strs {
+ $(
+ pub const $variant: &str = stringify!($variant);
+ )*
+ }
+ );
+}
+
+impl fmt::Debug for DepNode {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?}", self.kind)?;
+
+ if !self.kind.has_params() && !self.kind.is_anon() {
+ return Ok(());
+ }
+
+ write!(f, "(")?;
+
+ crate::ty::tls::with_opt(|opt_tcx| {
+ if let Some(tcx) = opt_tcx {
+ if let Some(def_id) = self.extract_def_id(tcx) {
+ write!(f, "{}", tcx.def_path_debug_str(def_id))?;
+ } else if let Some(ref s) = tcx.dep_graph.dep_node_debug_str(*self) {
+ write!(f, "{}", s)?;
+ } else {
+ write!(f, "{}", self.hash)?;
+ }
+ } else {
+ write!(f, "{}", self.hash)?;
+ }
+ Ok(())
+ })?;
+
+ write!(f, ")")
+ }
+}
+
+rustc_dep_node_append!([define_dep_nodes!][ <'tcx>
+ // We use this for most things when incr. comp. is turned off.
+ [] Null,
+
+ // Represents metadata from an extern crate.
+ [eval_always] CrateMetadata(CrateNum),
+
+ [anon] TraitSelect,
+
+ [] CompileCodegenUnit(Symbol),
+]);
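+
+// Illustrative sketch of the API generated for the variants above (the
+// `rustc_dep_node_append!` macro also splices in additional variants, such as
+// one per query, before invoking `define_dep_nodes!`); `tcx` and `krate` are
+// assumed to be in scope:
+//
+//     let node = DepConstructor::CrateMetadata(tcx, krate);
+//     assert_eq!(node.kind, DepKind::CrateMetadata);
+//     assert!(node.kind.is_eval_always()); // from the [eval_always] attribute
+//     assert!(node.kind.has_params());     // CrateMetadata carries a CrateNum
+//     assert!(DepKind::TraitSelect.is_anon()); // from the [anon] attribute
+//     assert_eq!(label_strs::CrateMetadata, "CrateMetadata");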
+
+pub(crate) trait DepNodeParams<'tcx>: fmt::Debug + Sized {
+ const CAN_RECONSTRUCT_QUERY_KEY: bool;
+
+ /// This method turns the parameters of a DepNodeConstructor into an opaque
+ /// Fingerprint to be used in DepNode.
+ /// Not all DepNodeParams support being turned into a Fingerprint (they
+ /// don't need to if the corresponding DepNode is anonymous).
+ fn to_fingerprint(&self, _: TyCtxt<'tcx>) -> Fingerprint {
+ panic!("Not implemented. Accidentally called on anonymous node?")
+ }
+
+ fn to_debug_str(&self, _: TyCtxt<'tcx>) -> String {
+ format!("{:?}", self)
+ }
+
+ /// This method tries to recover the query key from the given `DepNode`,
+ /// something which is needed when forcing `DepNode`s during red-green
+ /// evaluation. The query system will only call this method if
+ /// `CAN_RECONSTRUCT_QUERY_KEY` is `true`.
+ /// It is always valid to return `None` here, in which case incremental
+ /// compilation will treat the query as having changed instead of forcing it.
+ fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self>;
+}
+
+impl<'tcx, T> DepNodeParams<'tcx> for T
+where
+ T: HashStable<StableHashingContext<'tcx>> + fmt::Debug,
+{
+ default const CAN_RECONSTRUCT_QUERY_KEY: bool = false;
+
+ default fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+ let mut hcx = tcx.create_stable_hashing_context();
+ let mut hasher = StableHasher::new();
+
+ self.hash_stable(&mut hcx, &mut hasher);
+
+ hasher.finish()
+ }
+
+ default fn to_debug_str(&self, _: TyCtxt<'tcx>) -> String {
+ format!("{:?}", *self)
+ }
+
+ default fn recover(_: TyCtxt<'tcx>, _: &DepNode) -> Option<Self> {
+ None
+ }
+}
+
+impl<'tcx> DepNodeParams<'tcx> for DefId {
+ const CAN_RECONSTRUCT_QUERY_KEY: bool = true;
+
+ fn to_fingerprint(&self, tcx: TyCtxt<'_>) -> Fingerprint {
+ tcx.def_path_hash(*self).0
+ }
+
+ fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+ tcx.def_path_str(*self)
+ }
+
+ fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
+ dep_node.extract_def_id(tcx)
+ }
+}
+
+impl<'tcx> DepNodeParams<'tcx> for DefIndex {
+ const CAN_RECONSTRUCT_QUERY_KEY: bool = true;
+
+ fn to_fingerprint(&self, tcx: TyCtxt<'_>) -> Fingerprint {
+ tcx.hir().definitions().def_path_hash(*self).0
+ }
+
+ fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+ tcx.def_path_str(DefId::local(*self))
+ }
+
+ fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
+ dep_node.extract_def_id(tcx).map(|id| id.index)
+ }
+}
+
+impl<'tcx> DepNodeParams<'tcx> for CrateNum {
+ const CAN_RECONSTRUCT_QUERY_KEY: bool = true;
+
+ fn to_fingerprint(&self, tcx: TyCtxt<'_>) -> Fingerprint {
+ let def_id = DefId { krate: *self, index: CRATE_DEF_INDEX };
+ tcx.def_path_hash(def_id).0
+ }
+
+ fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+ tcx.crate_name(*self).to_string()
+ }
+
+ fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
+ dep_node.extract_def_id(tcx).map(|id| id.krate)
+ }
+}
+
+impl<'tcx> DepNodeParams<'tcx> for (DefId, DefId) {
+ const CAN_RECONSTRUCT_QUERY_KEY: bool = false;
+
+ // We actually would not need to specialize the implementation of this
+    // method, but it's faster to combine the hashes than to instantiate a full
+ // hashing context and stable-hashing state.
+ fn to_fingerprint(&self, tcx: TyCtxt<'_>) -> Fingerprint {
+ let (def_id_0, def_id_1) = *self;
+
+ let def_path_hash_0 = tcx.def_path_hash(def_id_0);
+ let def_path_hash_1 = tcx.def_path_hash(def_id_1);
+
+ def_path_hash_0.0.combine(def_path_hash_1.0)
+ }
+
+ fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+ let (def_id_0, def_id_1) = *self;
+
+ format!("({}, {})", tcx.def_path_debug_str(def_id_0), tcx.def_path_debug_str(def_id_1))
+ }
+}
+
+impl<'tcx> DepNodeParams<'tcx> for HirId {
+ const CAN_RECONSTRUCT_QUERY_KEY: bool = false;
+
+ // We actually would not need to specialize the implementation of this
+    // method, but it's faster to combine the hashes than to instantiate a full
+ // hashing context and stable-hashing state.
+ fn to_fingerprint(&self, tcx: TyCtxt<'_>) -> Fingerprint {
+ let HirId { owner, local_id } = *self;
+
+ let def_path_hash = tcx.def_path_hash(DefId::local(owner));
+ let local_id = Fingerprint::from_smaller_hash(local_id.as_u32().into());
+
+ def_path_hash.0.combine(local_id)
+ }
+}
+
+/// A "work product" corresponds to a `.o` (or other) file that we
+/// save in between runs. These IDs do not have a `DefId` but rather
+/// some independent path or string that persists between runs without
+/// the need to be mapped or unmapped. (This ensures we can serialize
+/// them even in the absence of a tcx.)
+#[derive(
+ Clone,
+ Copy,
+ Debug,
+ PartialEq,
+ Eq,
+ PartialOrd,
+ Ord,
+ Hash,
+ RustcEncodable,
+ RustcDecodable,
+ HashStable
+)]
+pub struct WorkProductId {
+ hash: Fingerprint,
+}
+
+impl WorkProductId {
+ pub fn from_cgu_name(cgu_name: &str) -> WorkProductId {
+ let mut hasher = StableHasher::new();
+ cgu_name.len().hash(&mut hasher);
+ cgu_name.hash(&mut hasher);
+ WorkProductId { hash: hasher.finish() }
+ }
+
+ pub fn from_fingerprint(fingerprint: Fingerprint) -> WorkProductId {
+ WorkProductId { hash: fingerprint }
+ }
+}
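+
+// Illustrative sketch (the CGU name is hypothetical): because `from_cgu_name`
+// hashes only the name and its length, equal names produce equal IDs, in this
+// session and in later ones:
+//
+//     let a = WorkProductId::from_cgu_name("mycrate.cgu-0");
+//     let b = WorkProductId::from_cgu_name("mycrate.cgu-0");
+//     assert_eq!(a, b);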
--- /dev/null
+use crate::ty::{self, TyCtxt};
+use parking_lot::{Condvar, Mutex};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::profiling::QueryInvocationId;
+use rustc_data_structures::sharded::{self, Sharded};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
+use rustc_errors::Diagnostic;
+use rustc_hir::def_id::DefId;
+use rustc_index::vec::{Idx, IndexVec};
+use smallvec::SmallVec;
+use std::collections::hash_map::Entry;
+use std::env;
+use std::hash::Hash;
+use std::mem;
+use std::sync::atomic::Ordering::Relaxed;
+
+use crate::ich::{Fingerprint, StableHashingContext, StableHashingContextProvider};
+
+use super::debug::EdgeFilter;
+use super::dep_node::{DepKind, DepNode, WorkProductId};
+use super::prev::PreviousDepGraph;
+use super::query::DepGraphQuery;
+use super::safe::DepGraphSafe;
+use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
+
+#[derive(Clone)]
+pub struct DepGraph {
+ data: Option<Lrc<DepGraphData>>,
+
+ /// This field is used for assigning DepNodeIndices when running in
+ /// non-incremental mode. Even in non-incremental mode we make sure that
+ /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
+ /// ID is used for self-profiling.
+ virtual_dep_node_index: Lrc<AtomicU32>,
+}
+
+rustc_index::newtype_index! {
+ pub struct DepNodeIndex { .. }
+}
+
+impl DepNodeIndex {
+ pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
+}
+
+impl std::convert::From<DepNodeIndex> for QueryInvocationId {
+ #[inline]
+ fn from(dep_node_index: DepNodeIndex) -> Self {
+ QueryInvocationId(dep_node_index.as_u32())
+ }
+}
+
+#[derive(PartialEq)]
+pub enum DepNodeColor {
+ Red,
+ Green(DepNodeIndex),
+}
+
+impl DepNodeColor {
+ pub fn is_green(self) -> bool {
+ match self {
+ DepNodeColor::Red => false,
+ DepNodeColor::Green(_) => true,
+ }
+ }
+}
+
+struct DepGraphData {
+ /// The new encoding of the dependency graph, optimized for red/green
+ /// tracking. The `current` field is the dependency graph of only the
+    /// current compilation session: we don't merge the previous dep-graph into
+    /// the current one anymore.
+ current: CurrentDepGraph,
+
+ /// The dep-graph from the previous compilation session. It contains all
+ /// nodes and edges as well as all fingerprints of nodes that have them.
+ previous: PreviousDepGraph,
+
+ colors: DepNodeColorMap,
+
+    /// A set of loaded diagnostics that is in the process of being emitted.
+ emitting_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,
+
+ /// Used to wait for diagnostics to be emitted.
+ emitting_diagnostics_cond_var: Condvar,
+
+ /// When we load, there may be `.o` files, cached MIR, or other such
+ /// things available to us. If we find that they are not dirty, we
+ /// load the path to the file storing those work-products here into
+ /// this map. We can later look for and extract that data.
+ previous_work_products: FxHashMap<WorkProductId, WorkProduct>,
+
+ dep_node_debug: Lock<FxHashMap<DepNode, String>>,
+}
+
+pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Option<Fingerprint>
+where
+ R: for<'a> HashStable<StableHashingContext<'a>>,
+{
+ let mut stable_hasher = StableHasher::new();
+ result.hash_stable(hcx, &mut stable_hasher);
+
+ Some(stable_hasher.finish())
+}
+
+impl DepGraph {
+ pub fn new(
+ prev_graph: PreviousDepGraph,
+ prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
+ ) -> DepGraph {
+ let prev_graph_node_count = prev_graph.node_count();
+
+ DepGraph {
+ data: Some(Lrc::new(DepGraphData {
+ previous_work_products: prev_work_products,
+ dep_node_debug: Default::default(),
+ current: CurrentDepGraph::new(prev_graph_node_count),
+ emitting_diagnostics: Default::default(),
+ emitting_diagnostics_cond_var: Condvar::new(),
+ previous: prev_graph,
+ colors: DepNodeColorMap::new(prev_graph_node_count),
+ })),
+ virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
+ }
+ }
+
+ pub fn new_disabled() -> DepGraph {
+ DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
+ }
+
+ /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
+ #[inline]
+ pub fn is_fully_enabled(&self) -> bool {
+ self.data.is_some()
+ }
+
+ pub fn query(&self) -> DepGraphQuery {
+ let data = self.data.as_ref().unwrap().current.data.lock();
+ let nodes: Vec<_> = data.iter().map(|n| n.node).collect();
+ let mut edges = Vec::new();
+ for (from, edge_targets) in data.iter().map(|d| (d.node, &d.edges)) {
+ for &edge_target in edge_targets.iter() {
+ let to = data[edge_target].node;
+ edges.push((from, to));
+ }
+ }
+
+ DepGraphQuery::new(&nodes[..], &edges[..])
+ }
+
+ pub fn assert_ignored(&self) {
+ if let Some(..) = self.data {
+ ty::tls::with_context_opt(|icx| {
+ let icx = if let Some(icx) = icx { icx } else { return };
+ assert!(icx.task_deps.is_none(), "expected no task dependency tracking");
+ })
+ }
+ }
+
+ pub fn with_ignore<OP, R>(&self, op: OP) -> R
+ where
+ OP: FnOnce() -> R,
+ {
+ ty::tls::with_context(|icx| {
+ let icx = ty::tls::ImplicitCtxt { task_deps: None, ..icx.clone() };
+
+ ty::tls::enter_context(&icx, |_| op())
+ })
+ }
+
+ /// Starts a new dep-graph task. Dep-graph tasks are specified
+ /// using a free function (`task`) and **not** a closure -- this
+ /// is intentional because we want to exercise tight control over
+ /// what state they have access to. In particular, we want to
+ /// prevent implicit 'leaks' of tracked state into the task (which
+ /// could then be read without generating correct edges in the
+ /// dep-graph -- see the [rustc dev guide] for more details on
+ /// the dep-graph). To this end, the task function gets exactly two
+ /// pieces of state: the context `cx` and an argument `arg`. Both
+ /// of these bits of state must be of some type that implements
+ /// `DepGraphSafe` and hence does not leak.
+ ///
+ /// The choice of two arguments is not fundamental. One argument
+ /// would work just as well, since multiple values can be
+ /// collected using tuples. However, using two arguments works out
+ /// to be quite convenient, since it is common to need a context
+ /// (`cx`) and some argument (e.g., a `DefId` identifying what
+ /// item to process).
+ ///
+ /// For cases where you need some other number of arguments:
+ ///
+ /// - If you only need one argument, just use `()` for the `arg`
+ /// parameter.
+ /// - If you need 3+ arguments, use a tuple for the
+ /// `arg` parameter.
+ ///
+ /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
+ pub fn with_task<'a, C, A, R>(
+ &self,
+ key: DepNode,
+ cx: C,
+ arg: A,
+ task: fn(C, A) -> R,
+ hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
+ ) -> (R, DepNodeIndex)
+ where
+ C: DepGraphSafe + StableHashingContextProvider<'a>,
+ {
+ self.with_task_impl(
+ key,
+ cx,
+ arg,
+ false,
+ task,
+ |_key| {
+ Some(TaskDeps {
+ #[cfg(debug_assertions)]
+ node: Some(_key),
+ reads: SmallVec::new(),
+ read_set: Default::default(),
+ })
+ },
+ |data, key, fingerprint, task| data.complete_task(key, task.unwrap(), fingerprint),
+ hash_result,
+ )
+ }
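+
+    // Illustrative sketch of a caller (`compute_task` and `MyResult` are
+    // hypothetical names): the task must be a free function, so all of its
+    // inputs are explicit.
+    //
+    //     fn compute_task(tcx: TyCtxt<'_>, def_id: DefId) -> MyResult { ... }
+    //
+    //     let (result, dep_node_index) = tcx.dep_graph.with_task(
+    //         dep_node,     // the DepNode identifying this unit of work
+    //         tcx,          // `cx`: implements DepGraphSafe
+    //         def_id,       // `arg`: implements DepGraphSafe
+    //         compute_task,
+    //         hash_result,  // fingerprints the result for red/green tracking
+    //     );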
+
+ fn with_task_impl<'a, C, A, R>(
+ &self,
+ key: DepNode,
+ cx: C,
+ arg: A,
+ no_tcx: bool,
+ task: fn(C, A) -> R,
+ create_task: fn(DepNode) -> Option<TaskDeps>,
+ finish_task_and_alloc_depnode: fn(
+ &CurrentDepGraph,
+ DepNode,
+ Fingerprint,
+ Option<TaskDeps>,
+ ) -> DepNodeIndex,
+ hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
+ ) -> (R, DepNodeIndex)
+ where
+ C: DepGraphSafe + StableHashingContextProvider<'a>,
+ {
+ if let Some(ref data) = self.data {
+ let task_deps = create_task(key).map(Lock::new);
+
+ // In incremental mode, hash the result of the task. We don't
+ // do anything with the hash yet, but we are computing it
+ // anyway so that
+ // - we make sure that the infrastructure works and
+ // - we can get an idea of the runtime cost.
+ let mut hcx = cx.get_stable_hashing_context();
+
+ let result = if no_tcx {
+ task(cx, arg)
+ } else {
+ ty::tls::with_context(|icx| {
+ let icx =
+ ty::tls::ImplicitCtxt { task_deps: task_deps.as_ref(), ..icx.clone() };
+
+ ty::tls::enter_context(&icx, |_| task(cx, arg))
+ })
+ };
+
+ let current_fingerprint = hash_result(&mut hcx, &result);
+
+ let dep_node_index = finish_task_and_alloc_depnode(
+ &data.current,
+ key,
+ current_fingerprint.unwrap_or(Fingerprint::ZERO),
+ task_deps.map(|lock| lock.into_inner()),
+ );
+
+ let print_status = cfg!(debug_assertions) && hcx.sess().opts.debugging_opts.dep_tasks;
+
+ // Determine the color of the new DepNode.
+ if let Some(prev_index) = data.previous.node_to_index_opt(&key) {
+ let prev_fingerprint = data.previous.fingerprint_by_index(prev_index);
+
+ let color = if let Some(current_fingerprint) = current_fingerprint {
+ if current_fingerprint == prev_fingerprint {
+ if print_status {
+ eprintln!("[task::green] {:?}", key);
+ }
+ DepNodeColor::Green(dep_node_index)
+ } else {
+ if print_status {
+ eprintln!("[task::red] {:?}", key);
+ }
+ DepNodeColor::Red
+ }
+ } else {
+ if print_status {
+ eprintln!("[task::unknown] {:?}", key);
+ }
+ // Mark the node as Red if we can't hash the result
+ DepNodeColor::Red
+ };
+
+ debug_assert!(
+ data.colors.get(prev_index).is_none(),
+ "DepGraph::with_task() - Duplicate DepNodeColor \
+ insertion for {:?}",
+ key
+ );
+
+ data.colors.insert(prev_index, color);
+ } else {
+ if print_status {
+ eprintln!("[task::new] {:?}", key);
+ }
+ }
+
+ (result, dep_node_index)
+ } else {
+ (task(cx, arg), self.next_virtual_depnode_index())
+ }
+ }
+
+ /// Executes something within an "anonymous" task, that is, a task the
+ /// `DepNode` of which is determined by the list of inputs it read from.
+ pub fn with_anon_task<OP, R>(&self, dep_kind: DepKind, op: OP) -> (R, DepNodeIndex)
+ where
+ OP: FnOnce() -> R,
+ {
+ if let Some(ref data) = self.data {
+ let (result, task_deps) = ty::tls::with_context(|icx| {
+ let task_deps = Lock::new(TaskDeps::default());
+
+ let r = {
+ let icx = ty::tls::ImplicitCtxt { task_deps: Some(&task_deps), ..icx.clone() };
+
+ ty::tls::enter_context(&icx, |_| op())
+ };
+
+ (r, task_deps.into_inner())
+ });
+ let dep_node_index = data.current.complete_anon_task(dep_kind, task_deps);
+ (result, dep_node_index)
+ } else {
+ (op(), self.next_virtual_depnode_index())
+ }
+ }
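+
+    // Illustrative sketch (`select_trait_candidate` is a hypothetical
+    // function): anonymous tasks get no explicit `DepNode` up front; the
+    // node's identity is computed from the set of reads performed inside `op`:
+    //
+    //     let (result, dep_node_index) =
+    //         tcx.dep_graph.with_anon_task(DepKind::TraitSelect, || {
+    //             // Every tracked read in here becomes an edge of the
+    //             // anonymous node.
+    //             select_trait_candidate(tcx, obligation)
+    //         });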
+
+ /// Executes something within an "eval-always" task which is a task
+ /// that runs whenever anything changes.
+ pub fn with_eval_always_task<'a, C, A, R>(
+ &self,
+ key: DepNode,
+ cx: C,
+ arg: A,
+ task: fn(C, A) -> R,
+ hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
+ ) -> (R, DepNodeIndex)
+ where
+ C: DepGraphSafe + StableHashingContextProvider<'a>,
+ {
+ self.with_task_impl(
+ key,
+ cx,
+ arg,
+ false,
+ task,
+ |_| None,
+ |data, key, fingerprint, _| data.alloc_node(key, smallvec![], fingerprint),
+ hash_result,
+ )
+ }
+
+ #[inline]
+ pub fn read(&self, v: DepNode) {
+ if let Some(ref data) = self.data {
+ let map = data.current.node_to_node_index.get_shard_by_value(&v).lock();
+ if let Some(dep_node_index) = map.get(&v).copied() {
+ std::mem::drop(map);
+ data.read_index(dep_node_index);
+ } else {
+ bug!("DepKind {:?} should be pre-allocated but isn't.", v.kind)
+ }
+ }
+ }
+
+ #[inline]
+ pub fn read_index(&self, dep_node_index: DepNodeIndex) {
+ if let Some(ref data) = self.data {
+ data.read_index(dep_node_index);
+ }
+ }
+
+ #[inline]
+ pub fn dep_node_index_of(&self, dep_node: &DepNode) -> DepNodeIndex {
+ self.data
+ .as_ref()
+ .unwrap()
+ .current
+ .node_to_node_index
+ .get_shard_by_value(dep_node)
+ .lock()
+ .get(dep_node)
+ .cloned()
+ .unwrap()
+ }
+
+ #[inline]
+ pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
+ if let Some(ref data) = self.data {
+ data.current
+ .node_to_node_index
+ .get_shard_by_value(&dep_node)
+ .lock()
+ .contains_key(dep_node)
+ } else {
+ false
+ }
+ }
+
+ #[inline]
+ pub fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint {
+ let data = self.data.as_ref().expect("dep graph enabled").current.data.lock();
+ data[dep_node_index].fingerprint
+ }
+
+ pub fn prev_fingerprint_of(&self, dep_node: &DepNode) -> Option<Fingerprint> {
+ self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
+ }
+
+ #[inline]
+ pub fn prev_dep_node_index_of(&self, dep_node: &DepNode) -> SerializedDepNodeIndex {
+ self.data.as_ref().unwrap().previous.node_to_index(dep_node)
+ }
+
+ /// Checks whether a previous work product exists for `v` and, if
+    /// so, returns the path that leads to it. Used to skip doing work.
+ pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
+ self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
+ }
+
+ /// Access the map of work-products created during the cached run. Only
+ /// used during saving of the dep-graph.
+ pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
+ &self.data.as_ref().unwrap().previous_work_products
+ }
+
+ #[inline(always)]
+ pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)
+ where
+ F: FnOnce() -> String,
+ {
+ let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;
+
+ if dep_node_debug.borrow().contains_key(&dep_node) {
+ return;
+ }
+ let debug_str = debug_str_gen();
+ dep_node_debug.borrow_mut().insert(dep_node, debug_str);
+ }
+
+ pub(super) fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
+ self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
+ }
+
+ pub fn edge_deduplication_data(&self) -> Option<(u64, u64)> {
+ if cfg!(debug_assertions) {
+ let current_dep_graph = &self.data.as_ref().unwrap().current;
+
+ Some((
+ current_dep_graph.total_read_count.load(Relaxed),
+ current_dep_graph.total_duplicate_read_count.load(Relaxed),
+ ))
+ } else {
+ None
+ }
+ }
+
+ pub fn serialize(&self) -> SerializedDepGraph {
+ let data = self.data.as_ref().unwrap().current.data.lock();
+
+ let fingerprints: IndexVec<SerializedDepNodeIndex, _> =
+ data.iter().map(|d| d.fingerprint).collect();
+ let nodes: IndexVec<SerializedDepNodeIndex, _> = data.iter().map(|d| d.node).collect();
+
+ let total_edge_count: usize = data.iter().map(|d| d.edges.len()).sum();
+
+ let mut edge_list_indices = IndexVec::with_capacity(nodes.len());
+ let mut edge_list_data = Vec::with_capacity(total_edge_count);
+
+ for (current_dep_node_index, edges) in data.iter_enumerated().map(|(i, d)| (i, &d.edges)) {
+ let start = edge_list_data.len() as u32;
+ // This should really just be a memcpy :/
+ edge_list_data.extend(edges.iter().map(|i| SerializedDepNodeIndex::new(i.index())));
+ let end = edge_list_data.len() as u32;
+
+ debug_assert_eq!(current_dep_node_index.index(), edge_list_indices.len());
+ edge_list_indices.push((start, end));
+ }
+
+ debug_assert!(edge_list_data.len() <= u32::MAX as usize);
+ debug_assert_eq!(edge_list_data.len(), total_edge_count);
+
+ SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data }
+ }
+
+ pub fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
+ if let Some(ref data) = self.data {
+ if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
+ return data.colors.get(prev_index);
+ } else {
+ // This is a node that did not exist in the previous compilation
+ // session, so we consider it to be red.
+ return Some(DepNodeColor::Red);
+ }
+ }
+
+ None
+ }
+
+    /// Try to read a node index for the node `dep_node`.
+    /// A node will have an index when it has already been marked green, or when we can
+    /// mark it green. This function will mark the current task as a reader of the
+    /// specified node when a node index can be found for that node.
+ pub fn try_mark_green_and_read(
+ &self,
+ tcx: TyCtxt<'_>,
+ dep_node: &DepNode,
+ ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
+ self.try_mark_green(tcx, dep_node).map(|(prev_index, dep_node_index)| {
+ debug_assert!(self.is_green(&dep_node));
+ self.read_index(dep_node_index);
+ (prev_index, dep_node_index)
+ })
+ }
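+
+    // Illustrative sketch: the query system uses this as a fast path before
+    // re-executing a query:
+    //
+    //     match tcx.dep_graph.try_mark_green_and_read(tcx, &dep_node) {
+    //         Some((prev_index, dep_node_index)) => {
+    //             // Green: reuse the cached result; `prev_index` can be used
+    //             // to load it from the on-disk cache.
+    //         }
+    //         None => {
+    //             // Red (or could not be marked green): re-execute the query.
+    //         }
+    //     }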
+
+ pub fn try_mark_green(
+ &self,
+ tcx: TyCtxt<'_>,
+ dep_node: &DepNode,
+ ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
+ debug_assert!(!dep_node.kind.is_eval_always());
+
+ // Return None if the dep graph is disabled
+ let data = self.data.as_ref()?;
+
+ // Return None if the dep node didn't exist in the previous session
+ let prev_index = data.previous.node_to_index_opt(dep_node)?;
+
+ match data.colors.get(prev_index) {
+ Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
+ Some(DepNodeColor::Red) => None,
+ None => {
+ // This DepNode and the corresponding query invocation existed
+ // in the previous compilation session too, so we can try to
+ // mark it as green by recursively marking all of its
+ // dependencies green.
+ self.try_mark_previous_green(tcx, data, prev_index, &dep_node)
+ .map(|dep_node_index| (prev_index, dep_node_index))
+ }
+ }
+ }
+
+ /// Try to mark a dep-node which existed in the previous compilation session as green.
+ fn try_mark_previous_green<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ data: &DepGraphData,
+ prev_dep_node_index: SerializedDepNodeIndex,
+ dep_node: &DepNode,
+ ) -> Option<DepNodeIndex> {
+ debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);
+
+ #[cfg(not(parallel_compiler))]
+ {
+ debug_assert!(
+ !data
+ .current
+ .node_to_node_index
+ .get_shard_by_value(dep_node)
+ .lock()
+ .contains_key(dep_node)
+ );
+ debug_assert!(data.colors.get(prev_dep_node_index).is_none());
+ }
+
+ // We never try to mark eval_always nodes as green
+ debug_assert!(!dep_node.kind.is_eval_always());
+
+ debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);
+
+ let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);
+
+ let mut current_deps = SmallVec::new();
+
+ for &dep_dep_node_index in prev_deps {
+ let dep_dep_node_color = data.colors.get(dep_dep_node_index);
+
+ match dep_dep_node_color {
+ Some(DepNodeColor::Green(node_index)) => {
+ // This dependency has been marked as green before, we are
+ // still fine and can continue with checking the other
+ // dependencies.
+ debug!(
+ "try_mark_previous_green({:?}) --- found dependency {:?} to \
+ be immediately green",
+ dep_node,
+ data.previous.index_to_node(dep_dep_node_index)
+ );
+ current_deps.push(node_index);
+ }
+ Some(DepNodeColor::Red) => {
+                    // We found a dependency whose value has changed compared to
+                    // the previous compilation session. We cannot
+ // mark the DepNode as green and also don't need to bother
+ // with checking any of the other dependencies.
+ debug!(
+ "try_mark_previous_green({:?}) - END - dependency {:?} was \
+ immediately red",
+ dep_node,
+ data.previous.index_to_node(dep_dep_node_index)
+ );
+ return None;
+ }
+ None => {
+ let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index);
+
+ // We don't know the state of this dependency. If it isn't
+ // an eval_always node, let's try to mark it green recursively.
+ if !dep_dep_node.kind.is_eval_always() {
+ debug!(
+ "try_mark_previous_green({:?}) --- state of dependency {:?} \
+ is unknown, trying to mark it green",
+ dep_node, dep_dep_node
+ );
+
+ let node_index = self.try_mark_previous_green(
+ tcx,
+ data,
+ dep_dep_node_index,
+ dep_dep_node,
+ );
+ if let Some(node_index) = node_index {
+ debug!(
+ "try_mark_previous_green({:?}) --- managed to MARK \
+ dependency {:?} as green",
+ dep_node, dep_dep_node
+ );
+ current_deps.push(node_index);
+ continue;
+ }
+ } else {
+ // FIXME: This match is just a workaround for incremental bugs and should
+ // be removed. https://github.com/rust-lang/rust/issues/62649 is one such
+ // bug that must be fixed before removing this.
+ match dep_dep_node.kind {
+ DepKind::hir_owner
+ | DepKind::hir_owner_nodes
+ | DepKind::CrateMetadata => {
+ if let Some(def_id) = dep_dep_node.extract_def_id(tcx) {
+ if def_id_corresponds_to_hir_dep_node(tcx, def_id) {
+ if dep_dep_node.kind == DepKind::CrateMetadata {
+                                    // The `DefPath` has a corresponding node,
+ // and that node should have been marked
+ // either red or green in `data.colors`.
+ bug!(
+ "DepNode {:?} should have been \
+ pre-marked as red or green but wasn't.",
+ dep_dep_node
+ );
+ }
+ } else {
+ // This `DefPath` does not have a
+ // corresponding `DepNode` (e.g. a
+                                // struct field), and the `DefPath`
+ // collided with the `DefPath` of a
+ // proper item that existed in the
+ // previous compilation session.
+ //
+ // Since the given `DefPath` does not
+ // denote the item that previously
+ // existed, we just fail to mark green.
+ return None;
+ }
+ } else {
+ // If the node does not exist anymore, we
+ // just fail to mark green.
+ return None;
+ }
+ }
+ _ => {
+ // For other kinds of nodes it's OK to be
+ // forced.
+ }
+ }
+ }
+
+ // We failed to mark it green, so we try to force the query.
+ debug!(
+ "try_mark_previous_green({:?}) --- trying to force \
+ dependency {:?}",
+ dep_node, dep_dep_node
+ );
+ if crate::ty::query::force_from_dep_node(tcx, dep_dep_node) {
+ let dep_dep_node_color = data.colors.get(dep_dep_node_index);
+
+ match dep_dep_node_color {
+ Some(DepNodeColor::Green(node_index)) => {
+ debug!(
+ "try_mark_previous_green({:?}) --- managed to \
+ FORCE dependency {:?} to green",
+ dep_node, dep_dep_node
+ );
+ current_deps.push(node_index);
+ }
+ Some(DepNodeColor::Red) => {
+ debug!(
+ "try_mark_previous_green({:?}) - END - \
+ dependency {:?} was red after forcing",
+ dep_node, dep_dep_node
+ );
+ return None;
+ }
+ None => {
+ if !tcx.sess.has_errors_or_delayed_span_bugs() {
+ bug!(
+ "try_mark_previous_green() - Forcing the DepNode \
+ should have set its color"
+ )
+ } else {
+ // If the query we just forced has resulted in
+ // some kind of compilation error, we cannot rely on
+ // the dep-node color having been properly updated.
+ // This means that the query system has reached an
+ // invalid state. We let the compiler continue (by
+ // returning `None`) so it can emit error messages
+ // and wind down, but rely on the fact that this
+ // invalid state will not be persisted to the
+ // incremental compilation cache because of
+ // compilation errors being present.
+ debug!(
+ "try_mark_previous_green({:?}) - END - \
+ dependency {:?} resulted in compilation error",
+ dep_node, dep_dep_node
+ );
+ return None;
+ }
+ }
+ }
+ } else {
+ // The DepNode could not be forced.
+ debug!(
+ "try_mark_previous_green({:?}) - END - dependency {:?} \
+ could not be forced",
+ dep_node, dep_dep_node
+ );
+ return None;
+ }
+ }
+ }
+ }
+
+ // If we got here without hitting a `return` that means that all
+ // dependencies of this DepNode could be marked as green. Therefore we
+ // can also mark this DepNode as green.
+
+ // There may be multiple threads trying to mark the same dep node green concurrently
+
+ let dep_node_index = {
+ // Copy the fingerprint from the previous graph,
+ // so we don't have to recompute it
+ let fingerprint = data.previous.fingerprint_by_index(prev_dep_node_index);
+
+            // We allocate an entry for the node in the current dependency graph and
+            // add all the appropriate edges imported from the previous graph.
+ data.current.intern_node(*dep_node, current_deps, fingerprint)
+ };
+
+ // ... emitting any stored diagnostic ...
+
+ // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
+ // Maybe store a list on disk and encode this fact in the DepNodeState
+ let diagnostics = tcx.queries.on_disk_cache.load_diagnostics(tcx, prev_dep_node_index);
+
+ #[cfg(not(parallel_compiler))]
+ debug_assert!(
+ data.colors.get(prev_dep_node_index).is_none(),
+ "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
+ insertion for {:?}",
+ dep_node
+ );
+
+ if unlikely!(!diagnostics.is_empty()) {
+ self.emit_diagnostics(tcx, data, dep_node_index, prev_dep_node_index, diagnostics);
+ }
+
+ // ... and finally storing a "Green" entry in the color map.
+ // Multiple threads can all write the same color here
+ data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
+
+ debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);
+ Some(dep_node_index)
+ }
+
+ /// Atomically emits some loaded diagnostics.
+ /// This may be called concurrently on multiple threads for the same dep node.
+ #[cold]
+ #[inline(never)]
+ fn emit_diagnostics<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ data: &DepGraphData,
+ dep_node_index: DepNodeIndex,
+ prev_dep_node_index: SerializedDepNodeIndex,
+ diagnostics: Vec<Diagnostic>,
+ ) {
+ let mut emitting = data.emitting_diagnostics.lock();
+
+ if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
+ // The node is already green so diagnostics must have been emitted already
+ return;
+ }
+
+ if emitting.insert(dep_node_index) {
+ // We were the first to insert the node in the set so this thread
+ // must emit the diagnostics and signal other potentially waiting
+ // threads after.
+ mem::drop(emitting);
+
+ // Promote the previous diagnostics to the current session.
+ tcx.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics.clone().into());
+
+ let handle = tcx.sess.diagnostic();
+
+ for diagnostic in diagnostics {
+ handle.emit_diagnostic(&diagnostic);
+ }
+
+ // Mark the node as green now that diagnostics are emitted
+ data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
+
+ // Remove the node from the set
+ data.emitting_diagnostics.lock().remove(&dep_node_index);
+
+ // Wake up waiters
+ data.emitting_diagnostics_cond_var.notify_all();
+ } else {
+ // We must wait for the other thread to finish emitting the diagnostic
+
+ loop {
+ data.emitting_diagnostics_cond_var.wait(&mut emitting);
+ if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index))
+ {
+ break;
+ }
+ }
+ }
+ }
+
+ // Returns true if the given node has been marked as green during the
+    // current compilation session. Used in various assertions.
+ pub fn is_green(&self, dep_node: &DepNode) -> bool {
+ self.node_color(dep_node).map(|c| c.is_green()).unwrap_or(false)
+ }
+
+ // This method loads all on-disk cacheable query results into memory, so
+ // they can be written out to the new cache file again. Most query results
+ // will already be in memory but in the case where we marked something as
+ // green but then did not need the value, that value will never have been
+ // loaded from disk.
+ //
+ // This method will only load queries that will end up in the disk cache.
+ // Other queries will not be executed.
+ pub fn exec_cache_promotions(&self, tcx: TyCtxt<'_>) {
+ let _prof_timer = tcx.prof.generic_activity("incr_comp_query_cache_promotion");
+
+ let data = self.data.as_ref().unwrap();
+ for prev_index in data.colors.values.indices() {
+ match data.colors.get(prev_index) {
+ Some(DepNodeColor::Green(_)) => {
+ let dep_node = data.previous.index_to_node(prev_index);
+ dep_node.try_load_from_on_disk_cache(tcx);
+ }
+ None | Some(DepNodeColor::Red) => {
+ // We can skip red nodes because a node can only be marked
+ // as red if the query result was recomputed and thus is
+ // already in memory.
+ }
+ }
+ }
+ }
+
+ fn next_virtual_depnode_index(&self) -> DepNodeIndex {
+ let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
+ DepNodeIndex::from_u32(index)
+ }
+}
+
+fn def_id_corresponds_to_hir_dep_node(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap();
+ def_id.index == hir_id.owner.local_def_index
+}
+
+/// A "work product" is an intermediate result that we save into the
+/// incremental directory for later re-use. The primary example are
+/// the object files that we save for each partition at code
+/// generation time.
+///
+/// Each work product is associated with a dep-node, representing the
+/// process that produced the work-product. If that dep-node is found
+/// to be dirty when we load up, then we will delete the work-product
+/// at load time. If the work-product is found to be clean, then we
+/// will keep a record in the `previous_work_products` list.
+///
+/// In addition, work products have an associated hash. This hash is
+/// an extra hash that can be used to decide if the work-product from
+/// a previous compilation can be re-used (in addition to the dirty
+/// edges check).
+///
+/// As the primary example, consider the object files we generate for
+/// each partition. In the first run, we create partitions based on
+/// the symbols that need to be compiled. For each partition P, we
+/// hash the symbols in P and create a `WorkProduct` record associated
+/// with `DepNode::CompileCodegenUnit(P)`; the hash is computed over
+/// the set of symbols in P.
+///
+/// The next time we compile, if the `DepNode::CompileCodegenUnit(P)` is
+/// judged to be clean (which means none of the things we read to
+/// generate the partition were found to be dirty), it will be loaded
+/// into previous work products. We will then regenerate the set of
+/// symbols in the partition P and hash them (note that new symbols
+/// may be added -- for example, new monomorphizations -- even if
+/// nothing in P changed!). We will compare that hash against the
+/// previous hash. If it matches up, we can reuse the object file.
+#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
+pub struct WorkProduct {
+ pub cgu_name: String,
+ /// Saved files associated with this CGU.
+ pub saved_files: Vec<(WorkProductFileKind, String)>,
+}
+
+#[derive(Clone, Copy, Debug, RustcEncodable, RustcDecodable, PartialEq)]
+pub enum WorkProductFileKind {
+ Object,
+ Bytecode,
+ BytecodeCompressed,
+}
+
+#[derive(Clone)]
+struct DepNodeData {
+ node: DepNode,
+ edges: EdgesVec,
+ fingerprint: Fingerprint,
+}
+
+/// `CurrentDepGraph` stores the dependency graph for the current session.
+/// It will be populated as we run queries or tasks.
+///
+/// The nodes in it are identified by an index (`DepNodeIndex`).
+/// The data for each node is stored in its `DepNodeData`, found in the `data` field.
+///
+/// We never remove nodes from the graph: they are only added.
+///
+/// This struct uses two locks internally. The `data` and `node_to_node_index` fields are
+/// locked separately. Operations that take a `DepNodeIndex` typically just access
+/// the data field.
+///
+/// The only operation that must manipulate both locks is adding new nodes, in which case
+/// we first acquire the `node_to_node_index` lock and then, once a new node is to be inserted,
+/// acquire the lock on `data`.
+pub(super) struct CurrentDepGraph {
+ data: Lock<IndexVec<DepNodeIndex, DepNodeData>>,
+ node_to_node_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,
+
+ /// Used to trap when a specific edge is added to the graph.
+ /// This is used for debug purposes and is only active with `debug_assertions`.
+ #[allow(dead_code)]
+ forbidden_edge: Option<EdgeFilter>,
+
+ /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
+ /// their edges. This has the beneficial side-effect that multiple anonymous
+ /// nodes can be coalesced into one without changing the semantics of the
+ /// dependency graph. However, the merging of nodes can lead to a subtle
+ /// problem during red-green marking: The color of an anonymous node from
+ /// the current session might "shadow" the color of the node with the same
+ /// ID from the previous session. In order to side-step this problem, we make
+ /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
+ /// This is implemented by mixing a session-key into the ID fingerprint of
+ /// each anon node. The session-key is just a random number generated when
+ /// the `DepGraph` is created.
+ anon_id_seed: Fingerprint,
+
+ /// These are simple counters that are for profiling and
+ /// debugging and only active with `debug_assertions`.
+ total_read_count: AtomicU64,
+ total_duplicate_read_count: AtomicU64,
+}
+
+impl CurrentDepGraph {
+ fn new(prev_graph_node_count: usize) -> CurrentDepGraph {
+ use std::time::{SystemTime, UNIX_EPOCH};
+
+ let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
+ let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
+ let mut stable_hasher = StableHasher::new();
+ nanos.hash(&mut stable_hasher);
+
+ let forbidden_edge = if cfg!(debug_assertions) {
+ match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
+ Ok(s) => match EdgeFilter::new(&s) {
+ Ok(f) => Some(f),
+ Err(err) => bug!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
+ },
+ Err(_) => None,
+ }
+ } else {
+ None
+ };
+
+ // Pre-allocate the dep node structures. We over-allocate a little so
+ // that we hopefully don't have to re-allocate during this compilation
+ // session. The over-allocation is 2% plus a small constant to account
+ // for the fact that in very small crates 2% might not be enough.
+ let new_node_count_estimate = (prev_graph_node_count * 102) / 100 + 200;
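+        // For example, a previous graph with 50_000 nodes yields an estimate
+        // of 50_000 * 102 / 100 + 200 = 51_200.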
+
+ CurrentDepGraph {
+ data: Lock::new(IndexVec::with_capacity(new_node_count_estimate)),
+ node_to_node_index: Sharded::new(|| {
+ FxHashMap::with_capacity_and_hasher(
+ new_node_count_estimate / sharded::SHARDS,
+ Default::default(),
+ )
+ }),
+ anon_id_seed: stable_hasher.finish(),
+ forbidden_edge,
+ total_read_count: AtomicU64::new(0),
+ total_duplicate_read_count: AtomicU64::new(0),
+ }
+ }
+
+ fn complete_task(
+ &self,
+ node: DepNode,
+ task_deps: TaskDeps,
+ fingerprint: Fingerprint,
+ ) -> DepNodeIndex {
+ self.alloc_node(node, task_deps.reads, fingerprint)
+ }
+
+ fn complete_anon_task(&self, kind: DepKind, task_deps: TaskDeps) -> DepNodeIndex {
+ debug_assert!(!kind.is_eval_always());
+
+ let mut hasher = StableHasher::new();
+
+ // The dep node indices are hashed here instead of hashing the dep nodes of the
+ // dependencies. These indices may refer to different nodes per session, but this isn't
+        // a problem here because we ensure that the final dep node hash is unique per
+        // session by combining it with the per-session random number `anon_id_seed`. This
+        // hash only needs to map the dependencies to a single value on a per-session basis.
+ task_deps.reads.hash(&mut hasher);
+
+ let target_dep_node = DepNode {
+ kind,
+
+ // Fingerprint::combine() is faster than sending Fingerprint
+ // through the StableHasher (at least as long as StableHasher
+ // is so slow).
+ hash: self.anon_id_seed.combine(hasher.finish()),
+ };
+
+ self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO)
+ }
+
+ fn alloc_node(
+ &self,
+ dep_node: DepNode,
+ edges: EdgesVec,
+ fingerprint: Fingerprint,
+ ) -> DepNodeIndex {
+ debug_assert!(
+ !self.node_to_node_index.get_shard_by_value(&dep_node).lock().contains_key(&dep_node)
+ );
+ self.intern_node(dep_node, edges, fingerprint)
+ }
+
+ fn intern_node(
+ &self,
+ dep_node: DepNode,
+ edges: EdgesVec,
+ fingerprint: Fingerprint,
+ ) -> DepNodeIndex {
+ match self.node_to_node_index.get_shard_by_value(&dep_node).lock().entry(dep_node) {
+ Entry::Occupied(entry) => *entry.get(),
+ Entry::Vacant(entry) => {
+ let mut data = self.data.lock();
+ let dep_node_index = DepNodeIndex::new(data.len());
+ data.push(DepNodeData { node: dep_node, edges, fingerprint });
+ entry.insert(dep_node_index);
+ dep_node_index
+ }
+ }
+ }
+}
+
+impl DepGraphData {
+ #[inline(never)]
+ fn read_index(&self, source: DepNodeIndex) {
+ ty::tls::with_context_opt(|icx| {
+ let icx = if let Some(icx) = icx { icx } else { return };
+ if let Some(task_deps) = icx.task_deps {
+ let mut task_deps = task_deps.lock();
+ let task_deps = &mut *task_deps;
+ if cfg!(debug_assertions) {
+ self.current.total_read_count.fetch_add(1, Relaxed);
+ }
+
+                // As long as we only have a low number of reads, we can avoid doing a hash
+                // insert and potentially allocating/reallocating the hashmap.
+ let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
+ task_deps.reads.iter().all(|other| *other != source)
+ } else {
+ task_deps.read_set.insert(source)
+ };
+ if new_read {
+ task_deps.reads.push(source);
+ if task_deps.reads.len() == TASK_DEPS_READS_CAP {
+ // Fill `read_set` with what we have so far so we can use the hashset next
+ // time
+ task_deps.read_set.extend(task_deps.reads.iter().copied());
+ }
+
+ #[cfg(debug_assertions)]
+ {
+ if let Some(target) = task_deps.node {
+ let data = self.current.data.lock();
+ if let Some(ref forbidden_edge) = self.current.forbidden_edge {
+ let source = data[source].node;
+ if forbidden_edge.test(&source, &target) {
+ bug!("forbidden edge {:?} -> {:?} created", source, target)
+ }
+ }
+ }
+ }
+ } else if cfg!(debug_assertions) {
+ self.current.total_duplicate_read_count.fetch_add(1, Relaxed);
+ }
+ }
+ })
+ }
+}
+
+/// The capacity of the `reads` field's `SmallVec`.
+const TASK_DEPS_READS_CAP: usize = 8;
+type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>;
+#[derive(Default)]
+pub struct TaskDeps {
+ #[cfg(debug_assertions)]
+ node: Option<DepNode>,
+ reads: EdgesVec,
+ read_set: FxHashSet<DepNodeIndex>,
+}
+
+// A data structure that stores Option<DepNodeColor> values as a contiguous
+// array, using one u32 per entry.
+struct DepNodeColorMap {
+ values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
+}
+
+const COMPRESSED_NONE: u32 = 0;
+const COMPRESSED_RED: u32 = 1;
+const COMPRESSED_FIRST_GREEN: u32 = 2;
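+
+// For example, `None` is stored as `0`, `Red` as `1`, and
+// `Green(DepNodeIndex(5))` as `5 + COMPRESSED_FIRST_GREEN == 7`.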
+
+impl DepNodeColorMap {
+ fn new(size: usize) -> DepNodeColorMap {
+ DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
+ }
+
+ fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
+ match self.values[index].load(Ordering::Acquire) {
+ COMPRESSED_NONE => None,
+ COMPRESSED_RED => Some(DepNodeColor::Red),
+ value => {
+ Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
+ }
+ }
+ }
+
+ fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
+ self.values[index].store(
+ match color {
+ DepNodeColor::Red => COMPRESSED_RED,
+ DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
+ },
+ Ordering::Release,
+ )
+ }
+}
--- /dev/null
+pub mod debug;
+mod dep_node;
+mod graph;
+mod prev;
+mod query;
+mod safe;
+mod serialized;
+
+pub(crate) use self::dep_node::DepNodeParams;
+pub use self::dep_node::{label_strs, DepConstructor, DepKind, DepNode, WorkProductId};
+pub use self::graph::WorkProductFileKind;
+pub use self::graph::{hash_result, DepGraph, DepNodeColor, DepNodeIndex, TaskDeps, WorkProduct};
+pub use self::prev::PreviousDepGraph;
+pub use self::query::DepGraphQuery;
+pub use self::safe::AssertDepGraphSafe;
+pub use self::safe::DepGraphSafe;
+pub use self::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
--- /dev/null
+use super::dep_node::DepNode;
+use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
+use crate::ich::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+
+#[derive(Debug, RustcEncodable, RustcDecodable, Default)]
+pub struct PreviousDepGraph {
+ data: SerializedDepGraph,
+ index: FxHashMap<DepNode, SerializedDepNodeIndex>,
+}
+
+impl PreviousDepGraph {
+ pub fn new(data: SerializedDepGraph) -> PreviousDepGraph {
+ let index: FxHashMap<_, _> =
+ data.nodes.iter_enumerated().map(|(idx, &dep_node)| (dep_node, idx)).collect();
+ PreviousDepGraph { data, index }
+ }
+
+ #[inline]
+ pub fn edge_targets_from(
+ &self,
+ dep_node_index: SerializedDepNodeIndex,
+ ) -> &[SerializedDepNodeIndex] {
+ self.data.edge_targets_from(dep_node_index)
+ }
+
+ #[inline]
+ pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode {
+ self.data.nodes[dep_node_index]
+ }
+
+ #[inline]
+ pub fn node_to_index(&self, dep_node: &DepNode) -> SerializedDepNodeIndex {
+ self.index[dep_node]
+ }
+
+ #[inline]
+ pub fn node_to_index_opt(&self, dep_node: &DepNode) -> Option<SerializedDepNodeIndex> {
+ self.index.get(dep_node).cloned()
+ }
+
+ #[inline]
+ pub fn fingerprint_of(&self, dep_node: &DepNode) -> Option<Fingerprint> {
+ self.index.get(dep_node).map(|&node_index| self.data.fingerprints[node_index])
+ }
+
+ #[inline]
+ pub fn fingerprint_by_index(&self, dep_node_index: SerializedDepNodeIndex) -> Fingerprint {
+ self.data.fingerprints[dep_node_index]
+ }
+
+ pub fn node_count(&self) -> usize {
+ self.index.len()
+ }
+}
--- /dev/null
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::graph::implementation::{
+ Direction, Graph, NodeIndex, INCOMING, OUTGOING,
+};
+
+use super::DepNode;
+
+pub struct DepGraphQuery {
+ pub graph: Graph<DepNode, ()>,
+ pub indices: FxHashMap<DepNode, NodeIndex>,
+}
+
+impl DepGraphQuery {
+ pub fn new(nodes: &[DepNode], edges: &[(DepNode, DepNode)]) -> DepGraphQuery {
+ let mut graph = Graph::with_capacity(nodes.len(), edges.len());
+ let mut indices = FxHashMap::default();
+ for node in nodes {
+ indices.insert(node.clone(), graph.add_node(node.clone()));
+ }
+
+ for &(ref source, ref target) in edges {
+ let source = indices[source];
+ let target = indices[target];
+ graph.add_edge(source, target, ());
+ }
+
+ DepGraphQuery { graph, indices }
+ }
+
+ pub fn contains_node(&self, node: &DepNode) -> bool {
+ self.indices.contains_key(&node)
+ }
+
+ pub fn nodes(&self) -> Vec<&DepNode> {
+ self.graph.all_nodes().iter().map(|n| &n.data).collect()
+ }
+
+ pub fn edges(&self) -> Vec<(&DepNode, &DepNode)> {
+ self.graph
+ .all_edges()
+ .iter()
+ .map(|edge| (edge.source(), edge.target()))
+ .map(|(s, t)| (self.graph.node_data(s), self.graph.node_data(t)))
+ .collect()
+ }
+
+ fn reachable_nodes(&self, node: &DepNode, direction: Direction) -> Vec<&DepNode> {
+ if let Some(&index) = self.indices.get(node) {
+ self.graph.depth_traverse(index, direction).map(|s| self.graph.node_data(s)).collect()
+ } else {
+ vec![]
+ }
+ }
+
+ /// All nodes reachable from `node`. In other words, things that
+ /// will have to be recomputed if `node` changes.
+ pub fn transitive_successors(&self, node: &DepNode) -> Vec<&DepNode> {
+ self.reachable_nodes(node, OUTGOING)
+ }
+
+ /// All nodes that can reach `node`.
+ pub fn transitive_predecessors(&self, node: &DepNode) -> Vec<&DepNode> {
+ self.reachable_nodes(node, INCOMING)
+ }
+
+ /// Just the outgoing edges from `node`.
+ pub fn immediate_successors(&self, node: &DepNode) -> Vec<&DepNode> {
+ if let Some(&index) = self.indices.get(&node) {
+ self.graph.successor_nodes(index).map(|s| self.graph.node_data(s)).collect()
+ } else {
+ vec![]
+ }
+ }
+}
--- /dev/null
+//! The `DepGraphSafe` trait
+
+use crate::ty::TyCtxt;
+
+use rustc_ast::ast::NodeId;
+use rustc_hir::def_id::DefId;
+use rustc_hir::BodyId;
+
+/// The `DepGraphSafe` trait is used to specify what kinds of values
+/// are safe to "leak" into a task. The idea is that this should be
+/// only be implemented for things like the tcx as well as various id
+/// types, which will create reads in the dep-graph whenever the trait
+/// loads anything that might depend on the input program.
+pub trait DepGraphSafe {}
+
+/// A `BodyId` on its own doesn't give access to any particular state.
+/// You must fetch the state from the various maps or generate
+/// on-demand queries, all of which create reads.
+impl DepGraphSafe for BodyId {}
+
+/// A `NodeId` on its own doesn't give access to any particular state.
+/// You must fetch the state from the various maps or generate
+/// on-demand queries, all of which create reads.
+impl DepGraphSafe for NodeId {}
+
+/// A `DefId` on its own doesn't give access to any particular state.
+/// You must fetch the state from the various maps or generate
+/// on-demand queries, all of which create reads.
+impl DepGraphSafe for DefId {}
+
+/// The type context itself can be used to access all kinds of tracked
+/// state, but those accesses should always generate read events.
+impl<'tcx> DepGraphSafe for TyCtxt<'tcx> {}
+
+/// Tuples make it easy to build up state.
+impl<A, B> DepGraphSafe for (A, B)
+where
+ A: DepGraphSafe,
+ B: DepGraphSafe,
+{
+}
+
+/// Shared ref to dep-graph-safe stuff should still be dep-graph-safe.
+impl<'a, A> DepGraphSafe for &'a A where A: DepGraphSafe {}
+
+/// Mut ref to dep-graph-safe stuff should still be dep-graph-safe.
+impl<'a, A> DepGraphSafe for &'a mut A where A: DepGraphSafe {}
+
+/// No data here! :)
+impl DepGraphSafe for () {}
+
+/// A convenient override that lets you pass arbitrary state into a
+/// task. Every use should be accompanied by a comment explaining why
+/// it makes sense (or how it could be refactored away in the future).
+pub struct AssertDepGraphSafe<T>(pub T);
+
+impl<T> DepGraphSafe for AssertDepGraphSafe<T> {}
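+
+// Illustrative sketch (`config` is a hypothetical value): wrapping otherwise
+// unvetted state so it can be passed into a task, together with the
+// justification that every use is supposed to carry:
+//
+//     // ASSERT-SAFE: `config` contains only command-line flags, which are
+//     // already hashed into the crate hash.
+//     let arg = AssertDepGraphSafe(config);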
--- /dev/null
+//! The data that we will serialize and deserialize.
+
+use crate::dep_graph::DepNode;
+use crate::ich::Fingerprint;
+use rustc_index::vec::IndexVec;
+
+rustc_index::newtype_index! {
+ pub struct SerializedDepNodeIndex { .. }
+}
+
+/// Data for use when recompiling the **current crate**.
+#[derive(Debug, RustcEncodable, RustcDecodable, Default)]
+pub struct SerializedDepGraph {
+ /// The set of all DepNodes in the graph
+ pub nodes: IndexVec<SerializedDepNodeIndex, DepNode>,
+ /// The set of all Fingerprints in the graph. Each Fingerprint corresponds to
+ /// the DepNode at the same index in the nodes vector.
+ pub fingerprints: IndexVec<SerializedDepNodeIndex, Fingerprint>,
+ /// For each DepNode, stores the list of edges originating from that
+ /// DepNode. Encoded as a [start, end) pair indexing into edge_list_data,
+ /// which holds the actual DepNodeIndices of the target nodes.
+ pub edge_list_indices: IndexVec<SerializedDepNodeIndex, (u32, u32)>,
+ /// A flattened list of all edge targets in the graph. Edge sources are
+ /// implicit in edge_list_indices.
+ pub edge_list_data: Vec<SerializedDepNodeIndex>,
+}
+
+impl SerializedDepGraph {
+ #[inline]
+ pub fn edge_targets_from(&self, source: SerializedDepNodeIndex) -> &[SerializedDepNodeIndex] {
+ let targets = self.edge_list_indices[source];
+ &self.edge_list_data[targets.0 as usize..targets.1 as usize]
+ }
+}
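+
+// For example, if node 0 has edges to nodes 1 and 2, node 1 has a single edge
+// to node 2, and node 2 has none, then:
+//
+//     edge_list_indices = [(0, 2), (2, 3), (3, 3)]
+//     edge_list_data    = [1, 2, 2]
+//
+// and `edge_targets_from(0)` returns `&edge_list_data[0..2]`, i.e. `[1, 2]`.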