use crate::ty::{self, TyCtxt};
use parking_lot::{Condvar, Mutex};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::QueryInvocationId;
use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
use rustc_errors::Diagnostic;
use rustc_index::vec::{Idx, IndexVec};
use smallvec::{smallvec, SmallVec};
use std::collections::hash_map::Entry;
use std::env;
use std::hash::Hash;
use std::mem;
use std::sync::atomic::Ordering::Relaxed;

use crate::ich::{Fingerprint, StableHashingContext, StableHashingContextProvider};

use super::debug::EdgeFilter;
use super::dep_node::{DepKind, DepNode, WorkProductId};
use super::prev::PreviousDepGraph;
use super::query::DepGraphQuery;
use super::safe::DepGraphSafe;
use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
#[derive(Clone)]
pub struct DepGraph {
    data: Option<Lrc<DepGraphData>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Lrc<AtomicU32>,
}
rustc_index::newtype_index! {
    pub struct DepNodeIndex { .. }
}

impl DepNodeIndex {
    pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
}

impl std::convert::From<DepNodeIndex> for QueryInvocationId {
    #[inline]
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}
#[derive(PartialEq)]
pub enum DepNodeColor {
    Red,
    Green(DepNodeIndex),
}

impl DepNodeColor {
    pub fn is_green(self) -> bool {
        match self {
            DepNodeColor::Red => false,
            DepNodeColor::Green(_) => true,
        }
    }
}
struct DepGraphData {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: we don't merge the previous dep-graph
    /// into the current one anymore.
    current: CurrentDepGraph,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: PreviousDepGraph,

    colors: DepNodeColorMap,

    /// A set of loaded diagnostics that is in the process of being emitted.
    emitting_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,

    /// Used to wait for diagnostics to be emitted.
    emitting_diagnostics_cond_var: Condvar,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: FxHashMap<WorkProductId, WorkProduct>,

    dep_node_debug: Lock<FxHashMap<DepNode, String>>,
}
pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Option<Fingerprint>
where
    R: for<'a> HashStable<StableHashingContext<'a>>,
{
    let mut stable_hasher = StableHasher::new();

    result.hash_stable(hcx, &mut stable_hasher);

    Some(stable_hasher.finish())
}
impl DepGraph {
    pub fn new(
        prev_graph: PreviousDepGraph,
        prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
    ) -> DepGraph {
        let prev_graph_node_count = prev_graph.node_count();

        DepGraph {
            data: Some(Lrc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current: CurrentDepGraph::new(prev_graph_node_count),
                emitting_diagnostics: Default::default(),
                emitting_diagnostics_cond_var: Condvar::new(),
                previous: prev_graph,
                colors: DepNodeColorMap::new(prev_graph_node_count),
            })),
            virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
        }
    }

    pub fn new_disabled() -> DepGraph {
        DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
    }
    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }
    pub fn query(&self) -> DepGraphQuery {
        let data = self.data.as_ref().unwrap().current.data.lock();
        let nodes: Vec<_> = data.iter().map(|n| n.node).collect();
        let mut edges = Vec::new();
        for (from, edge_targets) in data.iter().map(|d| (d.node, &d.edges)) {
            for &edge_target in edge_targets.iter() {
                let to = data[edge_target].node;
                edges.push((from, to));
            }
        }

        DepGraphQuery::new(&nodes[..], &edges[..])
    }
    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            ty::tls::with_context_opt(|icx| {
                let icx = if let Some(icx) = icx { icx } else { return };
                assert!(icx.task_deps.is_none(), "expected no task dependency tracking");
            })
        }
    }
    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        ty::tls::with_context(|icx| {
            let icx = ty::tls::ImplicitCtxt { task_deps: None, ..icx.clone() };

            ty::tls::enter_context(&icx, |_| op())
        })
    }
    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the
    ///   `arg` parameter.
    ///
    /// [rustc guide]: https://rust-lang.github.io/rustc-guide/incremental-compilation.html
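    ///
    /// # Example
    ///
    /// An illustrative sketch only -- `DepConstructor::SomeQuery`, `def_id`,
    /// `SomeResult`, and `compute_result` below are hypothetical stand-ins,
    /// not real call sites:
    ///
    /// ```ignore (illustrative)
    /// fn compute_result(tcx: TyCtxt<'_>, def_id: DefId) -> SomeResult { /* ... */ }
    ///
    /// let (result, dep_node_index) = tcx.dep_graph.with_task(
    ///     DepNode::new(tcx, DepConstructor::SomeQuery(def_id)),
    ///     tcx,    // `cx`: must be `DepGraphSafe`
    ///     def_id, // `arg`: must be `DepGraphSafe`
    ///     compute_result,
    ///     hash_result,
    /// );
    /// ```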
    pub fn with_task<'a, C, A, R>(
        &self,
        key: DepNode,
        cx: C,
        arg: A,
        task: fn(C, A) -> R,
        hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex)
    where
        C: DepGraphSafe + StableHashingContextProvider<'a>,
    {
        self.with_task_impl(
            key,
            cx,
            arg,
            false,
            task,
            |_key| {
                Some(TaskDeps {
                    #[cfg(debug_assertions)]
                    node: Some(_key),
                    reads: SmallVec::new(),
                    read_set: Default::default(),
                })
            },
            |data, key, fingerprint, task| data.complete_task(key, task.unwrap(), fingerprint),
            hash_result,
        )
    }
    /// Creates a new dep-graph input with value `input`.
    pub fn input_task<'a, C, R>(&self, key: DepNode, cx: C, input: R) -> (R, DepNodeIndex)
    where
        C: DepGraphSafe + StableHashingContextProvider<'a>,
        R: for<'b> HashStable<StableHashingContext<'b>>,
    {
        fn identity_fn<C, A>(_: C, arg: A) -> A {
            arg
        }

        self.with_task_impl(
            key,
            cx,
            input,
            true,
            identity_fn,
            |_| None,
            |data, key, fingerprint, _| data.alloc_node(key, SmallVec::new(), fingerprint),
            hash_result::<R>,
        )
    }
    fn with_task_impl<'a, C, A, R>(
        &self,
        key: DepNode,
        cx: C,
        arg: A,
        no_tcx: bool,
        task: fn(C, A) -> R,
        create_task: fn(DepNode) -> Option<TaskDeps>,
        finish_task_and_alloc_depnode: fn(
            &CurrentDepGraph,
            DepNode,
            Fingerprint,
            Option<TaskDeps>,
        ) -> DepNodeIndex,
        hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex)
    where
        C: DepGraphSafe + StableHashingContextProvider<'a>,
    {
        if let Some(ref data) = self.data {
            let task_deps = create_task(key).map(|deps| Lock::new(deps));

            // In incremental mode, hash the result of the task. We don't
            // do anything with the hash yet, but we are computing it
            // anyway so that
            //  - we make sure that the infrastructure works and
            //  - we can get an idea of the runtime cost.
            let mut hcx = cx.get_stable_hashing_context();

            let result = if no_tcx {
                task(cx, arg)
            } else {
                ty::tls::with_context(|icx| {
                    let icx =
                        ty::tls::ImplicitCtxt { task_deps: task_deps.as_ref(), ..icx.clone() };

                    ty::tls::enter_context(&icx, |_| task(cx, arg))
                })
            };

            let current_fingerprint = hash_result(&mut hcx, &result);

            let dep_node_index = finish_task_and_alloc_depnode(
                &data.current,
                key,
                current_fingerprint.unwrap_or(Fingerprint::ZERO),
                task_deps.map(|lock| lock.into_inner()),
            );

            let print_status = cfg!(debug_assertions) && hcx.sess().opts.debugging_opts.dep_tasks;

            // Determine the color of the new DepNode.
            if let Some(prev_index) = data.previous.node_to_index_opt(&key) {
                let prev_fingerprint = data.previous.fingerprint_by_index(prev_index);

                let color = if let Some(current_fingerprint) = current_fingerprint {
                    if current_fingerprint == prev_fingerprint {
                        if print_status {
                            eprintln!("[task::green] {:?}", key);
                        }
                        DepNodeColor::Green(dep_node_index)
                    } else {
                        if print_status {
                            eprintln!("[task::red] {:?}", key);
                        }
                        DepNodeColor::Red
                    }
                } else {
                    if print_status {
                        eprintln!("[task::unknown] {:?}", key);
                    }
                    // Mark the node as Red if we can't hash the result
                    DepNodeColor::Red
                };

                debug_assert!(
                    data.colors.get(prev_index).is_none(),
                    "DepGraph::with_task() - Duplicate DepNodeColor \
                     insertion for {:?}",
                    key
                );

                data.colors.insert(prev_index, color);
            } else if print_status {
                eprintln!("[task::new] {:?}", key);
            }

            (result, dep_node_index)
        } else {
            (task(cx, arg), self.next_virtual_depnode_index())
        }
    }
    /// Executes something within an "anonymous" task, that is, a task the
    /// `DepNode` of which is determined by the list of inputs it read from.
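    ///
    /// # Example
    ///
    /// An illustrative sketch only -- `do_selection` is a hypothetical
    /// stand-in for whatever work the caller performs:
    ///
    /// ```ignore (illustrative)
    /// let (result, dep_node_index) =
    ///     tcx.dep_graph.with_anon_task(DepKind::TraitSelect, || do_selection());
    /// ```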
    pub fn with_anon_task<OP, R>(&self, dep_kind: DepKind, op: OP) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        if let Some(ref data) = self.data {
            let (result, task_deps) = ty::tls::with_context(|icx| {
                let task_deps = Lock::new(TaskDeps {
                    #[cfg(debug_assertions)]
                    node: None,
                    reads: SmallVec::new(),
                    read_set: Default::default(),
                });

                let r = {
                    let icx = ty::tls::ImplicitCtxt { task_deps: Some(&task_deps), ..icx.clone() };

                    ty::tls::enter_context(&icx, |_| op())
                };

                (r, task_deps.into_inner())
            });
            let dep_node_index = data.current.complete_anon_task(dep_kind, task_deps);
            (result, dep_node_index)
        } else {
            (op(), self.next_virtual_depnode_index())
        }
    }
    /// Executes something within an "eval-always" task which is a task
    /// that runs whenever anything changes.
    pub fn with_eval_always_task<'a, C, A, R>(
        &self,
        key: DepNode,
        cx: C,
        arg: A,
        task: fn(C, A) -> R,
        hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex)
    where
        C: DepGraphSafe + StableHashingContextProvider<'a>,
    {
        self.with_task_impl(
            key,
            cx,
            arg,
            false,
            task,
            |_| None,
            |data, key, fingerprint, _| data.alloc_node(key, smallvec![], fingerprint),
            hash_result,
        )
    }
    #[inline]
    pub fn read(&self, v: DepNode) {
        if let Some(ref data) = self.data {
            let map = data.current.node_to_node_index.get_shard_by_value(&v).lock();
            if let Some(dep_node_index) = map.get(&v).copied() {
                // Release the shard lock before taking the `data` lock in `read_index`.
                std::mem::drop(map);
                data.read_index(dep_node_index);
            } else {
                bug!("DepKind {:?} should be pre-allocated but isn't.", v.kind)
            }
        }
    }
    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            data.read_index(dep_node_index);
        }
    }

    #[inline]
    pub fn dep_node_index_of(&self, dep_node: &DepNode) -> DepNodeIndex {
        self.data
            .as_ref()
            .unwrap()
            .current
            .node_to_node_index
            .get_shard_by_value(dep_node)
            .lock()
            .get(dep_node)
            .cloned()
            .unwrap()
    }
    #[inline]
    pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
        if let Some(ref data) = self.data {
            data.current
                .node_to_node_index
                .get_shard_by_value(&dep_node)
                .lock()
                .contains_key(dep_node)
        } else {
            false
        }
    }
    #[inline]
    pub fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint {
        let data = self.data.as_ref().expect("dep graph enabled").current.data.lock();
        data[dep_node_index].fingerprint
    }

    pub fn prev_fingerprint_of(&self, dep_node: &DepNode) -> Option<Fingerprint> {
        self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
    }

    #[inline]
    pub fn prev_dep_node_index_of(&self, dep_node: &DepNode) -> SerializedDepNodeIndex {
        self.data.as_ref().unwrap().previous.node_to_index(dep_node)
    }
    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
        &self.data.as_ref().unwrap().previous_work_products
    }
    #[inline(always)]
    pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        let debug_str = debug_str_gen();
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub(super) fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }
    pub fn edge_deduplication_data(&self) -> Option<(u64, u64)> {
        if cfg!(debug_assertions) {
            let current_dep_graph = &self.data.as_ref().unwrap().current;

            Some((
                current_dep_graph.total_read_count.load(Relaxed),
                current_dep_graph.total_duplicate_read_count.load(Relaxed),
            ))
        } else {
            None
        }
    }
    pub fn serialize(&self) -> SerializedDepGraph {
        let data = self.data.as_ref().unwrap().current.data.lock();

        let fingerprints: IndexVec<SerializedDepNodeIndex, _> =
            data.iter().map(|d| d.fingerprint).collect();
        let nodes: IndexVec<SerializedDepNodeIndex, _> = data.iter().map(|d| d.node).collect();

        let total_edge_count: usize = data.iter().map(|d| d.edges.len()).sum();

        let mut edge_list_indices = IndexVec::with_capacity(nodes.len());
        let mut edge_list_data = Vec::with_capacity(total_edge_count);

        for (current_dep_node_index, edges) in data.iter_enumerated().map(|(i, d)| (i, &d.edges)) {
            let start = edge_list_data.len() as u32;
            // This should really just be a memcpy :/
            edge_list_data.extend(edges.iter().map(|i| SerializedDepNodeIndex::new(i.index())));
            let end = edge_list_data.len() as u32;

            debug_assert_eq!(current_dep_node_index.index(), edge_list_indices.len());
            edge_list_indices.push((start, end));
        }

        debug_assert!(edge_list_data.len() <= ::std::u32::MAX as usize);
        debug_assert_eq!(edge_list_data.len(), total_edge_count);

        SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data }
    }
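    // The `(start, end)` pairs pushed above are half-open ranges into
    // `edge_list_data`. A sketch of how a consumer would recover the edges of
    // node `i` from the serialized form (`serialized` is a hypothetical
    // `SerializedDepGraph` value):
    //
    //     let (start, end) = serialized.edge_list_indices[i];
    //     let edges = &serialized.edge_list_data[start as usize..end as usize];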
    pub fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
        if let Some(ref data) = self.data {
            if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
                return data.colors.get(prev_index);
            } else {
                // This is a node that did not exist in the previous compilation
                // session, so we consider it to be red.
                return Some(DepNodeColor::Red);
            }
        }

        None
    }
    /// Try to read a node index for the node `dep_node`.
    /// A node will have an index when it has already been marked green, or when we can mark it
    /// green. This function will mark the current task as a reader of the specified node when
    /// a node index can be found for that node.
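    ///
    /// # Example
    ///
    /// An illustrative sketch only -- `recompute` is a hypothetical fallback:
    ///
    /// ```ignore (illustrative)
    /// match tcx.dep_graph.try_mark_green_and_read(tcx, &dep_node) {
    ///     Some((prev_index, dep_node_index)) => {
    ///         // Green: the cached result is still valid, and the current
    ///         // task now has an edge to `dep_node_index`.
    ///     }
    ///     None => {
    ///         // Red or unknown: the result must be recomputed.
    ///         recompute(tcx);
    ///     }
    /// }
    /// ```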
    pub fn try_mark_green_and_read(
        &self,
        tcx: TyCtxt<'_>,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        self.try_mark_green(tcx, dep_node).map(|(prev_index, dep_node_index)| {
            debug_assert!(self.is_green(&dep_node));
            self.read_index(dep_node_index);
            (prev_index, dep_node_index)
        })
    }
    pub fn try_mark_green(
        &self,
        tcx: TyCtxt<'_>,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!dep_node.kind.is_eval_always());

        // Return None if the dep graph is disabled
        let data = self.data.as_ref()?;

        // Return None if the dep node didn't exist in the previous session
        let prev_index = data.previous.node_to_index_opt(dep_node)?;

        match data.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
            None => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(tcx, data, prev_index, &dep_node)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }
    /// Try to mark a dep-node which existed in the previous compilation session as green.
    fn try_mark_previous_green<'tcx>(
        &self,
        tcx: TyCtxt<'tcx>,
        data: &DepGraphData,
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode,
    ) -> Option<DepNodeIndex> {
        debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);

        #[cfg(not(parallel_compiler))]
        {
            debug_assert!(
                !data
                    .current
                    .node_to_node_index
                    .get_shard_by_value(dep_node)
                    .lock()
                    .contains_key(dep_node)
            );
            debug_assert!(data.colors.get(prev_dep_node_index).is_none());
        }

        // We never try to mark eval_always nodes as green
        debug_assert!(!dep_node.kind.is_eval_always());

        debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);

        let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);

        let mut current_deps = SmallVec::new();

        for &dep_dep_node_index in prev_deps {
            let dep_dep_node_color = data.colors.get(dep_dep_node_index);

            match dep_dep_node_color {
                Some(DepNodeColor::Green(node_index)) => {
                    // This dependency has been marked as green before, we are
                    // still fine and can continue with checking the other
                    // dependencies.
                    debug!(
                        "try_mark_previous_green({:?}) --- found dependency {:?} to \
                         be immediately green",
                        dep_node,
                        data.previous.index_to_node(dep_dep_node_index)
                    );
                    current_deps.push(node_index);
                }
                Some(DepNodeColor::Red) => {
                    // We found a dependency the value of which has changed
                    // compared to the previous compilation session. We cannot
                    // mark the DepNode as green and also don't need to bother
                    // with checking any of the other dependencies.
                    debug!(
                        "try_mark_previous_green({:?}) - END - dependency {:?} was \
                         immediately red",
                        dep_node,
                        data.previous.index_to_node(dep_dep_node_index)
                    );
                    return None;
                }
                None => {
                    let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index);

                    // We don't know the state of this dependency. If it isn't
                    // an eval_always node, let's try to mark it green recursively.
                    if !dep_dep_node.kind.is_eval_always() {
                        debug!(
                            "try_mark_previous_green({:?}) --- state of dependency {:?} \
                             is unknown, trying to mark it green",
                            dep_node, dep_dep_node
                        );

                        let node_index = self.try_mark_previous_green(
                            tcx,
                            data,
                            dep_dep_node_index,
                            dep_dep_node,
                        );
                        if let Some(node_index) = node_index {
                            debug!(
                                "try_mark_previous_green({:?}) --- managed to MARK \
                                 dependency {:?} as green",
                                dep_node, dep_dep_node
                            );
                            current_deps.push(node_index);
                            continue;
                        }
                    }
                    match dep_dep_node.kind {
                        DepKind::Hir | DepKind::HirBody | DepKind::CrateMetadata => {
                            if dep_dep_node.extract_def_id(tcx).is_none() {
                                // If the node does not exist anymore, we
                                // just fail to mark green.
                                return None;
                            } else {
                                // If the node does exist, it should have
                                // been pre-allocated.
                                bug!(
                                    "DepNode {:?} should have been \
                                     pre-allocated but wasn't.",
                                    dep_dep_node
                                )
                            }
                        }
                        _ => {
                            // For other kinds of nodes it's OK to be
                            // forced.
                        }
                    }

                    // We failed to mark it green, so we try to force the query.
                    debug!(
                        "try_mark_previous_green({:?}) --- trying to force \
                         dependency {:?}",
                        dep_node, dep_dep_node
                    );
                    if crate::ty::query::force_from_dep_node(tcx, dep_dep_node) {
                        let dep_dep_node_color = data.colors.get(dep_dep_node_index);

                        match dep_dep_node_color {
                            Some(DepNodeColor::Green(node_index)) => {
                                debug!(
                                    "try_mark_previous_green({:?}) --- managed to \
                                     FORCE dependency {:?} to green",
                                    dep_node, dep_dep_node
                                );
                                current_deps.push(node_index);
                            }
                            Some(DepNodeColor::Red) => {
                                debug!(
                                    "try_mark_previous_green({:?}) - END - \
                                     dependency {:?} was red after forcing",
                                    dep_node, dep_dep_node
                                );
                                return None;
                            }
                            None => {
                                if !tcx.sess.has_errors_or_delayed_span_bugs() {
                                    bug!(
                                        "try_mark_previous_green() - Forcing the DepNode \
                                         should have set its color"
                                    )
                                } else {
                                    // If the query we just forced has resulted in
                                    // some kind of compilation error, we cannot rely on
                                    // the dep-node color having been properly updated.
                                    // This means that the query system has reached an
                                    // invalid state. We let the compiler continue (by
                                    // returning `None`) so it can emit error messages
                                    // and wind down, but rely on the fact that this
                                    // invalid state will not be persisted to the
                                    // incremental compilation cache because of
                                    // compilation errors being present.
                                    debug!(
                                        "try_mark_previous_green({:?}) - END - \
                                         dependency {:?} resulted in compilation error",
                                        dep_node, dep_dep_node
                                    );
                                    return None;
                                }
                            }
                        }
                    } else {
                        // The DepNode could not be forced.
                        debug!(
                            "try_mark_previous_green({:?}) - END - dependency {:?} \
                             could not be forced",
                            dep_node, dep_dep_node
                        );
                        return None;
                    }
                }
            }
        }

        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green.

        // There may be multiple threads trying to mark the same dep node green concurrently

        let dep_node_index = {
            // Copy the fingerprint from the previous graph,
            // so we don't have to recompute it
            let fingerprint = data.previous.fingerprint_by_index(prev_dep_node_index);

            // We allocate an entry for the node in the current dependency graph and
            // add all the appropriate edges imported from the previous graph
            data.current.intern_node(*dep_node, current_deps, fingerprint)
        };

        // ... emitting any stored diagnostic ...

        // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
        // Maybe store a list on disk and encode this fact in the DepNodeState
        let diagnostics = tcx.queries.on_disk_cache.load_diagnostics(tcx, prev_dep_node_index);

        #[cfg(not(parallel_compiler))]
        debug_assert!(
            data.colors.get(prev_dep_node_index).is_none(),
            "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
             insertion for {:?}",
            dep_node
        );

        if unlikely!(diagnostics.len() > 0) {
            self.emit_diagnostics(tcx, data, dep_node_index, prev_dep_node_index, diagnostics);
        }

        // ... and finally storing a "Green" entry in the color map.
        // Multiple threads can all write the same color here
        data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

        debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);
        Some(dep_node_index)
    }
    /// Atomically emits some loaded diagnostics.
    /// This may be called concurrently on multiple threads for the same dep node.
    #[cold]
    #[inline(never)]
    fn emit_diagnostics<'tcx>(
        &self,
        tcx: TyCtxt<'tcx>,
        data: &DepGraphData,
        dep_node_index: DepNodeIndex,
        prev_dep_node_index: SerializedDepNodeIndex,
        diagnostics: Vec<Diagnostic>,
    ) {
        let mut emitting = data.emitting_diagnostics.lock();

        if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
            // The node is already green so diagnostics must have been emitted already
            return;
        }

        if emitting.insert(dep_node_index) {
            // We were the first to insert the node in the set so this thread
            // must emit the diagnostics and signal other potentially waiting
            // threads after.
            mem::drop(emitting);

            // Promote the previous diagnostics to the current session.
            tcx.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics.clone().into());

            let handle = tcx.sess.diagnostic();

            for diagnostic in diagnostics {
                handle.emit_diagnostic(&diagnostic);
            }

            // Mark the node as green now that diagnostics are emitted
            data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

            // Remove the node from the set
            data.emitting_diagnostics.lock().remove(&dep_node_index);

            // Wake up waiters
            data.emitting_diagnostics_cond_var.notify_all();
        } else {
            // We must wait for the other thread to finish emitting the diagnostic

            loop {
                data.emitting_diagnostics_cond_var.wait(&mut emitting);
                if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index))
                {
                    break;
                }
            }
        }
    }
    // Returns true if the given node has been marked as green during the
    // current compilation session. Used in various assertions
    pub fn is_green(&self, dep_node: &DepNode) -> bool {
        self.node_color(dep_node).map(|c| c.is_green()).unwrap_or(false)
    }
    // This method loads all on-disk cacheable query results into memory, so
    // they can be written out to the new cache file again. Most query results
    // will already be in memory but in the case where we marked something as
    // green but then did not need the value, that value will never have been
    // loaded from disk.
    //
    // This method will only load queries that will end up in the disk cache.
    // Other queries will not be executed.
    pub fn exec_cache_promotions(&self, tcx: TyCtxt<'_>) {
        let _prof_timer = tcx.prof.generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    dep_node.try_load_from_on_disk_cache(tcx);
                }
                None | Some(DepNodeColor::Red) => {
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.
                }
            }
        }
    }

    fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
        DepNodeIndex::from_u32(index)
    }
}
/// A "work product" is an intermediate result that we save into the
/// incremental directory for later re-use. The primary example is
/// the object files that we save for each partition at code
/// generation time.
///
/// Each work product is associated with a dep-node, representing the
/// process that produced the work-product. If that dep-node is found
/// to be dirty when we load up, then we will delete the work-product
/// at load time. If the work-product is found to be clean, then we
/// will keep a record in the `previous_work_products` list.
///
/// In addition, work products have an associated hash. This hash is
/// an extra hash that can be used to decide if the work-product from
/// a previous compilation can be re-used (in addition to the dirty
/// edges check).
///
/// As the primary example, consider the object files we generate for
/// each partition. In the first run, we create partitions based on
/// the symbols that need to be compiled. For each partition P, we
/// hash the symbols in P and create a `WorkProduct` record associated
/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
/// in P.
///
/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
/// judged to be clean (which means none of the things we read to
/// generate the partition were found to be dirty), it will be loaded
/// into previous work products. We will then regenerate the set of
/// symbols in the partition P and hash them (note that new symbols
/// may be added -- for example, new monomorphizations -- even if
/// nothing in P changed!). We will compare that hash against the
/// previous hash. If it matches up, we can reuse the object file.
#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
pub struct WorkProduct {
    pub cgu_name: String,
    /// Saved files associated with this CGU.
    pub saved_files: Vec<(WorkProductFileKind, String)>,
}
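// An illustrative sketch of what one of these records might look like; the
// CGU name and file name below are hypothetical, not produced by real code:
//
//     let wp = WorkProduct {
//         cgu_name: String::from("regex.cgu-0"),
//         saved_files: vec![(WorkProductFileKind::Object, String::from("regex.cgu-0.o"))],
//     };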
#[derive(Clone, Copy, Debug, RustcEncodable, RustcDecodable, PartialEq)]
pub enum WorkProductFileKind {
    Object,
    Bytecode,
    BytecodeCompressed,
}

#[derive(Clone)]
struct DepNodeData {
    node: DepNode,
    edges: SmallVec<[DepNodeIndex; 8]>,
    fingerprint: Fingerprint,
}
/// `CurrentDepGraph` stores the dependency graph for the current session.
/// It will be populated as we run queries or tasks.
///
/// The nodes in it are identified by an index (`DepNodeIndex`).
/// The data for each node is stored in its `DepNodeData`, found in the `data` field.
///
/// We never remove nodes from the graph: they are only added.
///
/// This struct uses two locks internally. The `data` and `node_to_node_index` fields are
/// locked separately. Operations that take a `DepNodeIndex` typically just access
/// the `data` field.
///
/// The only operation that must manipulate both locks is adding new nodes, in which case
/// we first acquire the `node_to_node_index` lock and then, once a new node is to be inserted,
/// acquire the lock on `data`.
pub(super) struct CurrentDepGraph {
    data: Lock<IndexVec<DepNodeIndex, DepNodeData>>,
    node_to_node_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    #[allow(dead_code)]
    forbidden_edge: Option<EdgeFilter>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: The color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is just a random number generated when
    /// the `DepGraph` is created.
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,
}
impl CurrentDepGraph {
    fn new(prev_graph_node_count: usize) -> CurrentDepGraph {
        use std::time::{SystemTime, UNIX_EPOCH};

        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);

        let forbidden_edge = if cfg!(debug_assertions) {
            match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
                Ok(s) => match EdgeFilter::new(&s) {
                    Ok(f) => Some(f),
                    Err(err) => bug!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
                },
                Err(_) => None,
            }
        } else {
            None
        };

        // Pre-allocate the dep node structures. We over-allocate a little so
        // that we hopefully don't have to re-allocate during this compilation
        // session. The over-allocation is 2% plus a small constant to account
        // for the fact that in very small crates 2% might not be enough.
        let new_node_count_estimate = (prev_graph_node_count * 102) / 100 + 200;

        CurrentDepGraph {
            data: Lock::new(IndexVec::with_capacity(new_node_count_estimate)),
            node_to_node_index: Sharded::new(|| {
                FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate / sharded::SHARDS,
                    Default::default(),
                )
            }),
            anon_id_seed: stable_hasher.finish(),
            forbidden_edge,
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }
    fn complete_task(
        &self,
        node: DepNode,
        task_deps: TaskDeps,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        self.alloc_node(node, task_deps.reads, fingerprint)
    }
    fn complete_anon_task(&self, kind: DepKind, task_deps: TaskDeps) -> DepNodeIndex {
        debug_assert!(!kind.is_eval_always());

        let mut hasher = StableHasher::new();

        // The dep node indices are hashed here instead of hashing the dep nodes of the
        // dependencies. These indices may refer to different nodes per session, but this isn't
        // a problem here because we ensure that the final dep node hash is per session only by
        // combining it with the per session random number `anon_id_seed`. This hash only needs
        // to map the dependencies to a single value on a per session basis.
        task_deps.reads.hash(&mut hasher);

        let target_dep_node = DepNode {
            kind,

            // Fingerprint::combine() is faster than sending Fingerprint
            // through the StableHasher (at least as long as StableHasher
            // is so slow).
            hash: self.anon_id_seed.combine(hasher.finish()),
        };

        self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO)
    }
    fn alloc_node(
        &self,
        dep_node: DepNode,
        edges: SmallVec<[DepNodeIndex; 8]>,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        debug_assert!(
            !self.node_to_node_index.get_shard_by_value(&dep_node).lock().contains_key(&dep_node)
        );
        self.intern_node(dep_node, edges, fingerprint)
    }
    fn intern_node(
        &self,
        dep_node: DepNode,
        edges: SmallVec<[DepNodeIndex; 8]>,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        match self.node_to_node_index.get_shard_by_value(&dep_node).lock().entry(dep_node) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                let mut data = self.data.lock();
                let dep_node_index = DepNodeIndex::new(data.len());
                data.push(DepNodeData { node: dep_node, edges, fingerprint });
                entry.insert(dep_node_index);
                dep_node_index
            }
        }
    }
}
impl DepGraphData {
    fn read_index(&self, source: DepNodeIndex) {
        ty::tls::with_context_opt(|icx| {
            let icx = if let Some(icx) = icx { icx } else { return };
            if let Some(task_deps) = icx.task_deps {
                let mut task_deps = task_deps.lock();
                if cfg!(debug_assertions) {
                    self.current.total_read_count.fetch_add(1, Relaxed);
                }
                if task_deps.read_set.insert(source) {
                    task_deps.reads.push(source);

                    #[cfg(debug_assertions)]
                    {
                        if let Some(target) = task_deps.node {
                            let data = self.current.data.lock();
                            if let Some(ref forbidden_edge) = self.current.forbidden_edge {
                                let source = data[source].node;
                                if forbidden_edge.test(&source, &target) {
                                    bug!("forbidden edge {:?} -> {:?} created", source, target)
                                }
                            }
                        }
                    }
                } else if cfg!(debug_assertions) {
                    self.current.total_duplicate_read_count.fetch_add(1, Relaxed);
                }
            }
        })
    }
}
pub struct TaskDeps {
    #[cfg(debug_assertions)]
    node: Option<DepNode>,
    reads: SmallVec<[DepNodeIndex; 8]>,
    read_set: FxHashSet<DepNodeIndex>,
}
// A data structure that stores Option<DepNodeColor> values as a contiguous
// array, using one u32 per entry.
struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;
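// A worked example of the encoding, to make the arithmetic below concrete:
// a stored u32 of 0 means `None` (not yet colored), 1 means
// `Some(DepNodeColor::Red)`, and any value v >= 2 means
// `Some(DepNodeColor::Green(DepNodeIndex::from_u32(v - 2)))`. So a green
// node with `DepNodeIndex` 5 is stored as 7.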
impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
    }

    fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
            value => {
                Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
            }
        }
    }

    fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
        self.values[index].store(
            match color {
                DepNodeColor::Red => COMPRESSED_RED,
                DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
            },
            Ordering::Release,
        )
    }
}