use crate::ty::{self, TyCtxt};
use errors::Diagnostic;
use parking_lot::{Condvar, Mutex};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::QueryInvocationId;
use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
use rustc_index::vec::{Idx, IndexVec};
use smallvec::SmallVec;
use std::collections::hash_map::Entry;
use std::env;
use std::hash::Hash;
use std::mem;
use std::sync::atomic::Ordering::Relaxed;
use std::sync::atomic::Ordering::SeqCst;

use crate::ich::{Fingerprint, StableHashingContext, StableHashingContextProvider};

use super::debug::EdgeFilter;
use super::dep_node::{DepKind, DepNode, WorkProductId};
use super::prev::PreviousDepGraph;
use super::query::DepGraphQuery;
use super::safe::DepGraphSafe;
use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
#[derive(Clone)]
pub struct DepGraph {
    data: Option<Lrc<DepGraphData>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Lrc<AtomicU32>,
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex { .. }
}

impl DepNodeIndex {
    pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
}
impl std::convert::From<DepNodeIndex> for QueryInvocationId {
    #[inline]
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}
#[derive(PartialEq)]
pub enum DepNodeColor {
    Red,
    Green(DepNodeIndex),
}

impl DepNodeColor {
    pub fn is_green(self) -> bool {
        match self {
            DepNodeColor::Red => false,
            DepNodeColor::Green(_) => true,
        }
    }
}
struct DepGraphData {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: We don't merge the previous dep-graph
    /// into the current one anymore.
    current: CurrentDepGraph,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: PreviousDepGraph,

    colors: DepNodeColorMap,

    /// A set of loaded diagnostics that are in the process of being emitted.
    emitting_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,

    /// Used to wait for diagnostics to be emitted.
    emitting_diagnostics_cond_var: Condvar,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: FxHashMap<WorkProductId, WorkProduct>,

    dep_node_debug: Lock<FxHashMap<DepNode, String>>,
}
pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Option<Fingerprint>
where
    R: for<'a> HashStable<StableHashingContext<'a>>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);

    Some(stable_hasher.finish())
}
impl DepGraph {
    pub fn new(
        prev_graph: PreviousDepGraph,
        prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
    ) -> DepGraph {
        let prev_graph_node_count = prev_graph.node_count();

        DepGraph {
            data: Some(Lrc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current: CurrentDepGraph::new(prev_graph_node_count),
                emitting_diagnostics: Default::default(),
                emitting_diagnostics_cond_var: Condvar::new(),
                previous: prev_graph,
                colors: DepNodeColorMap::new(prev_graph_node_count),
            })),
            virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
        }
    }

    pub fn new_disabled() -> DepGraph {
        DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
    }
    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }

    pub fn query(&self) -> DepGraphQuery {
        let data = self.data.as_ref().unwrap().current.data.lock();
        let nodes: Vec<_> = data.iter().map(|n| n.node).collect();
        let mut edges = Vec::new();
        for (from, edge_targets) in data.iter().map(|d| (d.node, &d.edges)) {
            for &edge_target in edge_targets.iter() {
                let to = data[edge_target].node;
                edges.push((from, to));
            }
        }

        DepGraphQuery::new(&nodes[..], &edges[..])
    }
    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            ty::tls::with_context_opt(|icx| {
                let icx = if let Some(icx) = icx { icx } else { return };
                assert!(icx.task_deps.is_none(), "expected no task dependency tracking");
            })
        }
    }

    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        ty::tls::with_context(|icx| {
            let icx = ty::tls::ImplicitCtxt { task_deps: None, ..icx.clone() };

            ty::tls::enter_context(&icx, |_| op())
        })
    }
    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the
    ///   `arg` parameter.
    ///
    /// [rustc guide]: https://rust-lang.github.io/rustc-guide/incremental-compilation.html
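    ///
    /// As an illustrative sketch (not taken from this module; `compute_item_hash`
    /// and the surrounding setup are hypothetical), a call site looks roughly
    /// like this:
    ///
    /// ```ignore (illustrative)
    /// fn compute_item_hash(tcx: TyCtxt<'_>, def_id: DefId) -> Fingerprint {
    ///     // ... tracked computation; every query read in here becomes an edge ...
    /// }
    ///
    /// let (result, dep_node_index) = tcx.dep_graph.with_task(
    ///     dep_node,          // the `DepNode` identifying this task
    ///     tcx,               // `cx`: must be `DepGraphSafe`
    ///     def_id,            // `arg`: likewise `DepGraphSafe`
    ///     compute_item_hash, // a free function, not a closure
    ///     hash_result,       // fingerprints the result for red/green comparison
    /// );
    /// ```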
    pub fn with_task<'a, C, A, R>(
        &self,
        key: DepNode,
        cx: C,
        arg: A,
        task: fn(C, A) -> R,
        hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex)
    where
        C: DepGraphSafe + StableHashingContextProvider<'a>,
    {
        self.with_task_impl(
            key,
            cx,
            arg,
            false,
            task,
            |_key| {
                Some(TaskDeps {
                    #[cfg(debug_assertions)]
                    node: Some(_key),
                    reads: SmallVec::new(),
                    read_set: Default::default(),
                })
            },
            |data, key, fingerprint, task| data.complete_task(key, task.unwrap(), fingerprint),
            hash_result,
        )
    }
    /// Creates a new dep-graph input with value `input`.
    pub fn input_task<'a, C, R>(&self, key: DepNode, cx: C, input: R) -> (R, DepNodeIndex)
    where
        C: DepGraphSafe + StableHashingContextProvider<'a>,
        R: for<'b> HashStable<StableHashingContext<'b>>,
    {
        fn identity_fn<C, A>(_: C, arg: A) -> A {
            arg
        }

        self.with_task_impl(
            key,
            cx,
            input,
            true,
            identity_fn,
            |_| None,
            |data, key, fingerprint, _| data.alloc_node(key, SmallVec::new(), fingerprint),
            hash_result::<R>,
        )
    }
    fn with_task_impl<'a, C, A, R>(
        &self,
        key: DepNode,
        cx: C,
        arg: A,
        no_tcx: bool,
        task: fn(C, A) -> R,
        create_task: fn(DepNode) -> Option<TaskDeps>,
        finish_task_and_alloc_depnode: fn(
            &CurrentDepGraph,
            DepNode,
            Fingerprint,
            Option<TaskDeps>,
        ) -> DepNodeIndex,
        hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex)
    where
        C: DepGraphSafe + StableHashingContextProvider<'a>,
    {
        if let Some(ref data) = self.data {
            let task_deps = create_task(key).map(|deps| Lock::new(deps));

            // In incremental mode, hash the result of the task. We don't
            // do anything with the hash yet, but we are computing it
            // anyway so that
            //  - we make sure that the infrastructure works and
            //  - we can get an idea of the runtime cost.
            let mut hcx = cx.get_stable_hashing_context();

            let result = if no_tcx {
                task(cx, arg)
            } else {
                ty::tls::with_context(|icx| {
                    let icx =
                        ty::tls::ImplicitCtxt { task_deps: task_deps.as_ref(), ..icx.clone() };

                    ty::tls::enter_context(&icx, |_| task(cx, arg))
                })
            };

            let current_fingerprint = hash_result(&mut hcx, &result);

            let dep_node_index = finish_task_and_alloc_depnode(
                &data.current,
                key,
                current_fingerprint.unwrap_or(Fingerprint::ZERO),
                task_deps.map(|lock| lock.into_inner()),
            );

            let print_status = cfg!(debug_assertions) && hcx.sess().opts.debugging_opts.dep_tasks;

            // Determine the color of the new DepNode.
            if let Some(prev_index) = data.previous.node_to_index_opt(&key) {
                let prev_fingerprint = data.previous.fingerprint_by_index(prev_index);

                let color = if let Some(current_fingerprint) = current_fingerprint {
                    if current_fingerprint == prev_fingerprint {
                        if print_status {
                            eprintln!("[task::green] {:?}", key);
                        }
                        DepNodeColor::Green(dep_node_index)
                    } else {
                        if print_status {
                            eprintln!("[task::red] {:?}", key);
                        }
                        DepNodeColor::Red
                    }
                } else {
                    if print_status {
                        eprintln!("[task::unknown] {:?}", key);
                    }
                    // Mark the node as Red if we can't hash the result.
                    DepNodeColor::Red
                };

                debug_assert!(
                    data.colors.get(prev_index).is_none(),
                    "DepGraph::with_task() - Duplicate DepNodeColor \
                     insertion for {:?}",
                    key
                );

                data.colors.insert(prev_index, color);
            } else {
                if print_status {
                    eprintln!("[task::new] {:?}", key);
                }
            }

            (result, dep_node_index)
        } else {
            (task(cx, arg), self.next_virtual_depnode_index())
        }
    }
    /// Executes something within an "anonymous" task, that is, a task whose
    /// `DepNode` is determined by the list of inputs it read from.
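    ///
    /// A sketch of a call site (the closure body and `do_trait_selection` are
    /// hypothetical):
    ///
    /// ```ignore (illustrative)
    /// let (result, dep_node_index) = tcx.dep_graph.with_anon_task(DepKind::TraitSelect, || {
    ///     // Every read performed in here is recorded; the resulting anonymous
    ///     // node's ID is then computed from that read list.
    ///     do_trait_selection()
    /// });
    /// ```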
    pub fn with_anon_task<OP, R>(&self, dep_kind: DepKind, op: OP) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        if let Some(ref data) = self.data {
            let (result, task_deps) = ty::tls::with_context(|icx| {
                let task_deps = Lock::new(TaskDeps {
                    #[cfg(debug_assertions)]
                    node: None,
                    reads: SmallVec::new(),
                    read_set: Default::default(),
                });

                let r = {
                    let icx = ty::tls::ImplicitCtxt { task_deps: Some(&task_deps), ..icx.clone() };

                    ty::tls::enter_context(&icx, |_| op())
                };

                (r, task_deps.into_inner())
            });
            let dep_node_index = data.current.complete_anon_task(dep_kind, task_deps);
            (result, dep_node_index)
        } else {
            (op(), self.next_virtual_depnode_index())
        }
    }
    /// Executes something within an "eval-always" task, which is a task
    /// that re-runs whenever anything changes.
    pub fn with_eval_always_task<'a, C, A, R>(
        &self,
        key: DepNode,
        cx: C,
        arg: A,
        task: fn(C, A) -> R,
        hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex)
    where
        C: DepGraphSafe + StableHashingContextProvider<'a>,
    {
        self.with_task_impl(
            key,
            cx,
            arg,
            false,
            task,
            |_| None,
            |data, key, fingerprint, _| data.alloc_node(key, SmallVec::new(), fingerprint),
            hash_result,
        )
    }
    #[inline]
    pub fn read(&self, v: DepNode) {
        if let Some(ref data) = self.data {
            let map = data.current.node_to_node_index.get_shard_by_value(&v).lock();
            if let Some(dep_node_index) = map.get(&v).copied() {
                // Release the shard lock before recording the read.
                mem::drop(map);
                data.read_index(dep_node_index);
            } else {
                bug!("DepKind {:?} should be pre-allocated but isn't.", v.kind)
            }
        }
    }

    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            data.read_index(dep_node_index);
        }
    }
    #[inline]
    pub fn dep_node_index_of(&self, dep_node: &DepNode) -> DepNodeIndex {
        self.data
            .as_ref()
            .unwrap()
            .current
            .node_to_node_index
            .get_shard_by_value(dep_node)
            .lock()
            .get(dep_node)
            .cloned()
            .unwrap()
    }

    #[inline]
    pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
        if let Some(ref data) = self.data {
            data.current
                .node_to_node_index
                .get_shard_by_value(&dep_node)
                .lock()
                .contains_key(dep_node)
        } else {
            false
        }
    }
    #[inline]
    pub fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint {
        let data = self.data.as_ref().expect("dep graph enabled").current.data.lock();
        data[dep_node_index].fingerprint
    }

    pub fn prev_fingerprint_of(&self, dep_node: &DepNode) -> Option<Fingerprint> {
        self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
    }

    #[inline]
    pub fn prev_dep_node_index_of(&self, dep_node: &DepNode) -> SerializedDepNodeIndex {
        self.data.as_ref().unwrap().previous.node_to_index(dep_node)
    }
    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    /// Accesses the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
        &self.data.as_ref().unwrap().previous_work_products
    }
    #[inline(always)]
    pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        let debug_str = debug_str_gen();
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub(super) fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }
    pub fn edge_deduplication_data(&self) -> Option<(u64, u64)> {
        if cfg!(debug_assertions) {
            let current_dep_graph = &self.data.as_ref().unwrap().current;

            Some((
                current_dep_graph.total_read_count.load(SeqCst),
                current_dep_graph.total_duplicate_read_count.load(SeqCst),
            ))
        } else {
            None
        }
    }
    pub fn serialize(&self) -> SerializedDepGraph {
        let data = self.data.as_ref().unwrap().current.data.lock();

        let fingerprints: IndexVec<SerializedDepNodeIndex, _> =
            data.iter().map(|d| d.fingerprint).collect();
        let nodes: IndexVec<SerializedDepNodeIndex, _> = data.iter().map(|d| d.node).collect();

        let total_edge_count: usize = data.iter().map(|d| d.edges.len()).sum();

        let mut edge_list_indices = IndexVec::with_capacity(nodes.len());
        let mut edge_list_data = Vec::with_capacity(total_edge_count);

        for (current_dep_node_index, edges) in data.iter_enumerated().map(|(i, d)| (i, &d.edges)) {
            let start = edge_list_data.len() as u32;
            // This should really just be a memcpy :/
            edge_list_data.extend(edges.iter().map(|i| SerializedDepNodeIndex::new(i.index())));
            let end = edge_list_data.len() as u32;

            debug_assert_eq!(current_dep_node_index.index(), edge_list_indices.len());
            edge_list_indices.push((start, end));
        }

        debug_assert!(edge_list_data.len() <= ::std::u32::MAX as usize);
        debug_assert_eq!(edge_list_data.len(), total_edge_count);

        SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data }
    }
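
    // For illustration (not part of the original code): `serialize` above
    // flattens the adjacency lists into a CSR-style layout, so the edges of
    // node `i` in the returned `SerializedDepGraph` can be read back as
    //
    //     let (start, end) = serialized.edge_list_indices[i];
    //     let targets = &serialized.edge_list_data[start as usize..end as usize];
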
    pub fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
        if let Some(ref data) = self.data {
            if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
                return data.colors.get(prev_index);
            } else {
                // This is a node that did not exist in the previous compilation
                // session, so we consider it to be red.
                return Some(DepNodeColor::Red);
            }
        }

        None
    }
    /// Tries to read a node index for the node `dep_node`.
    /// A node will have an index when it has already been marked green, or when we can mark
    /// it green. This function will mark the current task as a reader of the specified node
    /// when a node index can be found for that node.
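    ///
    /// A sketch of how a caller in the query system might use this (the helper
    /// functions are hypothetical):
    ///
    /// ```ignore (illustrative)
    /// if let Some((prev_index, dep_node_index)) =
    ///     tcx.dep_graph.try_mark_green_and_read(tcx, &dep_node)
    /// {
    ///     // Green: try to load the cached result from disk instead of
    ///     // re-executing the query.
    ///     load_query_result_from_disk_cache(tcx, prev_index, dep_node_index)
    /// } else {
    ///     // Red or unknown: execute the query and record new edges.
    ///     force_query(tcx, dep_node)
    /// }
    /// ```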
    pub fn try_mark_green_and_read(
        &self,
        tcx: TyCtxt<'_>,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        self.try_mark_green(tcx, dep_node).map(|(prev_index, dep_node_index)| {
            debug_assert!(self.is_green(&dep_node));
            self.read_index(dep_node_index);
            (prev_index, dep_node_index)
        })
    }
    pub fn try_mark_green(
        &self,
        tcx: TyCtxt<'_>,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!dep_node.kind.is_eval_always());

        // Return None if the dep graph is disabled.
        let data = self.data.as_ref()?;

        // Return None if the dep node didn't exist in the previous session.
        let prev_index = data.previous.node_to_index_opt(dep_node)?;

        match data.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
            None => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(tcx, data, prev_index, &dep_node)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }
    /// Tries to mark a dep-node which existed in the previous compilation session as green.
    fn try_mark_previous_green<'tcx>(
        &self,
        tcx: TyCtxt<'tcx>,
        data: &DepGraphData,
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode,
    ) -> Option<DepNodeIndex> {
        debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);

        #[cfg(not(parallel_compiler))]
        {
            debug_assert!(
                !data
                    .current
                    .node_to_node_index
                    .get_shard_by_value(dep_node)
                    .lock()
                    .contains_key(dep_node)
            );
            debug_assert!(data.colors.get(prev_dep_node_index).is_none());
        }

        // We never try to mark eval_always nodes as green.
        debug_assert!(!dep_node.kind.is_eval_always());

        debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);

        let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);

        let mut current_deps = SmallVec::new();

        for &dep_dep_node_index in prev_deps {
            let dep_dep_node_color = data.colors.get(dep_dep_node_index);

            match dep_dep_node_color {
                Some(DepNodeColor::Green(node_index)) => {
                    // This dependency has been marked as green before, we are
                    // still fine and can continue with checking the other
                    // dependencies.
                    debug!(
                        "try_mark_previous_green({:?}) --- found dependency {:?} to \
                         be immediately green",
                        dep_node,
                        data.previous.index_to_node(dep_dep_node_index)
                    );
                    current_deps.push(node_index);
                }
                Some(DepNodeColor::Red) => {
                    // We found a dependency the value of which has changed
                    // compared to the previous compilation session. We cannot
                    // mark the DepNode as green and also don't need to bother
                    // with checking any of the other dependencies.
                    debug!(
                        "try_mark_previous_green({:?}) - END - dependency {:?} was \
                         immediately red",
                        dep_node,
                        data.previous.index_to_node(dep_dep_node_index)
                    );
                    return None;
                }
                None => {
                    let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index);

                    // We don't know the state of this dependency. If it isn't
                    // an eval_always node, let's try to mark it green recursively.
                    if !dep_dep_node.kind.is_eval_always() {
                        debug!(
                            "try_mark_previous_green({:?}) --- state of dependency {:?} \
                             is unknown, trying to mark it green",
                            dep_node, dep_dep_node
                        );

                        let node_index = self.try_mark_previous_green(
                            tcx,
                            data,
                            dep_dep_node_index,
                            dep_dep_node,
                        );
                        if let Some(node_index) = node_index {
                            debug!(
                                "try_mark_previous_green({:?}) --- managed to MARK \
                                 dependency {:?} as green",
                                dep_node, dep_dep_node
                            );
                            current_deps.push(node_index);
                            continue;
                        }
                    } else {
                        match dep_dep_node.kind {
                            DepKind::Hir | DepKind::HirBody | DepKind::CrateMetadata => {
                                if dep_dep_node.extract_def_id(tcx).is_none() {
                                    // If the node does not exist anymore, we
                                    // just fail to mark green.
                                    return None;
                                } else {
                                    // If the node does exist, it should have
                                    // been pre-allocated.
                                    bug!(
                                        "DepNode {:?} should have been \
                                         pre-allocated but wasn't.",
                                        dep_dep_node
                                    )
                                }
                            }
                            _ => {
                                // For other kinds of nodes it's OK to be
                                // forced.
                            }
                        }
                    }

                    // We failed to mark it green, so we try to force the query.
                    debug!(
                        "try_mark_previous_green({:?}) --- trying to force \
                         dependency {:?}",
                        dep_node, dep_dep_node
                    );
                    if crate::ty::query::force_from_dep_node(tcx, dep_dep_node) {
                        let dep_dep_node_color = data.colors.get(dep_dep_node_index);

                        match dep_dep_node_color {
                            Some(DepNodeColor::Green(node_index)) => {
                                debug!(
                                    "try_mark_previous_green({:?}) --- managed to \
                                     FORCE dependency {:?} to green",
                                    dep_node, dep_dep_node
                                );
                                current_deps.push(node_index);
                            }
                            Some(DepNodeColor::Red) => {
                                debug!(
                                    "try_mark_previous_green({:?}) - END - \
                                     dependency {:?} was red after forcing",
                                    dep_node, dep_dep_node
                                );
                                return None;
                            }
                            None => {
                                if !tcx.sess.has_errors_or_delayed_span_bugs() {
                                    bug!(
                                        "try_mark_previous_green() - Forcing the DepNode \
                                         should have set its color"
                                    )
                                } else {
                                    // If the query we just forced has resulted in
                                    // some kind of compilation error, we cannot rely on
                                    // the dep-node color having been properly updated.
                                    // This means that the query system has reached an
                                    // invalid state. We let the compiler continue (by
                                    // returning `None`) so it can emit error messages
                                    // and wind down, but rely on the fact that this
                                    // invalid state will not be persisted to the
                                    // incremental compilation cache because of
                                    // compilation errors being present.
                                    debug!(
                                        "try_mark_previous_green({:?}) - END - \
                                         dependency {:?} resulted in compilation error",
                                        dep_node, dep_dep_node
                                    );
                                    return None;
                                }
                            }
                        }
                    } else {
                        // The DepNode could not be forced.
                        debug!(
                            "try_mark_previous_green({:?}) - END - dependency {:?} \
                             could not be forced",
                            dep_node, dep_dep_node
                        );
                        return None;
                    }
                }
            }
        }
        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green.

        // There may be multiple threads trying to mark the same dep node green concurrently.

        let dep_node_index = {
            // Copy the fingerprint from the previous graph
            // so we don't have to recompute it.
            let fingerprint = data.previous.fingerprint_by_index(prev_dep_node_index);

            // We allocate an entry for the node in the current dependency graph and
            // add all the appropriate edges imported from the previous graph.
            data.current.intern_node(*dep_node, current_deps, fingerprint)
        };

        // ... emitting any stored diagnostics ...

        // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere.
        // Maybe store a list on disk and encode this fact in the DepNodeState.

        let diagnostics = tcx.queries.on_disk_cache.load_diagnostics(tcx, prev_dep_node_index);

        #[cfg(not(parallel_compiler))]
        debug_assert!(
            data.colors.get(prev_dep_node_index).is_none(),
            "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
             insertion for {:?}",
            dep_node
        );

        if unlikely!(diagnostics.len() > 0) {
            self.emit_diagnostics(tcx, data, dep_node_index, prev_dep_node_index, diagnostics);
        }

        // ... and finally storing a "Green" entry in the color map.
        // Multiple threads can all write the same color here.
        data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

        debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);

        Some(dep_node_index)
    }
    /// Atomically emits some loaded diagnostics.
    /// This may be called concurrently on multiple threads for the same dep node.
    #[cold]
    #[inline(never)]
    fn emit_diagnostics<'tcx>(
        &self,
        tcx: TyCtxt<'tcx>,
        data: &DepGraphData,
        dep_node_index: DepNodeIndex,
        prev_dep_node_index: SerializedDepNodeIndex,
        diagnostics: Vec<Diagnostic>,
    ) {
        let mut emitting = data.emitting_diagnostics.lock();

        if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
            // The node is already green so diagnostics must have been emitted already.
            return;
        }

        if emitting.insert(dep_node_index) {
            // We were the first to insert the node in the set so this thread
            // must emit the diagnostics and signal other potentially waiting
            // threads after.
            mem::drop(emitting);

            // Promote the previous diagnostics to the current session.
            tcx.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics.clone().into());

            let handle = tcx.sess.diagnostic();

            for diagnostic in diagnostics {
                handle.emit_diagnostic(&diagnostic);
            }

            // Mark the node as green now that diagnostics are emitted.
            data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

            // Remove the node from the set.
            data.emitting_diagnostics.lock().remove(&dep_node_index);

            // Wake up waiters.
            data.emitting_diagnostics_cond_var.notify_all();
        } else {
            // We must wait for the other thread to finish emitting the diagnostics.

            loop {
                data.emitting_diagnostics_cond_var.wait(&mut emitting);
                if data.colors.get(prev_dep_node_index)
                    == Some(DepNodeColor::Green(dep_node_index))
                {
                    break;
                }
            }
        }
    }
    // Returns true if the given node has been marked as green during the
    // current compilation session. Used in various assertions.
    pub fn is_green(&self, dep_node: &DepNode) -> bool {
        self.node_color(dep_node).map(|c| c.is_green()).unwrap_or(false)
    }
    // This method loads all on-disk cacheable query results into memory, so
    // they can be written out to the new cache file again. Most query results
    // will already be in memory but in the case where we marked something as
    // green but then did not need the value, that value will never have been
    // loaded from disk.
    //
    // This method will only load queries that will end up in the disk cache.
    // Other queries will not be executed.
    pub fn exec_cache_promotions(&self, tcx: TyCtxt<'_>) {
        let _prof_timer = tcx.prof.generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    dep_node.try_load_from_on_disk_cache(tcx);
                }
                None | Some(DepNodeColor::Red) => {
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.
                }
            }
        }
    }

    fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
        DepNodeIndex::from_u32(index)
    }
}
906 /// A "work product" is an intermediate result that we save into the
907 /// incremental directory for later re-use. The primary example are
908 /// the object files that we save for each partition at code
911 /// Each work product is associated with a dep-node, representing the
912 /// process that produced the work-product. If that dep-node is found
913 /// to be dirty when we load up, then we will delete the work-product
914 /// at load time. If the work-product is found to be clean, then we
915 /// will keep a record in the `previous_work_products` list.
917 /// In addition, work products have an associated hash. This hash is
918 /// an extra hash that can be used to decide if the work-product from
919 /// a previous compilation can be re-used (in addition to the dirty
922 /// As the primary example, consider the object files we generate for
923 /// each partition. In the first run, we create partitions based on
924 /// the symbols that need to be compiled. For each partition P, we
925 /// hash the symbols in P and create a `WorkProduct` record associated
926 /// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
929 /// The next time we compile, if the `DepNode::CodegenUnit(P)` is
930 /// judged to be clean (which means none of the things we read to
931 /// generate the partition were found to be dirty), it will be loaded
932 /// into previous work products. We will then regenerate the set of
933 /// symbols in the partition P and hash them (note that new symbols
934 /// may be added -- for example, new monomorphizations -- even if
935 /// nothing in P changed!). We will compare that hash against the
936 /// previous hash. If it matches up, we can reuse the object file.
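///
/// A sketch of that reuse decision (the helper names and hash variables are
/// hypothetical, not the actual codegen code):
///
/// ```ignore (illustrative)
/// if let Some(wp) = tcx.dep_graph.previous_work_product(&work_product_id) {
///     if new_symbol_hash == previous_symbol_hash {
///         // The CGU is clean and its symbol set is unchanged:
///         // skip codegen and reuse the saved object file.
///         reuse_saved_files(&wp.saved_files);
///     }
/// }
/// ```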
#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
pub struct WorkProduct {
    pub cgu_name: String,
    /// Saved files associated with this CGU.
    pub saved_files: Vec<(WorkProductFileKind, String)>,
}

#[derive(Clone, Copy, Debug, RustcEncodable, RustcDecodable, PartialEq)]
pub enum WorkProductFileKind {
    Object,
    Bytecode,
    BytecodeCompressed,
}
#[derive(Clone)]
struct DepNodeData {
    node: DepNode,
    edges: SmallVec<[DepNodeIndex; 8]>,
    fingerprint: Fingerprint,
}
/// `CurrentDepGraph` stores the dependency graph for the current session.
/// It will be populated as we run queries or tasks.
///
/// The nodes in it are identified by an index (`DepNodeIndex`).
/// The data for each node is stored in its `DepNodeData`, found in the `data` field.
///
/// We never remove nodes from the graph: they are only added.
///
/// This struct uses two locks internally. The `data` and `node_to_node_index` fields are
/// locked separately. Operations that take a `DepNodeIndex` typically just access
/// the `data` field.
///
/// The only operation that must manipulate both locks is adding new nodes, in which case
/// we first acquire the `node_to_node_index` lock and then, once a new node is to be inserted,
/// acquire the lock on `data`.
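///
/// For illustration, the insertion path in `intern_node` below follows this
/// two-lock protocol (a sketch, not the verbatim code):
///
/// ```ignore (illustrative)
/// let mut shard = self.node_to_node_index.get_shard_by_value(&dep_node).lock();
/// if !shard.contains_key(&dep_node) {
///     let mut data = self.data.lock(); // second lock, taken while holding the shard lock
///     // ... push the new `DepNodeData` and record its index in the shard ...
/// }
/// ```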
pub(super) struct CurrentDepGraph {
    data: Lock<IndexVec<DepNodeIndex, DepNodeData>>,
    node_to_node_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    #[allow(dead_code)]
    forbidden_edge: Option<EdgeFilter>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: The color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is just a random number generated when
    /// the `DepGraph` is created.
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,
}
impl CurrentDepGraph {
    fn new(prev_graph_node_count: usize) -> CurrentDepGraph {
        use std::time::{SystemTime, UNIX_EPOCH};

        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);

        let forbidden_edge = if cfg!(debug_assertions) {
            match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
                Ok(s) => match EdgeFilter::new(&s) {
                    Ok(f) => Some(f),
                    Err(err) => bug!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
                },
                Err(_) => None,
            }
        } else {
            None
        };

        // Pre-allocate the dep node structures. We over-allocate a little so
        // that we hopefully don't have to re-allocate during this compilation
        // session. The over-allocation is 2% plus a small constant to account
        // for the fact that in very small crates 2% might not be enough.
        let new_node_count_estimate = (prev_graph_node_count * 102) / 100 + 200;

        CurrentDepGraph {
            data: Lock::new(IndexVec::with_capacity(new_node_count_estimate)),
            node_to_node_index: Sharded::new(|| {
                FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate / sharded::SHARDS,
                    Default::default(),
                )
            }),
            anon_id_seed: stable_hasher.finish(),
            forbidden_edge,
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }
    fn complete_task(
        &self,
        node: DepNode,
        task_deps: TaskDeps,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        self.alloc_node(node, task_deps.reads, fingerprint)
    }
    fn complete_anon_task(&self, kind: DepKind, task_deps: TaskDeps) -> DepNodeIndex {
        debug_assert!(!kind.is_eval_always());

        let mut hasher = StableHasher::new();

        // The dep node indices are hashed here instead of hashing the dep nodes of the
        // dependencies. These indices may refer to different nodes per session, but this isn't
        // a problem here because we ensure that the final dep node hash is per-session only by
        // combining it with the per-session random number `anon_id_seed`. This hash only needs
        // to map the dependencies to a single value on a per-session basis.
        task_deps.reads.hash(&mut hasher);

        let target_dep_node = DepNode {
            kind,

            // Fingerprint::combine() is faster than sending Fingerprint
            // through the StableHasher (at least as long as StableHasher
            // is so slow).
            hash: self.anon_id_seed.combine(hasher.finish()),
        };

        self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO)
    }
    fn alloc_node(
        &self,
        dep_node: DepNode,
        edges: SmallVec<[DepNodeIndex; 8]>,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        debug_assert!(
            !self.node_to_node_index.get_shard_by_value(&dep_node).lock().contains_key(&dep_node)
        );
        self.intern_node(dep_node, edges, fingerprint)
    }
    fn intern_node(
        &self,
        dep_node: DepNode,
        edges: SmallVec<[DepNodeIndex; 8]>,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        match self.node_to_node_index.get_shard_by_value(&dep_node).lock().entry(dep_node) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                let mut data = self.data.lock();
                let dep_node_index = DepNodeIndex::new(data.len());
                data.push(DepNodeData { node: dep_node, edges, fingerprint });
                entry.insert(dep_node_index);
                dep_node_index
            }
        }
    }
}
impl DepGraphData {
    fn read_index(&self, source: DepNodeIndex) {
        ty::tls::with_context_opt(|icx| {
            let icx = if let Some(icx) = icx { icx } else { return };
            if let Some(task_deps) = icx.task_deps {
                let mut task_deps = task_deps.lock();
                if cfg!(debug_assertions) {
                    self.current.total_read_count.fetch_add(1, SeqCst);
                }
                if task_deps.read_set.insert(source) {
                    task_deps.reads.push(source);

                    #[cfg(debug_assertions)]
                    {
                        if let Some(target) = task_deps.node {
                            let data = self.current.data.lock();
                            if let Some(ref forbidden_edge) = self.current.forbidden_edge {
                                let source = data[source].node;
                                if forbidden_edge.test(&source, &target) {
                                    bug!("forbidden edge {:?} -> {:?} created", source, target)
                                }
                            }
                        }
                    }
                } else if cfg!(debug_assertions) {
                    self.current.total_duplicate_read_count.fetch_add(1, SeqCst);
                }
            }
        })
    }
}
pub struct TaskDeps {
    #[cfg(debug_assertions)]
    node: Option<DepNode>,
    reads: SmallVec<[DepNodeIndex; 8]>,
    read_set: FxHashSet<DepNodeIndex>,
}
// A data structure that stores Option<DepNodeColor> values as a contiguous
// array, using one u32 per entry.
struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;
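
// For illustration (not in the original source): under this encoding, `None`
// is stored as `0`, `Red` as `1`, and `Green(DepNodeIndex(n))` as `n + 2`;
// e.g. `Green(DepNodeIndex::from_u32(5))` round-trips through the `AtomicU32`
// as the raw value `7`.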
impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
    }

    fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
            value => {
                Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
            }
        }
    }

    fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
        self.values[index].store(
            match color {
                DepNodeColor::Red => COMPRESSED_RED,
                DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
            },
            Ordering::Release,
        )
    }
}