use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::QueryInvocationId;
use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
use rustc_data_structures::unlikely;
use rustc_errors::Diagnostic;
use rustc_index::vec::{Idx, IndexVec};

use parking_lot::{Condvar, Mutex};
use smallvec::{smallvec, SmallVec};
use std::collections::hash_map::Entry;
use std::env;
use std::hash::Hash;
use std::marker::PhantomData;
use std::mem;
use std::sync::atomic::Ordering::Relaxed;

use super::debug::EdgeFilter;
use super::prev::PreviousDepGraph;
use super::query::DepGraphQuery;
use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, WorkProductId};

#[derive(Clone)]
pub struct DepGraph<K: DepKind> {
    data: Option<Lrc<DepGraphData<K>>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Lrc<AtomicU32>,
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex { .. }
}

impl DepNodeIndex {
    pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
}

impl std::convert::From<DepNodeIndex> for QueryInvocationId {
    #[inline]
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}

#[derive(Clone, Copy, PartialEq)]
pub enum DepNodeColor {
    Red,
    Green(DepNodeIndex),
}

impl DepNodeColor {
    pub fn is_green(self) -> bool {
        match self {
            DepNodeColor::Red => false,
            DepNodeColor::Green(_) => true,
        }
    }
}

struct DepGraphData<K: DepKind> {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: we don't merge the previous dep-graph
    /// into the current one anymore.
    current: CurrentDepGraph<K>,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: PreviousDepGraph<K>,

    colors: DepNodeColorMap,

    /// A set of loaded diagnostics that is in the process of being emitted.
    emitting_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,

    /// Used to wait for diagnostics to be emitted.
    emitting_diagnostics_cond_var: Condvar,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: FxHashMap<WorkProductId, WorkProduct>,

    dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,
}

pub fn hash_result<HashCtxt, R>(hcx: &mut HashCtxt, result: &R) -> Option<Fingerprint>
where
    R: HashStable<HashCtxt>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);

    Some(stable_hasher.finish())
}

impl<K: DepKind> DepGraph<K> {
    pub fn new(
        prev_graph: PreviousDepGraph<K>,
        prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
    ) -> DepGraph<K> {
        let prev_graph_node_count = prev_graph.node_count();

        DepGraph {
            data: Some(Lrc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current: CurrentDepGraph::new(prev_graph_node_count),
                emitting_diagnostics: Default::default(),
                emitting_diagnostics_cond_var: Condvar::new(),
                previous: prev_graph,
                colors: DepNodeColorMap::new(prev_graph_node_count),
            })),
            virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
        }
    }

    pub fn new_disabled() -> DepGraph<K> {
        DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
    }

    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }

    pub fn query(&self) -> DepGraphQuery<K> {
        let data = self.data.as_ref().unwrap().current.data.lock();
        let nodes: Vec<_> = data.iter().map(|n| n.node).collect();
        let mut edges = Vec::new();
        for (from, edge_targets) in data.iter().map(|d| (d.node, &d.edges)) {
            for &edge_target in edge_targets.iter() {
                let to = data[edge_target].node;
                edges.push((from, to));
            }
        }

        DepGraphQuery::new(&nodes[..], &edges[..])
    }

    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            K::read_deps(|task_deps| {
                assert!(task_deps.is_none(), "expected no task dependency tracking");
            })
        }
    }

    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        K::with_deps(None, op)
    }

    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc dev guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the
    ///   `arg` parameter.
    ///
    /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
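    ///
    /// A hypothetical call site, for illustration only (`dep_node`, `def_id`,
    /// and `compute` are stand-ins, not names defined in this module):
    ///
    /// ```ignore (illustrative)
    /// let (result, dep_node_index) =
    ///     tcx.dep_graph.with_task(dep_node, tcx, def_id, compute, hash_result);
    /// ```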
    pub fn with_task<Ctxt: DepContext<DepKind = K>, A, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex) {
        self.with_task_impl(
            key,
            cx,
            arg,
            false,
            task,
            |_key| {
                Some(TaskDeps {
                    #[cfg(debug_assertions)]
                    node: Some(_key),
                    reads: SmallVec::new(),
                    read_set: Default::default(),
                    phantom_data: PhantomData,
                })
            },
            |data, key, fingerprint, task| data.complete_task(key, task.unwrap(), fingerprint),
            hash_result,
        )
    }

    fn with_task_impl<Ctxt: DepContext<DepKind = K>, A, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        no_tcx: bool,
        task: fn(Ctxt, A) -> R,
        create_task: fn(DepNode<K>) -> Option<TaskDeps<K>>,
        finish_task_and_alloc_depnode: fn(
            &CurrentDepGraph<K>,
            DepNode<K>,
            Fingerprint,
            Option<TaskDeps<K>>,
        ) -> DepNodeIndex,
        hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex) {
        if let Some(ref data) = self.data {
            let task_deps = create_task(key).map(Lock::new);

            // In incremental mode, hash the result of the task. We don't
            // do anything with the hash yet, but we are computing it
            // anyway so that
            //  - we make sure that the infrastructure works, and
            //  - we can get an idea of the runtime cost.
            let mut hcx = cx.create_stable_hashing_context();

            let result = if no_tcx {
                task(cx, arg)
            } else {
                K::with_deps(task_deps.as_ref(), || task(cx, arg))
            };

            let current_fingerprint = hash_result(&mut hcx, &result);

            let dep_node_index = finish_task_and_alloc_depnode(
                &data.current,
                key,
                current_fingerprint.unwrap_or(Fingerprint::ZERO),
                task_deps.map(|lock| lock.into_inner()),
            );

            let print_status = cfg!(debug_assertions) && cx.debug_dep_tasks();

            // Determine the color of the new DepNode.
            if let Some(prev_index) = data.previous.node_to_index_opt(&key) {
                let prev_fingerprint = data.previous.fingerprint_by_index(prev_index);

                let color = if let Some(current_fingerprint) = current_fingerprint {
                    if current_fingerprint == prev_fingerprint {
                        if print_status {
                            eprintln!("[task::green] {:?}", key);
                        }
                        DepNodeColor::Green(dep_node_index)
                    } else {
                        if print_status {
                            eprintln!("[task::red] {:?}", key);
                        }
                        DepNodeColor::Red
                    }
                } else {
                    if print_status {
                        eprintln!("[task::unknown] {:?}", key);
                    }
                    // Mark the node as red if we can't hash the result.
                    DepNodeColor::Red
                };

                debug_assert!(
                    data.colors.get(prev_index).is_none(),
                    "DepGraph::with_task() - Duplicate DepNodeColor insertion for {:?}",
                    key
                );

                data.colors.insert(prev_index, color);
            } else if print_status {
                eprintln!("[task::new] {:?}", key);
            }

            (result, dep_node_index)
        } else {
            (task(cx, arg), self.next_virtual_depnode_index())
        }
    }

    /// Executes something within an "anonymous" task, that is, a task the
    /// `DepNode` of which is determined by the list of inputs it read from.
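    ///
    /// A sketch of a call site, for illustration only (the concrete `DepKind`
    /// variant and the closure body are stand-ins):
    ///
    /// ```ignore (illustrative)
    /// let (result, dep_node_index) =
    ///     tcx.dep_graph.with_anon_task(DepKind::TraitSelect, || select(tcx, key));
    /// ```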
    pub fn with_anon_task<OP, R>(&self, dep_kind: K, op: OP) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        if let Some(ref data) = self.data {
            let task_deps = Lock::new(TaskDeps::default());

            let result = K::with_deps(Some(&task_deps), op);
            let task_deps = task_deps.into_inner();

            let dep_node_index = data.current.complete_anon_task(dep_kind, task_deps);
            (result, dep_node_index)
        } else {
            (op(), self.next_virtual_depnode_index())
        }
    }

    /// Executes something within an "eval-always" task, which is a task
    /// that runs whenever anything changes.
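    ///
    /// Illustrative only; `compute` stands in for a real provider function:
    ///
    /// ```ignore (illustrative)
    /// let (result, dep_node_index) =
    ///     tcx.dep_graph.with_eval_always_task(dep_node, tcx, arg, compute, hash_result);
    /// ```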
    pub fn with_eval_always_task<Ctxt: DepContext<DepKind = K>, A, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex) {
        self.with_task_impl(
            key,
            cx,
            arg,
            false,
            task,
            |_| None,
            |data, key, fingerprint, _| data.alloc_node(key, smallvec![], fingerprint),
            hash_result,
        )
    }

    #[inline]
    pub fn read(&self, v: DepNode<K>) {
        if let Some(ref data) = self.data {
            let map = data.current.node_to_node_index.get_shard_by_value(&v).lock();
            if let Some(dep_node_index) = map.get(&v).copied() {
                std::mem::drop(map);
                data.read_index(dep_node_index);
            } else {
                panic!("DepKind {:?} should be pre-allocated but isn't.", v.kind)
            }
        }
    }

    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            data.read_index(dep_node_index);
        }
    }

    #[inline]
    pub fn dep_node_index_of(&self, dep_node: &DepNode<K>) -> DepNodeIndex {
        self.data
            .as_ref()
            .unwrap()
            .current
            .node_to_node_index
            .get_shard_by_value(dep_node)
            .lock()
            .get(dep_node)
            .copied()
            .unwrap()
    }

    #[inline]
    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
        if let Some(ref data) = self.data {
            data.current
                .node_to_node_index
                .get_shard_by_value(&dep_node)
                .lock()
                .contains_key(dep_node)
        } else {
            false
        }
    }

    #[inline]
    pub fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint {
        let data = self.data.as_ref().expect("dep graph enabled").current.data.lock();
        data[dep_node_index].fingerprint
    }

    pub fn prev_fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> {
        self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
    }

    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
        &self.data.as_ref().unwrap().previous_work_products
    }

    #[inline(always)]
    pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode<K>, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        let debug_str = debug_str_gen();
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }

    pub fn edge_deduplication_data(&self) -> Option<(u64, u64)> {
        if cfg!(debug_assertions) {
            let current_dep_graph = &self.data.as_ref().unwrap().current;

            Some((
                current_dep_graph.total_read_count.load(Relaxed),
                current_dep_graph.total_duplicate_read_count.load(Relaxed),
            ))
        } else {
            None
        }
    }

    pub fn serialize(&self) -> SerializedDepGraph<K> {
        let data = self.data.as_ref().unwrap().current.data.lock();

        let fingerprints: IndexVec<SerializedDepNodeIndex, _> =
            data.iter().map(|d| d.fingerprint).collect();
        let nodes: IndexVec<SerializedDepNodeIndex, _> = data.iter().map(|d| d.node).collect();

        let total_edge_count: usize = data.iter().map(|d| d.edges.len()).sum();

        let mut edge_list_indices = IndexVec::with_capacity(nodes.len());
        let mut edge_list_data = Vec::with_capacity(total_edge_count);
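
        // The edge lists of all nodes are flattened into one `edge_list_data`
        // vector (a CSR-style layout): node `i`'s targets live at
        // `edge_list_data[start..end]`, where `(start, end) = edge_list_indices[i]`.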
        for (current_dep_node_index, edges) in data.iter_enumerated().map(|(i, d)| (i, &d.edges)) {
            let start = edge_list_data.len() as u32;
            // This should really just be a memcpy :/
            edge_list_data.extend(edges.iter().map(|i| SerializedDepNodeIndex::new(i.index())));
            let end = edge_list_data.len() as u32;

            debug_assert_eq!(current_dep_node_index.index(), edge_list_indices.len());
            edge_list_indices.push((start, end));
        }

        debug_assert!(edge_list_data.len() <= u32::MAX as usize);
        debug_assert_eq!(edge_list_data.len(), total_edge_count);

        SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data }
    }

    pub fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
        if let Some(ref data) = self.data {
            if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
                return data.colors.get(prev_index);
            } else {
                // This is a node that did not exist in the previous compilation
                // session, so we consider it to be red.
                return Some(DepNodeColor::Red);
            }
        }

        None
    }

    /// Tries to read a node index for the node `dep_node`.
    /// A node will have an index when it has already been marked green, or when we can mark it
    /// green. This function will mark the current task as a reader of the specified node when
    /// a node index can be found for that node.
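    ///
    /// A sketch of the intended use, for illustration only (`dep_node` and the
    /// fallback path are stand-ins):
    ///
    /// ```ignore (illustrative)
    /// if let Some((prev_index, dep_node_index)) =
    ///     tcx.dep_graph.try_mark_green_and_read(tcx, &dep_node)
    /// {
    ///     // Reuse the result cached for `dep_node` in the previous session.
    /// } else {
    ///     // Recompute the query from scratch.
    /// }
    /// ```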
    pub fn try_mark_green_and_read<Ctxt: DepContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        dep_node: &DepNode<K>,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        self.try_mark_green(tcx, dep_node).map(|(prev_index, dep_node_index)| {
            debug_assert!(self.is_green(&dep_node));
            self.read_index(dep_node_index);
            (prev_index, dep_node_index)
        })
    }

    pub fn try_mark_green<Ctxt: DepContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        dep_node: &DepNode<K>,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!dep_node.kind.is_eval_always());

        // Return None if the dep graph is disabled.
        let data = self.data.as_ref()?;

        // Return None if the dep node didn't exist in the previous session.
        let prev_index = data.previous.node_to_index_opt(dep_node)?;

        match data.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
            None => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(tcx, data, prev_index, &dep_node)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }

    /// Try to mark a dep-node which existed in the previous compilation session as green.
    fn try_mark_previous_green<Ctxt: DepContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        data: &DepGraphData<K>,
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode<K>,
    ) -> Option<DepNodeIndex> {
        debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);

        #[cfg(not(parallel_compiler))]
        {
            debug_assert!(
                !data
                    .current
                    .node_to_node_index
                    .get_shard_by_value(dep_node)
                    .lock()
                    .contains_key(dep_node)
            );
            debug_assert!(data.colors.get(prev_dep_node_index).is_none());
        }

        // We never try to mark eval_always nodes as green.
        debug_assert!(!dep_node.kind.is_eval_always());

        debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);

        let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);

        let mut current_deps = SmallVec::new();

        for &dep_dep_node_index in prev_deps {
            let dep_dep_node_color = data.colors.get(dep_dep_node_index);

            match dep_dep_node_color {
                Some(DepNodeColor::Green(node_index)) => {
                    // This dependency has been marked as green before, we are
                    // still fine and can continue with checking the other
                    // dependencies.
                    debug!(
                        "try_mark_previous_green({:?}) --- found dependency {:?} to \
                         be immediately green",
                        dep_node,
                        data.previous.index_to_node(dep_dep_node_index)
                    );
                    current_deps.push(node_index);
                }
                Some(DepNodeColor::Red) => {
                    // We found a dependency the value of which has changed
                    // compared to the previous compilation session. We cannot
                    // mark the DepNode as green and also don't need to bother
                    // with checking any of the other dependencies.
                    debug!(
                        "try_mark_previous_green({:?}) - END - dependency {:?} was \
                         immediately red",
                        dep_node,
                        data.previous.index_to_node(dep_dep_node_index)
                    );
                    return None;
                }
                None => {
                    let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index);

                    // We don't know the state of this dependency. If it isn't
                    // an eval_always node, let's try to mark it green recursively.
                    if !dep_dep_node.kind.is_eval_always() {
                        debug!(
                            "try_mark_previous_green({:?}) --- state of dependency {:?} \
                             is unknown, trying to mark it green",
                            dep_node, dep_dep_node
                        );

                        let node_index = self.try_mark_previous_green(
                            tcx,
                            data,
                            dep_dep_node_index,
                            dep_dep_node,
                        );
                        if let Some(node_index) = node_index {
                            debug!(
                                "try_mark_previous_green({:?}) --- managed to MARK \
                                 dependency {:?} as green",
                                dep_node, dep_dep_node
                            );
                            current_deps.push(node_index);
                            continue;
                        }
                    }

                    // We failed to mark it green, so we try to force the query.
                    debug!(
                        "try_mark_previous_green({:?}) --- trying to force \
                         dependency {:?}",
                        dep_node, dep_dep_node
                    );
                    if tcx.try_force_from_dep_node(dep_dep_node) {
                        let dep_dep_node_color = data.colors.get(dep_dep_node_index);

                        match dep_dep_node_color {
                            Some(DepNodeColor::Green(node_index)) => {
                                debug!(
                                    "try_mark_previous_green({:?}) --- managed to \
                                     FORCE dependency {:?} to green",
                                    dep_node, dep_dep_node
                                );
                                current_deps.push(node_index);
                            }
                            Some(DepNodeColor::Red) => {
                                debug!(
                                    "try_mark_previous_green({:?}) - END - \
                                     dependency {:?} was red after forcing",
                                    dep_node, dep_dep_node
                                );
                                return None;
                            }
                            None => {
                                if !tcx.has_errors_or_delayed_span_bugs() {
                                    panic!(
                                        "try_mark_previous_green() - Forcing the DepNode \
                                         should have set its color"
                                    )
                                } else {
                                    // If the query we just forced has resulted in
                                    // some kind of compilation error, we cannot rely on
                                    // the dep-node color having been properly updated.
                                    // This means that the query system has reached an
                                    // invalid state. We let the compiler continue (by
                                    // returning `None`) so it can emit error messages
                                    // and wind down, but rely on the fact that this
                                    // invalid state will not be persisted to the
                                    // incremental compilation cache because of
                                    // compilation errors being present.
                                    debug!(
                                        "try_mark_previous_green({:?}) - END - \
                                         dependency {:?} resulted in compilation error",
                                        dep_node, dep_dep_node
                                    );
                                    return None;
                                }
                            }
                        }
                    } else {
                        // The DepNode could not be forced.
                        debug!(
                            "try_mark_previous_green({:?}) - END - dependency {:?} \
                             could not be forced",
                            dep_node, dep_dep_node
                        );
                        return None;
                    }
                }
            }
        }

        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green.

        // There may be multiple threads trying to mark the same dep node green concurrently.

        let dep_node_index = {
            // Copy the fingerprint from the previous graph,
            // so we don't have to recompute it.
            let fingerprint = data.previous.fingerprint_by_index(prev_dep_node_index);

            // We allocate an entry for the node in the current dependency graph and
            // add all the appropriate edges imported from the previous graph.
            data.current.intern_node(*dep_node, current_deps, fingerprint)
        };

        // ... emitting any stored diagnostics ...

        // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere.
        // Maybe store a list on disk and encode this fact in the DepNodeState.
        let diagnostics = tcx.load_diagnostics(prev_dep_node_index);

        #[cfg(not(parallel_compiler))]
        debug_assert!(
            data.colors.get(prev_dep_node_index).is_none(),
            "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
             insertion for {:?}",
            dep_node
        );

        if unlikely!(!diagnostics.is_empty()) {
            self.emit_diagnostics(tcx, data, dep_node_index, prev_dep_node_index, diagnostics);
        }

        // ... and finally storing a "Green" entry in the color map.
        // Multiple threads can all write the same color here.
        data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

        debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);
        Some(dep_node_index)
    }

    /// Atomically emits some loaded diagnostics.
    /// This may be called concurrently on multiple threads for the same dep node.
    #[cold]
    #[inline(never)]
    fn emit_diagnostics<Ctxt: DepContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        data: &DepGraphData<K>,
        dep_node_index: DepNodeIndex,
        prev_dep_node_index: SerializedDepNodeIndex,
        diagnostics: Vec<Diagnostic>,
    ) {
        let mut emitting = data.emitting_diagnostics.lock();

        if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
            // The node is already green so diagnostics must have been emitted already.
            return;
        }

        if emitting.insert(dep_node_index) {
            // We were the first to insert the node in the set so this thread
            // must emit the diagnostics and signal other potentially waiting
            // threads after.
            mem::drop(emitting);

            // Promote the previous diagnostics to the current session.
            tcx.store_diagnostics(dep_node_index, diagnostics.clone().into());

            let handle = tcx.diagnostic();

            for diagnostic in diagnostics {
                handle.emit_diagnostic(&diagnostic);
            }

            // Mark the node as green now that diagnostics are emitted.
            data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

            // Remove the node from the set.
            data.emitting_diagnostics.lock().remove(&dep_node_index);

            // Wake up waiters.
            data.emitting_diagnostics_cond_var.notify_all();
        } else {
            // We must wait for the other thread to finish emitting the diagnostics.

            loop {
                data.emitting_diagnostics_cond_var.wait(&mut emitting);
                if data.colors.get(prev_dep_node_index)
                    == Some(DepNodeColor::Green(dep_node_index))
                {
                    break;
                }
            }
        }
    }

    // Returns true if the given node has been marked as green during the
    // current compilation session. Used in various assertions.
    pub fn is_green(&self, dep_node: &DepNode<K>) -> bool {
        self.node_color(dep_node).map(|c| c.is_green()).unwrap_or(false)
    }

    // This method loads all on-disk cacheable query results into memory, so
    // they can be written out to the new cache file again. Most query results
    // will already be in memory but in the case where we marked something as
    // green but then did not need the value, that value will never have been
    // loaded from disk.
    //
    // This method will only load queries that will end up in the disk cache.
    // Other queries will not be executed.
    pub fn exec_cache_promotions<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
        let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    tcx.try_load_from_on_disk_cache(&dep_node);
                }
                None | Some(DepNodeColor::Red) => {
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.
                }
            }
        }
    }

    fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
        DepNodeIndex::from_u32(index)
    }
}
822 /// A "work product" is an intermediate result that we save into the
823 /// incremental directory for later re-use. The primary example are
824 /// the object files that we save for each partition at code
827 /// Each work product is associated with a dep-node, representing the
828 /// process that produced the work-product. If that dep-node is found
829 /// to be dirty when we load up, then we will delete the work-product
830 /// at load time. If the work-product is found to be clean, then we
831 /// will keep a record in the `previous_work_products` list.
833 /// In addition, work products have an associated hash. This hash is
834 /// an extra hash that can be used to decide if the work-product from
835 /// a previous compilation can be re-used (in addition to the dirty
838 /// As the primary example, consider the object files we generate for
839 /// each partition. In the first run, we create partitions based on
840 /// the symbols that need to be compiled. For each partition P, we
841 /// hash the symbols in P and create a `WorkProduct` record associated
842 /// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
845 /// The next time we compile, if the `DepNode::CodegenUnit(P)` is
846 /// judged to be clean (which means none of the things we read to
847 /// generate the partition were found to be dirty), it will be loaded
848 /// into previous work products. We will then regenerate the set of
849 /// symbols in the partition P and hash them (note that new symbols
850 /// may be added -- for example, new monomorphizations -- even if
851 /// nothing in P changed!). We will compare that hash against the
852 /// previous hash. If it matches up, we can reuse the object file.
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct WorkProduct {
    pub cgu_name: String,
    /// Saved file associated with this CGU.
    pub saved_file: Option<String>,
}

struct DepNodeData<K> {
    node: DepNode<K>,
    edges: EdgesVec,
    fingerprint: Fingerprint,
}

/// `CurrentDepGraph` stores the dependency graph for the current session.
/// It will be populated as we run queries or tasks.
///
/// The nodes in it are identified by an index (`DepNodeIndex`).
/// The data for each node is stored in its `DepNodeData`, found in the `data` field.
///
/// We never remove nodes from the graph: they are only added.
///
/// This struct uses two locks internally. The `data` and `node_to_node_index` fields are
/// locked separately. Operations that take a `DepNodeIndex` typically just access
/// the `data` field.
///
/// The only operation that must manipulate both locks is adding new nodes, in which case
/// we first acquire the `node_to_node_index` lock and then, once a new node is to be inserted,
/// acquire the lock on `data`.
pub(super) struct CurrentDepGraph<K> {
    data: Lock<IndexVec<DepNodeIndex, DepNodeData<K>>>,
    node_to_node_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    forbidden_edge: Option<EdgeFilter>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: the color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is just a random number generated when
    /// the `DepGraph` is created.
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,
}

impl<K: DepKind> CurrentDepGraph<K> {
    fn new(prev_graph_node_count: usize) -> CurrentDepGraph<K> {
        use std::time::{SystemTime, UNIX_EPOCH};

        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);

        let forbidden_edge = if cfg!(debug_assertions) {
            match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
                Ok(s) => match EdgeFilter::new(&s) {
                    Ok(f) => Some(f),
                    Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
                },
                Err(_) => None,
            }
        } else {
            None
        };

        // Pre-allocate the dep node structures. We over-allocate a little so
        // that we hopefully don't have to re-allocate during this compilation
        // session. The over-allocation is 2% plus a small constant to account
        // for the fact that in very small crates 2% might not be enough.
        let new_node_count_estimate = (prev_graph_node_count * 102) / 100 + 200;
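        // For example, a previous graph with 10_000 nodes yields an estimate
        // of 10_000 * 102 / 100 + 200 = 10_400 slots.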

        CurrentDepGraph {
            data: Lock::new(IndexVec::with_capacity(new_node_count_estimate)),
            node_to_node_index: Sharded::new(|| {
                FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate / sharded::SHARDS,
                    Default::default(),
                )
            }),
            anon_id_seed: stable_hasher.finish(),
            forbidden_edge,
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }

    fn complete_task(
        &self,
        node: DepNode<K>,
        task_deps: TaskDeps<K>,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        self.alloc_node(node, task_deps.reads, fingerprint)
    }

    fn complete_anon_task(&self, kind: K, task_deps: TaskDeps<K>) -> DepNodeIndex {
        debug_assert!(!kind.is_eval_always());

        let mut hasher = StableHasher::new();

        // The dep node indices are hashed here instead of hashing the dep nodes of the
        // dependencies. These indices may refer to different nodes per session, but this isn't
        // a problem here because we ensure that the final dep node hash is per-session only by
        // combining it with the per-session random number `anon_id_seed`. This hash only needs
        // to map the dependencies to a single value on a per-session basis.
        task_deps.reads.hash(&mut hasher);

        let target_dep_node = DepNode {
            kind,

            // Fingerprint::combine() is faster than sending Fingerprint
            // through the StableHasher (at least as long as StableHasher
            // is so slow).
            hash: PackedFingerprint(self.anon_id_seed.combine(hasher.finish())),
        };

        self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO)
    }

    fn alloc_node(
        &self,
        dep_node: DepNode<K>,
        edges: EdgesVec,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        debug_assert!(
            !self.node_to_node_index.get_shard_by_value(&dep_node).lock().contains_key(&dep_node)
        );
        self.intern_node(dep_node, edges, fingerprint)
    }

    fn intern_node(
        &self,
        dep_node: DepNode<K>,
        edges: EdgesVec,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        match self.node_to_node_index.get_shard_by_value(&dep_node).lock().entry(dep_node) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                let mut data = self.data.lock();
                let dep_node_index = DepNodeIndex::new(data.len());
                data.push(DepNodeData { node: dep_node, edges, fingerprint });
                entry.insert(dep_node_index);
                dep_node_index
            }
        }
    }
}

impl<K: DepKind> DepGraphData<K> {
    #[inline(never)]
    fn read_index(&self, source: DepNodeIndex) {
        K::read_deps(|task_deps| {
            if let Some(task_deps) = task_deps {
                let mut task_deps = task_deps.lock();
                let task_deps = &mut *task_deps;
                if cfg!(debug_assertions) {
                    self.current.total_read_count.fetch_add(1, Relaxed);
                }

                // As long as we only have a low number of reads we can avoid doing a hash
                // insert and potentially allocating/reallocating the hashmap.
                let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
                    task_deps.reads.iter().all(|other| *other != source)
                } else {
                    task_deps.read_set.insert(source)
                };
                if new_read {
                    task_deps.reads.push(source);
                    if task_deps.reads.len() == TASK_DEPS_READS_CAP {
                        // Fill `read_set` with what we have so far so we can use the hashset
                        // next time.
                        task_deps.read_set.extend(task_deps.reads.iter().copied());
                    }

                    #[cfg(debug_assertions)]
                    {
                        if let Some(target) = task_deps.node {
                            let data = self.current.data.lock();
                            if let Some(ref forbidden_edge) = self.current.forbidden_edge {
                                let source = data[source].node;
                                if forbidden_edge.test(&source, &target) {
                                    panic!("forbidden edge {:?} -> {:?} created", source, target)
                                }
                            }
                        }
                    }
                } else if cfg!(debug_assertions) {
                    self.current.total_duplicate_read_count.fetch_add(1, Relaxed);
                }
            }
        })
    }
}

/// The capacity of the `reads` field's `SmallVec`.
const TASK_DEPS_READS_CAP: usize = 8;
type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>;
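
// Below this cap, `TaskDeps::reads` is deduplicated by a linear scan of the
// `SmallVec`; once it reaches the cap, the `read_set` hash set takes over
// duplicate detection (see `DepGraphData::read_index` above).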

pub struct TaskDeps<K> {
    #[cfg(debug_assertions)]
    node: Option<DepNode<K>>,
    reads: EdgesVec,
    read_set: FxHashSet<DepNodeIndex>,
    phantom_data: PhantomData<DepNode<K>>,
}

impl<K> Default for TaskDeps<K> {
    fn default() -> Self {
        Self {
            #[cfg(debug_assertions)]
            node: None,
            reads: EdgesVec::new(),
            read_set: FxHashSet::default(),
            phantom_data: PhantomData,
        }
    }
}

// A data structure that stores Option<DepNodeColor> values as a contiguous
// array, using one u32 per entry.
struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;
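
// Spelled out, the encoding is:
//   None                         -> COMPRESSED_NONE (0)
//   Some(DepNodeColor::Red)      -> COMPRESSED_RED (1)
//   Some(DepNodeColor::Green(i)) -> i + COMPRESSED_FIRST_GREEN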

impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
    }

    fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
            value => {
                Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
            }
        }
    }

    fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
        self.values[index].store(
            match color {
                DepNodeColor::Red => COMPRESSED_RED,
                DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
            },
            Ordering::Release,
        )
    }
}