1 use errors::{Diagnostic, DiagnosticBuilder};
2 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
3 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
4 use rustc_data_structures::indexed_vec::{Idx, IndexVec};
5 use smallvec::SmallVec;
6 use rustc_data_structures::sync::{Lrc, Lock, AtomicU32, Ordering};
9 use std::collections::hash_map::Entry;
10 use crate::ty::{self, TyCtxt};
11 use crate::util::common::{ProfileQueriesMsg, profq_msg};
12 use parking_lot::{Mutex, Condvar};
14 use crate::ich::{StableHashingContext, StableHashingContextProvider, Fingerprint};
16 use super::debug::EdgeFilter;
17 use super::dep_node::{DepNode, DepKind, WorkProductId};
18 use super::query::DepGraphQuery;
19 use super::safe::DepGraphSafe;
20 use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
21 use super::prev::PreviousDepGraph;
25 data: Option<Lrc<DepGraphData>>,
29 pub struct DepNodeIndex { .. }
33 const INVALID: DepNodeIndex = DepNodeIndex::MAX;
36 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
37 pub enum DepNodeColor {
43 pub fn is_green(self) -> bool {
45 DepNodeColor::Red => false,
46 DepNodeColor::Green(_) => true,
52 /// The new encoding of the dependency graph, optimized for red/green
53 /// tracking. The `current` field is the dependency graph of only the
54 /// current compilation session: We don't merge the previous dep-graph into
55 /// the current one anymore.
56 current: Lock<CurrentDepGraph>,
58 /// The dep-graph from the previous compilation session. It contains all
59 /// nodes and edges as well as all fingerprints of nodes that have them.
60 previous: PreviousDepGraph,
62 colors: DepNodeColorMap,
64 /// The set of nodes whose loaded diagnostics have already been emitted.
65 emitted_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,
67 /// Used to wait for diagnostics to be emitted.
68 emitted_diagnostics_cond_var: Condvar,
70 /// When we load, there may be `.o` files, cached MIR, or other such
71 /// things available to us. If we find that they are not dirty, we
72 /// load the path to the file storing those work-products into
73 /// this map. We can later look for and extract that data.
74 previous_work_products: FxHashMap<WorkProductId, WorkProduct>,
76 dep_node_debug: Lock<FxHashMap<DepNode, String>>,
78 // Used for testing, only populated when -Zquery-dep-graph is specified.
79 loaded_from_cache: Lock<FxHashMap<DepNodeIndex, bool>>,
82 pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Option<Fingerprint>
84 R: for<'a> HashStable<StableHashingContext<'a>>,
86 let mut stable_hasher = StableHasher::new();
87 result.hash_stable(hcx, &mut stable_hasher);
89 Some(stable_hasher.finish())
94 pub fn new(prev_graph: PreviousDepGraph,
95 prev_work_products: FxHashMap<WorkProductId, WorkProduct>) -> DepGraph {
96 let prev_graph_node_count = prev_graph.node_count();
99 data: Some(Lrc::new(DepGraphData {
100 previous_work_products: prev_work_products,
101 dep_node_debug: Default::default(),
102 current: Lock::new(CurrentDepGraph::new(prev_graph_node_count)),
103 emitted_diagnostics: Default::default(),
104 emitted_diagnostics_cond_var: Condvar::new(),
105 previous: prev_graph,
106 colors: DepNodeColorMap::new(prev_graph_node_count),
107 loaded_from_cache: Default::default(),
112 pub fn new_disabled() -> DepGraph {
118 /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
120 pub fn is_fully_enabled(&self) -> bool {
124 pub fn query(&self) -> DepGraphQuery {
125 let current_dep_graph = self.data.as_ref().unwrap().current.borrow();
126 let nodes: Vec<_> = current_dep_graph.data.iter().map(|n| n.node).collect();
127 let mut edges = Vec::new();
128 for (from, edge_targets) in current_dep_graph.data.iter()
129 .map(|d| (d.node, &d.edges)) {
130 for &edge_target in edge_targets.iter() {
131 let to = current_dep_graph.data[edge_target].node;
132 edges.push((from, to));
136 DepGraphQuery::new(&nodes[..], &edges[..])
139 pub fn assert_ignored(&self)
141 if let Some(..) = self.data {
142 ty::tls::with_context_opt(|icx| {
143 let icx = if let Some(icx) = icx { icx } else { return };
144 assert!(icx.task_deps.is_none(), "expected no task dependency tracking");
149 pub fn with_ignore<OP,R>(&self, op: OP) -> R
150 where OP: FnOnce() -> R
152 ty::tls::with_context(|icx| {
153 let icx = ty::tls::ImplicitCtxt {
158 ty::tls::enter_context(&icx, |_| {
164 /// Starts a new dep-graph task. Dep-graph tasks are specified
165 /// using a free function (`task`) and **not** a closure -- this
166 /// is intentional because we want to exercise tight control over
167 /// what state they have access to. In particular, we want to
168 /// prevent implicit 'leaks' of tracked state into the task (which
169 /// could then be read without generating correct edges in the
170 /// dep-graph -- see the [rustc guide] for more details on
171 /// the dep-graph). To this end, the task function gets exactly two
172 /// pieces of state: the context `cx` and an argument `arg`. Both
173 /// of these bits of state must be of some type that implements
174 /// `DepGraphSafe` and hence does not leak.
176 /// The choice of two arguments is not fundamental. One argument
177 /// would work just as well, since multiple values can be
178 /// collected using tuples. However, using two arguments works out
179 /// to be quite convenient, since it is common to need a context
180 /// (`cx`) and some argument (e.g., a `DefId` identifying what
181 /// item to process).
183 /// For cases where you need some other number of arguments:
185 /// - If you only need one argument, just use `()` for the `arg`
187 /// - If you need 3+ arguments, use a tuple for the
190 /// [rustc guide]: https://rust-lang.github.io/rustc-guide/incremental-compilation.html
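///
/// A purely illustrative sketch of a call site; `compute_foo`, `MyCtxt`,
/// `Foo`, and `dep_node` are hypothetical names, and `hash_result` is
/// assumed to be in scope:
///
/// ```ignore
/// // Both the context and the argument must implement `DepGraphSafe`.
/// fn compute_foo(cx: MyCtxt<'_>, def_id: DefId) -> Foo { /* ... */ }
///
/// let (result, dep_node_index) =
///     dep_graph.with_task(dep_node, cx, def_id, compute_foo, hash_result);
/// ```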
191 pub fn with_task<'a, C, A, R>(
197 hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
198 ) -> (R, DepNodeIndex)
200 C: DepGraphSafe + StableHashingContextProvider<'a>,
202 self.with_task_impl(key, cx, arg, false, task,
203 |_key| Some(TaskDeps {
204 #[cfg(debug_assertions)]
206 reads: SmallVec::new(),
207 read_set: Default::default(),
209 |data, key, fingerprint, task| {
210 data.borrow_mut().complete_task(key, task.unwrap(), fingerprint)
215 /// Creates a new dep-graph input with value `input`
216 pub fn input_task<'a, C, R>(&self,
221 where C: DepGraphSafe + StableHashingContextProvider<'a>,
222 R: for<'b> HashStable<StableHashingContext<'b>>,
224 fn identity_fn<C, A>(_: C, arg: A) -> A {
228 self.with_task_impl(key, cx, input, true, identity_fn,
230 |data, key, fingerprint, _| {
231 data.borrow_mut().alloc_node(key, SmallVec::new(), fingerprint)
236 fn with_task_impl<'a, C, A, R>(
243 create_task: fn(DepNode) -> Option<TaskDeps>,
244 finish_task_and_alloc_depnode: fn(&Lock<CurrentDepGraph>,
247 Option<TaskDeps>) -> DepNodeIndex,
248 hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
249 ) -> (R, DepNodeIndex)
251 C: DepGraphSafe + StableHashingContextProvider<'a>,
253 if let Some(ref data) = self.data {
254 let task_deps = create_task(key).map(|deps| Lock::new(deps));
256 // In incremental mode, hash the result of the task. We don't
257 // do anything with the hash yet, but we are computing it anyway so that:
259 // - we make sure that the infrastructure works and
260 // - we can get an idea of the runtime cost.
261 let mut hcx = cx.get_stable_hashing_context();
263 if cfg!(debug_assertions) {
264 profq_msg(hcx.sess(), ProfileQueriesMsg::TaskBegin(key.clone()))
267 let result = if no_tcx {
270 ty::tls::with_context(|icx| {
271 let icx = ty::tls::ImplicitCtxt {
272 task_deps: task_deps.as_ref(),
276 ty::tls::enter_context(&icx, |_| {
282 if cfg!(debug_assertions) {
283 profq_msg(hcx.sess(), ProfileQueriesMsg::TaskEnd)
286 let current_fingerprint = hash_result(&mut hcx, &result);
288 let dep_node_index = finish_task_and_alloc_depnode(
291 current_fingerprint.unwrap_or(Fingerprint::ZERO),
292 task_deps.map(|lock| lock.into_inner()),
295 // Determine the color of the new DepNode.
296 if let Some(prev_index) = data.previous.node_to_index_opt(&key) {
297 let prev_fingerprint = data.previous.fingerprint_by_index(prev_index);
299 let color = if let Some(current_fingerprint) = current_fingerprint {
300 if current_fingerprint == prev_fingerprint {
301 DepNodeColor::Green(dep_node_index)
306 // Mark the node as Red if we can't hash the result
310 debug_assert!(data.colors.get(prev_index).is_none(),
311 "DepGraph::with_task() - Duplicate DepNodeColor \
312 insertion for {:?}", key);
314 data.colors.insert(prev_index, color);
317 (result, dep_node_index)
319 (task(cx, arg), DepNodeIndex::INVALID)
323 /// Executes something within an "anonymous" task, that is, a task whose
324 /// `DepNode` is determined by the list of inputs it read from.
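///
/// Illustrative sketch only (the closure body and the use of
/// `DepKind::TraitSelect` are assumptions, not taken from a real call site):
///
/// ```ignore
/// let (result, dep_node_index) = tcx.dep_graph.with_anon_task(
///     DepKind::TraitSelect,
///     || {
///         // Reads performed in here are collected into the task's deps;
///         // the resulting `DepNode` is computed from that list of reads.
///         do_selection()
///     },
/// );
/// ```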
325 pub fn with_anon_task<OP,R>(&self, dep_kind: DepKind, op: OP) -> (R, DepNodeIndex)
326 where OP: FnOnce() -> R
328 if let Some(ref data) = self.data {
329 let (result, task_deps) = ty::tls::with_context(|icx| {
330 let task_deps = Lock::new(TaskDeps {
331 #[cfg(debug_assertions)]
333 reads: SmallVec::new(),
334 read_set: Default::default(),
338 let icx = ty::tls::ImplicitCtxt {
339 task_deps: Some(&task_deps),
343 ty::tls::enter_context(&icx, |_| {
348 (r, task_deps.into_inner())
350 let dep_node_index = data.current
352 .complete_anon_task(dep_kind, task_deps);
353 (result, dep_node_index)
355 (op(), DepNodeIndex::INVALID)
359 /// Executes something within an "eval-always" task, which is a task
360 /// that runs whenever anything changes.
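/// This is modelled below by giving the resulting node a single dependency
/// edge on the `DepKind::Krate` node, which changes whenever anything in the
/// crate does.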
361 pub fn with_eval_always_task<'a, C, A, R>(
367 hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
368 ) -> (R, DepNodeIndex)
370 C: DepGraphSafe + StableHashingContextProvider<'a>,
372 self.with_task_impl(key, cx, arg, false, task,
374 |data, key, fingerprint, _| {
375 let mut current = data.borrow_mut();
376 let krate_idx = current.node_to_node_index[
377 &DepNode::new_no_params(DepKind::Krate)
379 current.alloc_node(key, smallvec![krate_idx], fingerprint)
385 pub fn read(&self, v: DepNode) {
386 if let Some(ref data) = self.data {
387 let current = data.current.borrow_mut();
388 if let Some(&dep_node_index) = current.node_to_node_index.get(&v) {
389 std::mem::drop(current);
390 data.read_index(dep_node_index);
392 bug!("DepKind {:?} should be pre-allocated but isn't.", v.kind)
398 pub fn read_index(&self, dep_node_index: DepNodeIndex) {
399 if let Some(ref data) = self.data {
400 data.read_index(dep_node_index);
405 pub fn dep_node_index_of(&self, dep_node: &DepNode) -> DepNodeIndex {
418 pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
419 if let Some(ref data) = self.data {
420 data.current.borrow_mut().node_to_node_index.contains_key(dep_node)
427 pub fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint {
428 let current = self.data.as_ref().expect("dep graph enabled").current.borrow_mut();
429 current.data[dep_node_index].fingerprint
432 pub fn prev_fingerprint_of(&self, dep_node: &DepNode) -> Option<Fingerprint> {
433 self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
437 pub fn prev_dep_node_index_of(&self, dep_node: &DepNode) -> SerializedDepNodeIndex {
438 self.data.as_ref().unwrap().previous.node_to_index(dep_node)
441 /// Checks whether a previous work product exists for `v` and, if
442 /// so, returns the path that leads to it. Used to skip doing work.
443 pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
447 data.previous_work_products.get(v).cloned()
451 /// Access the map of work-products created during the cached run. Only
452 /// used during saving of the dep-graph.
453 pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
454 &self.data.as_ref().unwrap().previous_work_products
458 pub fn register_dep_node_debug_str<F>(&self,
461 where F: FnOnce() -> String
463 let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;
465 if dep_node_debug.borrow().contains_key(&dep_node) {
468 let debug_str = debug_str_gen();
469 dep_node_debug.borrow_mut().insert(dep_node, debug_str);
472 pub(super) fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
481 pub fn edge_deduplication_data(&self) -> Option<(u64, u64)> {
482 if cfg!(debug_assertions) {
483 let current_dep_graph = self.data.as_ref().unwrap().current.borrow();
485 Some((current_dep_graph.total_read_count,
486 current_dep_graph.total_duplicate_read_count))
492 pub fn serialize(&self) -> SerializedDepGraph {
493 let current_dep_graph = self.data.as_ref().unwrap().current.borrow();
495 let fingerprints: IndexVec<SerializedDepNodeIndex, _> =
496 current_dep_graph.data.iter().map(|d| d.fingerprint).collect();
497 let nodes: IndexVec<SerializedDepNodeIndex, _> =
498 current_dep_graph.data.iter().map(|d| d.node).collect();
500 let total_edge_count: usize = current_dep_graph.data.iter()
501 .map(|d| d.edges.len())
504 let mut edge_list_indices = IndexVec::with_capacity(nodes.len());
505 let mut edge_list_data = Vec::with_capacity(total_edge_count);
507 for (current_dep_node_index, edges) in current_dep_graph.data.iter_enumerated()
508 .map(|(i, d)| (i, &d.edges)) {
509 let start = edge_list_data.len() as u32;
510 // This should really just be a memcpy :/
511 edge_list_data.extend(edges.iter().map(|i| SerializedDepNodeIndex::new(i.index())));
512 let end = edge_list_data.len() as u32;
514 debug_assert_eq!(current_dep_node_index.index(), edge_list_indices.len());
515 edge_list_indices.push((start, end));
518 debug_assert!(edge_list_data.len() <= ::std::u32::MAX as usize);
519 debug_assert_eq!(edge_list_data.len(), total_edge_count);
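// Worked illustration (not in the original source): the edges are stored in
// a flat adjacency-list layout. For a graph where node 0 depends on nodes 1
// and 2, and node 1 depends on node 2:
//
//     edge_list_data    = [1, 2, 2]
//     edge_list_indices = [(0, 2), (2, 3)]
//
// so the edges of node `i` are `edge_list_data[start..end]` with
// `(start, end) = edge_list_indices[i]`.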
529 pub fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
530 if let Some(ref data) = self.data {
531 if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
532 return data.colors.get(prev_index)
534 // This is a node that did not exist in the previous compilation
535 // session, so we consider it to be red.
536 return Some(DepNodeColor::Red)
543 /// Tries to read a node index for the node `dep_node`.
544 /// A node will have an index when it has already been marked green, or when we can mark it
545 /// green. This function will mark the current task as a reader of the specified node when
546 /// a node index can be found for that node.
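///
/// Sketch of how the query machinery might use this (`load_cached_result`
/// and `recompute` are hypothetical helpers, not real call sites from this
/// file):
///
/// ```ignore
/// if let Some((prev_index, dep_node_index)) =
///     tcx.dep_graph.try_mark_green_and_read(tcx, &dep_node)
/// {
///     // Everything this node depends on is unchanged; reuse the cached
///     // result stored under `prev_index`.
///     load_cached_result(tcx, prev_index, dep_node_index)
/// } else {
///     // The node is new or one of its inputs changed: re-execute the query.
///     recompute(tcx)
/// }
/// ```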
547 pub fn try_mark_green_and_read(
549 tcx: TyCtxt<'_, '_, '_>,
551 ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
552 self.try_mark_green(tcx, dep_node).map(|(prev_index, dep_node_index)| {
553 debug_assert!(self.is_green(&dep_node));
554 self.read_index(dep_node_index);
555 (prev_index, dep_node_index)
559 pub fn try_mark_green(
561 tcx: TyCtxt<'_, '_, '_>,
563 ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
564 debug_assert!(!dep_node.kind.is_input());
566 // Return None if the dep graph is disabled
567 let data = self.data.as_ref()?;
569 // Return None if the dep node didn't exist in the previous session
570 let prev_index = data.previous.node_to_index_opt(dep_node)?;
572 match data.colors.get(prev_index) {
573 Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
574 Some(DepNodeColor::Red) => None,
576 // This DepNode and the corresponding query invocation existed
577 // in the previous compilation session too, so we can try to
578 // mark it as green by recursively marking all of its
579 // dependencies green.
580 self.try_mark_previous_green(
585 ).map(|dep_node_index| {
586 (prev_index, dep_node_index)
592 /// Try to mark a dep-node which existed in the previous compilation session as green.
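///
/// In outline: walk the node's dependencies as recorded in the previous
/// graph; green dependencies are accepted, a red dependency aborts the
/// attempt, and dependencies of unknown color are first marked recursively
/// (or, failing that, forced, for query nodes). If every dependency ends up
/// green, the node is interned into the current graph with its previous
/// fingerprint and colored green.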
593 fn try_mark_previous_green<'tcx>(
595 tcx: TyCtxt<'_, 'tcx, 'tcx>,
597 prev_dep_node_index: SerializedDepNodeIndex,
599 ) -> Option<DepNodeIndex> {
600 debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);
602 #[cfg(not(parallel_compiler))]
604 debug_assert!(!data.current.borrow().node_to_node_index.contains_key(dep_node));
605 debug_assert!(data.colors.get(prev_dep_node_index).is_none());
608 // We never try to mark inputs as green
609 debug_assert!(!dep_node.kind.is_input());
611 debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);
613 let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);
615 let mut current_deps = SmallVec::new();
617 for &dep_dep_node_index in prev_deps {
618 let dep_dep_node_color = data.colors.get(dep_dep_node_index);
620 match dep_dep_node_color {
621 Some(DepNodeColor::Green(node_index)) => {
622 // This dependency has been marked as green before; we are
623 // still fine and can continue with checking the other
625 debug!("try_mark_previous_green({:?}) --- found dependency {:?} to \
626 be immediately green",
628 data.previous.index_to_node(dep_dep_node_index));
629 current_deps.push(node_index);
631 Some(DepNodeColor::Red) => {
632 // We found a dependency whose value has changed
633 // compared to the previous compilation session. We cannot
634 // mark the DepNode as green and also don't need to bother
635 // with checking any of the other dependencies.
636 debug!("try_mark_previous_green({:?}) - END - dependency {:?} was \
639 data.previous.index_to_node(dep_dep_node_index));
643 let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index);
645 // We don't know the state of this dependency. If it isn't
646 // an input node, let's try to mark it green recursively.
647 if !dep_dep_node.kind.is_input() {
648 debug!("try_mark_previous_green({:?}) --- state of dependency {:?} \
649 is unknown, trying to mark it green", dep_node,
652 let node_index = self.try_mark_previous_green(
658 if let Some(node_index) = node_index {
659 debug!("try_mark_previous_green({:?}) --- managed to MARK \
660 dependency {:?} as green", dep_node, dep_dep_node);
661 current_deps.push(node_index);
665 match dep_dep_node.kind {
668 DepKind::CrateMetadata => {
669 if dep_dep_node.extract_def_id(tcx).is_none() {
670 // If the node does not exist anymore, we
671 // just fail to mark green.
674 // If the node does exist, it should have
675 // been pre-allocated.
676 bug!("DepNode {:?} should have been \
677 pre-allocated but wasn't.",
682 // For other kinds of inputs it's OK to be
688 // We failed to mark it green, so we try to force the query.
689 debug!("try_mark_previous_green({:?}) --- trying to force \
690 dependency {:?}", dep_node, dep_dep_node);
691 if crate::ty::query::force_from_dep_node(tcx, dep_dep_node) {
692 let dep_dep_node_color = data.colors.get(dep_dep_node_index);
694 match dep_dep_node_color {
695 Some(DepNodeColor::Green(node_index)) => {
696 debug!("try_mark_previous_green({:?}) --- managed to \
697 FORCE dependency {:?} to green",
698 dep_node, dep_dep_node);
699 current_deps.push(node_index);
701 Some(DepNodeColor::Red) => {
702 debug!("try_mark_previous_green({:?}) - END - \
703 dependency {:?} was red after forcing",
709 if !tcx.sess.has_errors() {
710 bug!("try_mark_previous_green() - Forcing the DepNode \
711 should have set its color")
713 // If the query we just forced has resulted
714 // in some kind of compilation error, we
715 // don't expect that the corresponding
716 // dep-node color has been updated.
721 // The DepNode could not be forced.
722 debug!("try_mark_previous_green({:?}) - END - dependency {:?} \
723 could not be forced", dep_node, dep_dep_node);
730 // If we got here without hitting a `return` that means that all
731 // dependencies of this DepNode could be marked as green. Therefore we
732 // can also mark this DepNode as green.
734 // There may be multiple threads trying to mark the same dep node green concurrently
736 let (dep_node_index, did_allocation) = {
737 let mut current = data.current.borrow_mut();
739 // Copy the fingerprint from the previous graph,
740 // so we don't have to recompute it
741 let fingerprint = data.previous.fingerprint_by_index(prev_dep_node_index);
743 // We allocate an entry for the node in the current dependency graph and
744 // add all the appropriate edges imported from the previous graph.
745 current.intern_node(*dep_node, current_deps, fingerprint)
748 // ... emitting any stored diagnostic ...
750 let diagnostics = tcx.queries.on_disk_cache
751 .load_diagnostics(tcx, prev_dep_node_index);
753 if unlikely!(diagnostics.len() > 0) {
754 self.emit_diagnostics(
763 // ... and finally storing a "Green" entry in the color map.
764 // Multiple threads can all write the same color here
765 #[cfg(not(parallel_compiler))]
766 debug_assert!(data.colors.get(prev_dep_node_index).is_none(),
767 "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
768 insertion for {:?}", dep_node);
770 data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
772 debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);
776 /// Atomically emits some loaded diagnostics, assuming that this only gets called with
777 /// `did_allocation` set to `true` on a single thread.
780 fn emit_diagnostics<'tcx>(
782 tcx: TyCtxt<'_, 'tcx, 'tcx>,
784 dep_node_index: DepNodeIndex,
785 did_allocation: bool,
786 diagnostics: Vec<Diagnostic>,
788 if did_allocation || !cfg!(parallel_compiler) {
789 // Only the thread which did the allocation emits the error messages
790 let handle = tcx.sess.diagnostic();
792 // Promote the previous diagnostics to the current session.
793 tcx.queries.on_disk_cache
794 .store_diagnostics(dep_node_index, diagnostics.clone().into());
796 for diagnostic in diagnostics {
797 DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit();
800 #[cfg(parallel_compiler)]
802 // Mark the diagnostics as emitted and wake up any waiters
803 data.emitted_diagnostics.lock().insert(dep_node_index);
804 data.emitted_diagnostics_cond_var.notify_all();
807 // The other threads will wait for the diagnostics to be emitted
809 let mut emitted_diagnostics = data.emitted_diagnostics.lock();
811 if emitted_diagnostics.contains(&dep_node_index) {
814 data.emitted_diagnostics_cond_var.wait(&mut emitted_diagnostics);
819 // Returns true if the given node has been marked as green during the
820 // current compilation session. Used in various assertions
821 pub fn is_green(&self, dep_node: &DepNode) -> bool {
822 self.node_color(dep_node).map(|c| c.is_green()).unwrap_or(false)
825 // This method loads all on-disk cacheable query results into memory, so
826 // they can be written out to the new cache file again. Most query results
827 // will already be in memory but in the case where we marked something as
828 // green but then did not need the value, that value will never have been
831 // This method will only load queries that will end up in the disk cache.
832 // Other queries will not be executed.
833 pub fn exec_cache_promotions<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) {
834 let green_nodes: Vec<DepNode> = {
835 let data = self.data.as_ref().unwrap();
836 data.colors.values.indices().filter_map(|prev_index| {
837 match data.colors.get(prev_index) {
838 Some(DepNodeColor::Green(_)) => {
839 let dep_node = data.previous.index_to_node(prev_index);
840 if dep_node.cache_on_disk(tcx) {
847 Some(DepNodeColor::Red) => {
848 // We can skip red nodes because a node can only be marked
849 // as red if the query result was recomputed and thus is
850 // already in memory.
857 for dep_node in green_nodes {
858 dep_node.load_from_on_disk_cache(tcx);
862 pub fn mark_loaded_from_cache(&self, dep_node_index: DepNodeIndex, state: bool) {
863 debug!("mark_loaded_from_cache({:?}, {})",
864 self.data.as_ref().unwrap().current.borrow().data[dep_node_index].node,
872 .insert(dep_node_index, state);
875 pub fn was_loaded_from_cache(&self, dep_node: &DepNode) -> Option<bool> {
876 let data = self.data.as_ref().unwrap();
877 let dep_node_index = data.current.borrow().node_to_node_index[dep_node];
878 data.loaded_from_cache.borrow().get(&dep_node_index).cloned()
882 /// A "work product" is an intermediate result that we save into the
883 /// incremental directory for later re-use. The primary example is
884 /// the object files that we save for each partition at code
887 /// Each work product is associated with a dep-node, representing the
888 /// process that produced the work-product. If that dep-node is found
889 /// to be dirty when we load up, then we will delete the work-product
890 /// at load time. If the work-product is found to be clean, then we
891 /// will keep a record in the `previous_work_products` list.
893 /// In addition, work products have an associated hash. This hash is
894 /// an extra hash that can be used to decide if the work-product from
895 /// a previous compilation can be re-used (in addition to the dirty
898 /// As the primary example, consider the object files we generate for
899 /// each partition. In the first run, we create partitions based on
900 /// the symbols that need to be compiled. For each partition P, we
901 /// hash the symbols in P and create a `WorkProduct` record associated
902 /// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
905 /// The next time we compile, if the `DepNode::CodegenUnit(P)` is
906 /// judged to be clean (which means none of the things we read to
907 /// generate the partition were found to be dirty), it will be loaded
908 /// into previous work products. We will then regenerate the set of
909 /// symbols in the partition P and hash them (note that new symbols
910 /// may be added -- for example, new monomorphizations -- even if
911 /// nothing in P changed!). We will compare that hash against the
912 /// previous hash. If it matches up, we can reuse the object file.
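///
/// Purely illustrative value (the CGU name and file name are made up, and an
/// `Object` file kind is assumed):
///
/// ```ignore
/// WorkProduct {
///     cgu_name: String::from("regex.7qxmhmdA-cgu.0"),
///     saved_files: vec![
///         (WorkProductFileKind::Object, String::from("regex.7qxmhmdA-cgu.0.o")),
///     ],
/// }
/// ```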
913 #[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
914 pub struct WorkProduct {
915 pub cgu_name: String,
916 /// Saved files associated with this CGU.
917 pub saved_files: Vec<(WorkProductFileKind, String)>,
920 #[derive(Clone, Copy, Debug, RustcEncodable, RustcDecodable, PartialEq)]
921 pub enum WorkProductFileKind {
930 edges: SmallVec<[DepNodeIndex; 8]>,
931 fingerprint: Fingerprint,
934 pub(super) struct CurrentDepGraph {
935 data: IndexVec<DepNodeIndex, DepNodeData>,
936 node_to_node_index: FxHashMap<DepNode, DepNodeIndex>,
938 forbidden_edge: Option<EdgeFilter>,
940 /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
941 /// their edges. This has the beneficial side-effect that multiple anonymous
942 /// nodes can be coalesced into one without changing the semantics of the
943 /// dependency graph. However, the merging of nodes can lead to a subtle
944 /// problem during red-green marking: The color of an anonymous node from
945 /// the current session might "shadow" the color of the node with the same
946 /// ID from the previous session. In order to side-step this problem, we make
947 /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
948 /// This is implemented by mixing a session-key into the ID fingerprint of
949 /// each anon node. The session-key is just a random number generated when
950 /// the `DepGraph` is created.
951 anon_id_seed: Fingerprint,
953 total_read_count: u64,
954 total_duplicate_read_count: u64,
957 impl CurrentDepGraph {
958 fn new(prev_graph_node_count: usize) -> CurrentDepGraph {
959 use std::time::{SystemTime, UNIX_EPOCH};
961 let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
962 let nanos = duration.as_secs() * 1_000_000_000 +
963 duration.subsec_nanos() as u64;
964 let mut stable_hasher = StableHasher::new();
965 nanos.hash(&mut stable_hasher);
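// (Explanatory note, not in the original: hashing the current time seeds
// `anon_id_seed` with a per-session value, so the fingerprints of anonymous
// nodes from different sessions cannot collide; see the comment on the
// `anon_id_seed` field above.)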
967 let forbidden_edge = if cfg!(debug_assertions) {
968 match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
970 match EdgeFilter::new(&s) {
972 Err(err) => bug!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
981 // Pre-allocate the dep node structures. We over-allocate a little so
982 // that we hopefully don't have to re-allocate during this compilation
984 let new_node_count_estimate = (prev_graph_node_count * 115) / 100;
987 data: IndexVec::with_capacity(new_node_count_estimate),
988 node_to_node_index: FxHashMap::with_capacity_and_hasher(
989 new_node_count_estimate,
992 anon_id_seed: stable_hasher.finish(),
995 total_duplicate_read_count: 0,
1002 task_deps: TaskDeps,
1003 fingerprint: Fingerprint
1005 // If this is an input node, we expect that it either has no
1006 // dependencies, or that it just depends on DepKind::CrateMetadata
1007 // or DepKind::Krate. This happens for some "thin wrapper queries"
1008 // like `crate_disambiguator`, which sometimes have zero deps (when
1009 // called for LOCAL_CRATE) or depend on a CrateMetadata
1011 if cfg!(debug_assertions) {
1012 if node.kind.is_input() && task_deps.reads.len() > 0 &&
1013 // FIXME(mw): Special case for DefSpan until Spans are handled
1014 // better in general.
1015 node.kind != DepKind::DefSpan &&
1016 task_deps.reads.iter().any(|&i| {
1017 !(self.data[i].node.kind == DepKind::CrateMetadata ||
1018 self.data[i].node.kind == DepKind::Krate)
1021 bug!("Input node {:?} with unexpected reads: {:?}",
1023 task_deps.reads.iter().map(|&i| self.data[i].node).collect::<Vec<_>>())
1027 self.alloc_node(node, task_deps.reads, fingerprint)
1030 fn complete_anon_task(&mut self, kind: DepKind, task_deps: TaskDeps) -> DepNodeIndex {
1031 debug_assert!(!kind.is_input());
1033 let mut fingerprint = self.anon_id_seed;
1034 let mut hasher = StableHasher::new();
1036 for &read in task_deps.reads.iter() {
1037 let read_dep_node = self.data[read].node;
1039 ::std::mem::discriminant(&read_dep_node.kind).hash(&mut hasher);
1041 // Fingerprint::combine() is faster than sending Fingerprint
1042 // through the StableHasher (at least as long as StableHasher
1044 fingerprint = fingerprint.combine(read_dep_node.hash);
1047 fingerprint = fingerprint.combine(hasher.finish());
1049 let target_dep_node = DepNode {
1054 self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO).0
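// Illustrative note (not in the original source): because the fingerprint is
// derived only from `anon_id_seed`, the kinds and hashes of the nodes that
// were read, and the task's dep kind, two anonymous tasks of the same kind
// that performed the same reads end up with the same `DepNode` and are
// coalesced into a single index by `intern_node`.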
1060 edges: SmallVec<[DepNodeIndex; 8]>,
1061 fingerprint: Fingerprint
1063 debug_assert!(!self.node_to_node_index.contains_key(&dep_node));
1064 self.intern_node(dep_node, edges, fingerprint).0
1070 edges: SmallVec<[DepNodeIndex; 8]>,
1071 fingerprint: Fingerprint
1072 ) -> (DepNodeIndex, bool) {
1073 debug_assert_eq!(self.node_to_node_index.len(), self.data.len());
1075 match self.node_to_node_index.entry(dep_node) {
1076 Entry::Occupied(entry) => (*entry.get(), false),
1077 Entry::Vacant(entry) => {
1078 let dep_node_index = DepNodeIndex::new(self.data.len());
1079 self.data.push(DepNodeData {
1084 entry.insert(dep_node_index);
1085 (dep_node_index, true)
1092 fn read_index(&self, source: DepNodeIndex) {
1093 ty::tls::with_context_opt(|icx| {
1094 let icx = if let Some(icx) = icx { icx } else { return };
1095 if let Some(task_deps) = icx.task_deps {
1096 let mut task_deps = task_deps.lock();
1097 if cfg!(debug_assertions) {
1098 self.current.lock().total_read_count += 1;
1100 if task_deps.read_set.insert(source) {
1101 task_deps.reads.push(source);
1103 #[cfg(debug_assertions)]
1105 if let Some(target) = task_deps.node {
1106 let graph = self.current.lock();
1107 if let Some(ref forbidden_edge) = graph.forbidden_edge {
1108 let source = graph.data[source].node;
1109 if forbidden_edge.test(&source, &target) {
1110 bug!("forbidden edge {:?} -> {:?} created",
1117 } else if cfg!(debug_assertions) {
1118 self.current.lock().total_duplicate_read_count += 1;
1125 pub struct TaskDeps {
1126 #[cfg(debug_assertions)]
1127 node: Option<DepNode>,
1128 reads: SmallVec<[DepNodeIndex; 8]>,
1129 read_set: FxHashSet<DepNodeIndex>,
1132 // A data structure that stores Option<DepNodeColor> values as a contiguous
1133 // array, using one u32 per entry.
1134 struct DepNodeColorMap {
1135 values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
1138 const COMPRESSED_NONE: u32 = 0;
1139 const COMPRESSED_RED: u32 = 1;
1140 const COMPRESSED_FIRST_GREEN: u32 = 2;
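// Worked illustration (not in the original source) of the packing scheme,
// using a hypothetical green node with index 5:
//
//     None                          -> 0 (COMPRESSED_NONE)
//     Some(DepNodeColor::Red)       -> 1 (COMPRESSED_RED)
//     Some(DepNodeColor::Green(5))  -> 5 + COMPRESSED_FIRST_GREEN = 7
//
// `get` below inverts the mapping by subtracting COMPRESSED_FIRST_GREEN.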
1142 impl DepNodeColorMap {
1143 fn new(size: usize) -> DepNodeColorMap {
1145 values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect(),
1149 fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
1150 match self.values[index].load(Ordering::Acquire) {
1151 COMPRESSED_NONE => None,
1152 COMPRESSED_RED => Some(DepNodeColor::Red),
1153 value => Some(DepNodeColor::Green(DepNodeIndex::from_u32(
1154 value - COMPRESSED_FIRST_GREEN
1159 fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
1160 self.values[index].store(match color {
1161 DepNodeColor::Red => COMPRESSED_RED,
1162 DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
1163 }, Ordering::Release)