1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use errors::DiagnosticBuilder;
12 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
13 StableHashingContextProvider};
14 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
15 use rustc_data_structures::indexed_vec::{Idx, IndexVec};
16 use rustc_data_structures::sync::Lrc;
17 use std::cell::{Ref, RefCell};
21 use util::common::{ProfileQueriesMsg, profq_msg};
25 use super::debug::EdgeFilter;
26 use super::dep_node::{DepNode, DepKind, WorkProductId};
27 use super::query::DepGraphQuery;
29 use super::safe::DepGraphSafe;
30 use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
31 use super::prev::PreviousDepGraph;
// NOTE(review): interior lines of this chunk were elided during extraction
// (the embedded original line numbers jump), so the comments below annotate
// only what is visible; do not treat the code as compilable as-is.
// Fields of the dep-graph handle. `data` is `None` when dep-graph tracking
// is disabled (see `new_disabled` below); public methods branch on that.
35 data: Option<Lrc<DepGraphData>>,
37 // A vector mapping depnodes from the current graph to their associated
38 // result value fingerprints. Do not rely on the length of this vector
39 // being the same as the number of nodes in the graph. The vector can
40 // contain an arbitrary number of zero-entries at the end.
41 fingerprints: Lrc<RefCell<IndexVec<DepNodeIndex, Fingerprint>>>
// Index of a node in the *current* session's dependency graph.
45 newtype_index!(DepNodeIndex);
// Sentinel index returned by the task helpers when the dep-graph is
// disabled (no real node was allocated); see `with_task_impl` and
// `with_anon_task` below.
48 const INVALID: DepNodeIndex = DepNodeIndex(::std::u32::MAX);
// Color assigned to a previous-session node during red/green tracking:
// `Red` = its result fingerprint changed this session; `Green(i)` = it is
// unchanged, with `i` the node's index in the current graph (see the
// `DepNodeColor::Green(dep_node_index)` uses further below).
51 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
52 pub enum DepNodeColor {
// True exactly for the `Green(_)` variant.
58 pub fn is_green(self) -> bool {
60 DepNodeColor::Red => false,
61 DepNodeColor::Green(_) => true,
67 /// The new encoding of the dependency graph, optimized for red/green
68 /// tracking. The `current` field is the dependency graph of only the
69 /// current compilation session: We don't merge the previous dep-graph into
70 /// current one anymore.
71 current: RefCell<CurrentDepGraph>,
73 /// The dep-graph from the previous compilation session. It contains all
74 /// nodes and edges as well as all fingerprints of nodes that have them.
75 previous: PreviousDepGraph,
// Red/green color of each previous-session node, filled in lazily during
// this session (by `with_task_impl` and `try_mark_green`).
77 colors: RefCell<DepNodeColorMap>,
79 /// When we load, there may be `.o` files, cached mir, or other such
80 /// things available to us. If we find that they are not dirty, we
81 /// load the path to the file storing those work-products here into
82 /// this map. We can later look for and extract that data.
83 previous_work_products: RefCell<FxHashMap<WorkProductId, WorkProduct>>,
85 /// Work-products that we generate in this run.
86 work_products: RefCell<FxHashMap<WorkProductId, WorkProduct>>,
// Human-readable debug strings registered per node via
// `register_dep_node_debug_str`; a debugging aid only.
88 dep_node_debug: RefCell<FxHashMap<DepNode, String>>,
90 // Used for testing, only populated when -Zquery-dep-graph is specified.
91 loaded_from_cache: RefCell<FxHashMap<DepNodeIndex, bool>>,
// Builds a fully-enabled dep-graph seeded with the previous session's graph.
96 pub fn new(prev_graph: PreviousDepGraph) -> DepGraph {
97 // Pre-allocate the fingerprints array. We over-allocate a little so
98 // that we hopefully don't have to re-allocate during this compilation
100 let prev_graph_node_count = prev_graph.node_count();
// 15% headroom over the previous session's node count.
102 let fingerprints = IndexVec::from_elem_n(Fingerprint::ZERO,
103 (prev_graph_node_count * 115) / 100);
105 data: Some(Lrc::new(DepGraphData {
106 previous_work_products: RefCell::new(FxHashMap()),
107 work_products: RefCell::new(FxHashMap()),
108 dep_node_debug: RefCell::new(FxHashMap()),
109 current: RefCell::new(CurrentDepGraph::new()),
110 previous: prev_graph,
111 colors: RefCell::new(DepNodeColorMap::new(prev_graph_node_count)),
112 loaded_from_cache: RefCell::new(FxHashMap()),
114 fingerprints: Lrc::new(RefCell::new(fingerprints)),
// Builds a handle with dep-tracking disabled; presumably `data: None`
// here (the field initializer is elided in this view) — TODO confirm.
118 pub fn new_disabled() -> DepGraph {
121 fingerprints: Lrc::new(RefCell::new(IndexVec::new())),
125 /// True if we are actually building the full dep-graph.
127 pub fn is_fully_enabled(&self) -> bool {
// Snapshots the current session's nodes and edges into a `DepGraphQuery`
// for testing/inspection. Panics (via `unwrap`) if the graph is disabled.
131 pub fn query(&self) -> DepGraphQuery {
132 let current_dep_graph = self.data.as_ref().unwrap().current.borrow();
133 let nodes: Vec<_> = current_dep_graph.nodes.iter().cloned().collect();
134 let mut edges = Vec::new();
// Flatten the per-node adjacency lists into (from, to) DepNode pairs.
135 for (index, edge_targets) in current_dep_graph.edges.iter_enumerated() {
136 let from = current_dep_graph.nodes[index];
137 for &edge_target in edge_targets {
138 let to = current_dep_graph.nodes[edge_target];
139 edges.push((from, to));
143 DepGraphQuery::new(&nodes[..], &edges[..])
// Debug assertion that we are currently inside an "ignore" context (or no
// task at all): anything else at the top of the task stack panics.
146 pub fn assert_ignored(&self)
148 if let Some(ref data) = self.data {
149 match data.current.borrow().task_stack.last() {
150 Some(&OpenTask::Ignore) | None => {
153 _ => panic!("expected an ignore context")
// Runs `op` with dep-graph reads ignored: an RAII guard pushes an ignore
// frame for the duration of the closure (no-op when the graph is disabled).
158 pub fn with_ignore<OP,R>(&self, op: OP) -> R
159 where OP: FnOnce() -> R
161 let _task = self.data.as_ref().map(|data| raii::IgnoreTask::new(&data.current));
165 /// Starts a new dep-graph task. Dep-graph tasks are specified
166 /// using a free function (`task`) and **not** a closure -- this
167 /// is intentional because we want to exercise tight control over
168 /// what state they have access to. In particular, we want to
169 /// prevent implicit 'leaks' of tracked state into the task (which
170 /// could then be read without generating correct edges in the
171 /// dep-graph -- see the [rustc guide] for more details on
172 /// the dep-graph). To this end, the task function gets exactly two
173 /// pieces of state: the context `cx` and an argument `arg`. Both
174 /// of these bits of state must be of some type that implements
175 /// `DepGraphSafe` and hence does not leak.
177 /// The choice of two arguments is not fundamental. One argument
178 /// would work just as well, since multiple values can be
179 /// collected using tuples. However, using two arguments works out
180 /// to be quite convenient, since it is common to need a context
181 /// (`cx`) and some argument (e.g., a `DefId` identifying what
182 /// item to process).
184 /// For cases where you need some other number of arguments:
186 /// - If you only need one argument, just use `()` for the `arg`
188 /// - If you need 3+ arguments, use a tuple for the
191 /// [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/incremental-compilation.html
192 pub fn with_task<C, A, R, HCX>(&self,
198 where C: DepGraphSafe + StableHashingContextProvider<ContextType=HCX>,
// Delegate to the shared implementation, parameterized with the
// *regular*-task push/pop operations on `CurrentDepGraph`.
201 self.with_task_impl(key, cx, arg, task,
202 |data, key| data.borrow_mut().push_task(key),
203 |data, key| data.borrow_mut().pop_task(key))
// Shared engine behind `with_task` / `with_eval_always_task`. Three paths:
// (1) graph enabled: push a task frame, run `task`, pop to allocate the
//     node, fingerprint the result, and color the node vs. the previous
//     session; (2) graph disabled but the node's fingerprint feeds the
//     crate hash: still compute and store the fingerprint; (3) otherwise
//     just run the task and return `DepNodeIndex::INVALID`.
206 fn with_task_impl<C, A, R, HCX>(&self,
211 push: fn(&RefCell<CurrentDepGraph>, DepNode),
212 pop: fn(&RefCell<CurrentDepGraph>, DepNode) -> DepNodeIndex)
214 where C: DepGraphSafe + StableHashingContextProvider<ContextType=HCX>,
217 if let Some(ref data) = self.data {
218 push(&data.current, key);
// Profiling-queries instrumentation (debug builds only).
219 if cfg!(debug_assertions) {
220 profq_msg(ProfileQueriesMsg::TaskBegin(key.clone()))
223 // In incremental mode, hash the result of the task. We don't
224 // do anything with the hash yet, but we are computing it
226 // - we make sure that the infrastructure works and
227 // - we can get an idea of the runtime cost.
228 let mut hcx = cx.create_stable_hashing_context();
230 let result = task(cx, arg);
231 if cfg!(debug_assertions) {
232 profq_msg(ProfileQueriesMsg::TaskEnd)
235 let dep_node_index = pop(&data.current, key);
237 let mut stable_hasher = StableHasher::new();
238 result.hash_stable(&mut hcx, &mut stable_hasher);
240 let current_fingerprint = stable_hasher.finish();
242 // Store the current fingerprint
244 let mut fingerprints = self.fingerprints.borrow_mut();
// The fingerprint vector may be shorter than the node index (it is
// only over-allocated on construction); grow it on demand.
246 if dep_node_index.index() >= fingerprints.len() {
247 fingerprints.resize(dep_node_index.index() + 1, Fingerprint::ZERO);
250 debug_assert!(fingerprints[dep_node_index] == Fingerprint::ZERO,
251 "DepGraph::with_task() - Duplicate fingerprint \
252 insertion for {:?}", key);
253 fingerprints[dep_node_index] = current_fingerprint;
256 // Determine the color of the new DepNode.
257 if let Some(prev_index) = data.previous.node_to_index_opt(&key) {
258 let prev_fingerprint = data.previous.fingerprint_by_index(prev_index);
// Same fingerprint as last session => Green; the elided branch
// presumably yields Red — TODO confirm (lines elided here).
260 let color = if current_fingerprint == prev_fingerprint {
261 DepNodeColor::Green(dep_node_index)
266 let mut colors = data.colors.borrow_mut();
267 debug_assert!(colors.get(prev_index).is_none(),
268 "DepGraph::with_task() - Duplicate DepNodeColor \
269 insertion for {:?}", key);
271 colors.insert(prev_index, color);
274 (result, dep_node_index)
// Path (2): graph disabled, but this node kind's fingerprint is still
// needed to compute the crate hash.
276 if key.kind.fingerprint_needed_for_crate_hash() {
277 let mut hcx = cx.create_stable_hashing_context();
278 let result = task(cx, arg);
279 let mut stable_hasher = StableHasher::new();
280 result.hash_stable(&mut hcx, &mut stable_hasher);
281 let fingerprint = stable_hasher.finish();
283 let mut fingerprints = self.fingerprints.borrow_mut();
284 let dep_node_index = DepNodeIndex::new(fingerprints.len());
285 fingerprints.push(fingerprint);
287 debug_assert!(fingerprints[dep_node_index] == fingerprint,
288 "DepGraph::with_task() - Assigned fingerprint to \
289 unexpected index for {:?}", key);
291 (result, dep_node_index)
// Path (3): fully untracked execution.
293 (task(cx, arg), DepNodeIndex::INVALID)
298 /// Execute something within an "anonymous" task, that is, a task the
299 /// DepNode of which is determined by the list of inputs it read from.
300 pub fn with_anon_task<OP,R>(&self, dep_kind: DepKind, op: OP) -> (R, DepNodeIndex)
301 where OP: FnOnce() -> R
303 if let Some(ref data) = self.data {
// Push an anonymous frame; the node's identity is derived from the
// reads recorded while `op` runs (see `pop_anon_task`).
304 data.current.borrow_mut().push_anon_task();
306 let dep_node_index = data.current
308 .pop_anon_task(dep_kind);
309 (result, dep_node_index)
// Graph disabled: run untracked.
311 (op(), DepNodeIndex::INVALID)
315 /// Execute something within an "eval-always" task which is a task
316 // that runs whenever anything changes.
317 pub fn with_eval_always_task<C, A, R, HCX>(&self,
323 where C: DepGraphSafe + StableHashingContextProvider<ContextType=HCX>,
// Same engine as `with_task`, but with the eval-always push/pop pair
// (such nodes depend only on the Krate node; see `pop_eval_always_task`).
326 self.with_task_impl(key, cx, arg, task,
327 |data, key| data.borrow_mut().push_eval_always_task(key),
328 |data, key| data.borrow_mut().pop_eval_always_task(key))
// Records a read of node `v` by the task on top of the stack. The node
// must already exist in the current graph, otherwise this is a bug.
332 pub fn read(&self, v: DepNode) {
333 if let Some(ref data) = self.data {
334 let mut current = data.current.borrow_mut();
335 if let Some(&dep_node_index) = current.node_to_node_index.get(&v) {
336 current.read_index(dep_node_index);
338 bug!("DepKind {:?} should be pre-allocated but isn't.", v.kind)
// Like `read`, but with the node index already known (skips the lookup).
344 pub fn read_index(&self, dep_node_index: DepNodeIndex) {
345 if let Some(ref data) = self.data {
346 data.current.borrow_mut().read_index(dep_node_index);
// Looks up the current-session index of `dep_node` (body elided here).
351 pub fn dep_node_index_of(&self, dep_node: &DepNode) -> DepNodeIndex {
// Whether `dep_node` has already been allocated in the current graph.
364 pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
365 if let Some(ref data) = self.data {
366 data.current.borrow_mut().node_to_node_index.contains_key(dep_node)
// Fetches the fingerprint recorded for a current-session node; a missing
// entry is a bug, reported with the node when the graph is available.
373 pub fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint {
374 match self.fingerprints.borrow().get(dep_node_index) {
375 Some(&fingerprint) => fingerprint,
377 if let Some(ref data) = self.data {
378 let dep_node = data.current.borrow().nodes[dep_node_index];
379 bug!("Could not find current fingerprint for {:?}", dep_node)
381 bug!("Could not find current fingerprint for {:?}", dep_node_index)
// Previous-session lookups; both panic if the graph is disabled.
387 pub fn prev_fingerprint_of(&self, dep_node: &DepNode) -> Option<Fingerprint> {
388 self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
392 pub fn prev_dep_node_index_of(&self, dep_node: &DepNode) -> SerializedDepNodeIndex {
393 self.data.as_ref().unwrap().previous.node_to_index(dep_node)
396 /// Indicates that a previous work product exists for `v`. This is
397 /// invoked during initial start-up based on what nodes are clean
398 /// (and what files exist in the incr. directory).
399 pub fn insert_previous_work_product(&self, v: &WorkProductId, data: WorkProduct) {
400 debug!("insert_previous_work_product({:?}, {:?})", v, data);
404 .previous_work_products
406 .insert(v.clone(), data);
409 /// Indicates that we created the given work-product in this run
410 /// for `v`. This record will be preserved and loaded in the next
412 pub fn insert_work_product(&self, v: &WorkProductId, data: WorkProduct) {
413 debug!("insert_work_product({:?}, {:?})", v, data);
419 .insert(v.clone(), data);
422 /// Check whether a previous work product exists for `v` and, if
423 /// so, return the path that leads to it. Used to skip doing work.
424 pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
425 data.previous_work_products.borrow().get(v).cloned()
432 /// Access the map of work-products created during this run. Only
433 /// used during saving of the dep-graph.
434 pub fn work_products(&self) -> Ref<FxHashMap<WorkProductId, WorkProduct>> {
435 self.data.as_ref().unwrap().work_products.borrow()
438 /// Access the map of work-products created during the cached run. Only
439 /// used during saving of the dep-graph.
440 pub fn previous_work_products(&self) -> Ref<FxHashMap<WorkProductId, WorkProduct>> {
441 self.data.as_ref().unwrap().previous_work_products.borrow()
// Lazily registers a human-readable debug string for `dep_node`; the
// generator closure is only invoked if no string is registered yet.
445 pub fn register_dep_node_debug_str<F>(&self,
448 where F: FnOnce() -> String
450 let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;
452 if dep_node_debug.borrow().contains_key(&dep_node) {
455 let debug_str = debug_str_gen();
456 dep_node_debug.borrow_mut().insert(dep_node, debug_str);
// Retrieves a previously registered debug string, if any.
459 pub(super) fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
460 self.data.as_ref().and_then(|t| t.dep_node_debug.borrow().get(&dep_node).cloned())
// Returns (total reads, duplicate reads) counters gathered while tasks ran;
// used for edge-deduplication statistics.
463 pub fn edge_deduplication_data(&self) -> (u64, u64) {
464 let current_dep_graph = self.data.as_ref().unwrap().current.borrow();
466 (current_dep_graph.total_read_count, current_dep_graph.total_duplicate_read_count)
// Converts the current-session graph into the on-disk `SerializedDepGraph`
// form: (node, fingerprint) pairs plus a CSR-style edge list.
469 pub fn serialize(&self) -> SerializedDepGraph {
470 let mut fingerprints = self.fingerprints.borrow_mut();
471 let current_dep_graph = self.data.as_ref().unwrap().current.borrow();
473 // Make sure we don't run out of bounds below.
474 if current_dep_graph.nodes.len() > fingerprints.len() {
475 fingerprints.resize(current_dep_graph.nodes.len(), Fingerprint::ZERO);
478 let nodes: IndexVec<_, (DepNode, Fingerprint)> =
479 current_dep_graph.nodes.iter_enumerated().map(|(idx, &dep_node)| {
480 (dep_node, fingerprints[idx])
483 let total_edge_count: usize = current_dep_graph.edges.iter()
// Edges are flattened into one Vec; per node we store the (start, end)
// half-open range into that Vec.
487 let mut edge_list_indices = IndexVec::with_capacity(nodes.len());
488 let mut edge_list_data = Vec::with_capacity(total_edge_count);
490 for (current_dep_node_index, edges) in current_dep_graph.edges.iter_enumerated() {
491 let start = edge_list_data.len() as u32;
492 // This should really just be a memcpy :/
493 edge_list_data.extend(edges.iter().map(|i| SerializedDepNodeIndex::new(i.index())));
494 let end = edge_list_data.len() as u32;
496 debug_assert_eq!(current_dep_node_index.index(), edge_list_indices.len());
497 edge_list_indices.push((start, end));
// The u32 offsets above must not overflow.
500 debug_assert!(edge_list_data.len() <= ::std::u32::MAX as usize);
501 debug_assert_eq!(edge_list_data.len(), total_edge_count);
// Returns the red/green color of `dep_node`, if it has been determined yet.
// Nodes unknown to the previous session are unconditionally Red.
510 pub fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
511 if let Some(ref data) = self.data {
512 if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
513 return data.colors.borrow().get(prev_index)
515 // This is a node that did not exist in the previous compilation
516 // session, so we consider it to be red.
517 return Some(DepNodeColor::Red)
// Core of the red/green algorithm: tries to prove `dep_node` unchanged
// ("green") without re-running its query, by recursively checking (or
// forcing) every dependency recorded in the previous session. Returns
// `Some(index)` of the newly allocated current-session node on success,
// `None` if any dependency turned out red or could not be resolved.
524 pub fn try_mark_green<'tcx>(&self,
525 tcx: TyCtxt<'_, 'tcx, 'tcx>,
527 -> Option<DepNodeIndex> {
528 debug!("try_mark_green({:?}) - BEGIN", dep_node);
529 let data = self.data.as_ref().unwrap();
531 debug_assert!(!data.current.borrow().node_to_node_index.contains_key(dep_node));
533 if dep_node.kind.is_input() {
534 // We should only hit try_mark_green() for inputs that do not exist
535 // anymore in the current compilation session. Existing inputs are
536 // eagerly marked as either red/green before any queries are
538 debug_assert!(dep_node.extract_def_id(tcx).is_none());
539 debug!("try_mark_green({:?}) - END - DepNode is deleted input", dep_node);
543 let (prev_deps, prev_dep_node_index) = match data.previous.edges_from(dep_node) {
545 // This DepNode and the corresponding query invocation existed
546 // in the previous compilation session too, so we can try to
547 // mark it as green by recursively marking all of its
548 // dependencies green.
552 // This DepNode did not exist in the previous compilation session,
553 // so we cannot mark it as green.
554 debug!("try_mark_green({:?}) - END - DepNode does not exist in \
555 current compilation session anymore", dep_node);
560 debug_assert!(data.colors.borrow().get(prev_dep_node_index).is_none());
// Collect the current-session indices of all (green) dependencies; these
// become the edges of the node we allocate at the end.
562 let mut current_deps = Vec::new();
564 for &dep_dep_node_index in prev_deps {
565 let dep_dep_node_color = data.colors.borrow().get(dep_dep_node_index);
567 match dep_dep_node_color {
568 Some(DepNodeColor::Green(node_index)) => {
569 // This dependency has been marked as green before, we are
570 // still fine and can continue with checking the other
572 debug!("try_mark_green({:?}) --- found dependency {:?} to \
573 be immediately green",
575 data.previous.index_to_node(dep_dep_node_index));
576 current_deps.push(node_index);
578 Some(DepNodeColor::Red) => {
579 // We found a dependency the value of which has changed
580 // compared to the previous compilation session. We cannot
581 // mark the DepNode as green and also don't need to bother
582 // with checking any of the other dependencies.
583 debug!("try_mark_green({:?}) - END - dependency {:?} was \
586 data.previous.index_to_node(dep_dep_node_index));
// Color not yet known for this dependency.
590 let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index);
592 // We don't know the state of this dependency. If it isn't
593 // an input node, let's try to mark it green recursively.
594 if !dep_dep_node.kind.is_input() {
595 debug!("try_mark_green({:?}) --- state of dependency {:?} \
596 is unknown, trying to mark it green", dep_node,
599 if let Some(node_index) = self.try_mark_green(tcx, dep_dep_node) {
600 debug!("try_mark_green({:?}) --- managed to MARK \
601 dependency {:?} as green", dep_node, dep_dep_node);
602 current_deps.push(node_index);
// Recursive marking failed (or the dep is an input) -- inspect the
// node kind before resorting to forcing the query.
606 match dep_dep_node.kind {
609 DepKind::CrateMetadata => {
610 if dep_node.extract_def_id(tcx).is_none() {
611 // If the node does not exist anymore, we
612 // just fail to mark green.
615 // If the node does exist, it should have
616 // been pre-allocated.
617 bug!("DepNode {:?} should have been \
618 pre-allocated but wasn't.",
623 // For other kinds of inputs it's OK to be
629 // We failed to mark it green, so we try to force the query.
630 debug!("try_mark_green({:?}) --- trying to force \
631 dependency {:?}", dep_node, dep_dep_node);
632 if ::ty::maps::force_from_dep_node(tcx, dep_dep_node) {
// Forcing re-executed the query, which must have colored the
// node as a side effect; re-read the color.
633 let dep_dep_node_color = data.colors.borrow().get(dep_dep_node_index);
635 match dep_dep_node_color {
636 Some(DepNodeColor::Green(node_index)) => {
637 debug!("try_mark_green({:?}) --- managed to \
638 FORCE dependency {:?} to green",
639 dep_node, dep_dep_node);
640 current_deps.push(node_index);
642 Some(DepNodeColor::Red) => {
643 debug!("try_mark_green({:?}) - END - \
644 dependency {:?} was red after forcing",
650 bug!("try_mark_green() - Forcing the DepNode \
651 should have set its color")
655 // The DepNode could not be forced.
656 debug!("try_mark_green({:?}) - END - dependency {:?} \
657 could not be forced", dep_node, dep_dep_node);
665 // If we got here without hitting a `return` that means that all
666 // dependencies of this DepNode could be marked as green. Therefore we
667 // can also mark this DepNode as green. We do so by...
669 // ... allocating an entry for it in the current dependency graph and
670 // adding all the appropriate edges imported from the previous graph ...
671 let dep_node_index = data.current
673 .alloc_node(*dep_node, current_deps);
675 // ... copying the fingerprint from the previous graph too, so we don't
676 // have to recompute it ...
678 let fingerprint = data.previous.fingerprint_by_index(prev_dep_node_index);
679 let mut fingerprints = self.fingerprints.borrow_mut();
681 if dep_node_index.index() >= fingerprints.len() {
682 fingerprints.resize(dep_node_index.index() + 1, Fingerprint::ZERO);
685 debug_assert!(fingerprints[dep_node_index] == Fingerprint::ZERO,
686 "DepGraph::try_mark_green() - Duplicate fingerprint \
687 insertion for {:?}", dep_node);
689 fingerprints[dep_node_index] = fingerprint;
692 // ... emitting any stored diagnostic ...
694 let diagnostics = tcx.on_disk_query_result_cache
695 .load_diagnostics(tcx, prev_dep_node_index);
697 if diagnostics.len() > 0 {
698 let handle = tcx.sess.diagnostic();
700 // Promote the previous diagnostics to the current session.
701 tcx.on_disk_query_result_cache
702 .store_diagnostics(dep_node_index, diagnostics.clone());
704 for diagnostic in diagnostics {
705 DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit();
710 // ... and finally storing a "Green" entry in the color map.
711 let mut colors = data.colors.borrow_mut();
712 debug_assert!(colors.get(prev_dep_node_index).is_none(),
713 "DepGraph::try_mark_green() - Duplicate DepNodeColor \
714 insertion for {:?}", dep_node);
716 colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
718 debug!("try_mark_green({:?}) - END - successfully marked as green", dep_node);
722 // Returns true if the given node has been marked as green during the
723 // current compilation session. Used in various assertions
724 pub fn is_green(&self, dep_node: &DepNode) -> bool {
725 self.node_color(dep_node).map(|c| c.is_green()).unwrap_or(false)
728 // This method loads all on-disk cacheable query results into memory, so
729 // they can be written out to the new cache file again. Most query results
730 // will already be in memory but in the case where we marked something as
731 // green but then did not need the value, that value will never have been
734 // This method will only load queries that will end up in the disk cache.
735 // Other queries will not be executed.
736 pub fn exec_cache_promotions<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) {
// First gather green, disk-cacheable nodes (while holding the color
// borrow), then load them in a second pass once the borrow is released.
737 let green_nodes: Vec<DepNode> = {
738 let data = self.data.as_ref().unwrap();
739 let colors = data.colors.borrow();
740 colors.values.indices().filter_map(|prev_index| {
741 match colors.get(prev_index) {
742 Some(DepNodeColor::Green(_)) => {
743 let dep_node = data.previous.index_to_node(prev_index);
744 if dep_node.cache_on_disk(tcx) {
751 Some(DepNodeColor::Red) => {
752 // We can skip red nodes because a node can only be marked
753 // as red if the query result was recomputed and thus is
754 // already in memory.
761 for dep_node in green_nodes {
762 dep_node.load_from_on_disk_cache(tcx);
// Test-only bookkeeping (-Zquery-dep-graph): records whether the result of
// `dep_node_index` was loaded from the on-disk cache.
766 pub fn mark_loaded_from_cache(&self, dep_node_index: DepNodeIndex, state: bool) {
767 debug!("mark_loaded_from_cache({:?}, {})",
768 self.data.as_ref().unwrap().current.borrow().nodes[dep_node_index],
776 .insert(dep_node_index, state);
// Test-only query of the flag above; `None` if never recorded.
779 pub fn was_loaded_from_cache(&self, dep_node: &DepNode) -> Option<bool> {
780 let data = self.data.as_ref().unwrap();
781 let dep_node_index = data.current.borrow().node_to_node_index[dep_node];
782 data.loaded_from_cache.borrow().get(&dep_node_index).cloned()
786 /// A "work product" is an intermediate result that we save into the
787 /// incremental directory for later re-use. The primary example are
788 /// the object files that we save for each partition at code
791 /// Each work product is associated with a dep-node, representing the
792 /// process that produced the work-product. If that dep-node is found
793 /// to be dirty when we load up, then we will delete the work-product
794 /// at load time. If the work-product is found to be clean, then we
795 /// will keep a record in the `previous_work_products` list.
797 /// In addition, work products have an associated hash. This hash is
798 /// an extra hash that can be used to decide if the work-product from
799 /// a previous compilation can be re-used (in addition to the dirty
802 /// As the primary example, consider the object files we generate for
803 /// each partition. In the first run, we create partitions based on
804 /// the symbols that need to be compiled. For each partition P, we
805 /// hash the symbols in P and create a `WorkProduct` record associated
806 /// with `DepNode::TransPartition(P)`; the hash is the set of symbols
809 /// The next time we compile, if the `DepNode::TransPartition(P)` is
810 /// judged to be clean (which means none of the things we read to
811 /// generate the partition were found to be dirty), it will be loaded
812 /// into previous work products. We will then regenerate the set of
813 /// symbols in the partition P and hash them (note that new symbols
814 /// may be added -- for example, new monomorphizations -- even if
815 /// nothing in P changed!). We will compare that hash against the
816 /// previous hash. If it matches up, we can reuse the object file.
817 #[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
818 pub struct WorkProduct {
819 pub cgu_name: String,
820 /// Saved files associated with this CGU
821 pub saved_files: Vec<(WorkProductFileKind, String)>,
// Kind tag for each saved file of a work product (variants elided in this
// view).
824 #[derive(Clone, Copy, Debug, RustcEncodable, RustcDecodable)]
825 pub enum WorkProductFileKind {
// The mutable, current-session dependency graph: adjacency lists plus the
// stack of in-flight tasks whose reads are being recorded.
831 pub(super) struct CurrentDepGraph {
832 nodes: IndexVec<DepNodeIndex, DepNode>,
833 edges: IndexVec<DepNodeIndex, Vec<DepNodeIndex>>,
// Reverse lookup from a DepNode to its index; kept in sync by `alloc_node`.
834 node_to_node_index: FxHashMap<DepNode, DepNodeIndex>,
835 task_stack: Vec<OpenTask>,
// Optional edge filter from RUST_FORBID_DEP_GRAPH_EDGE (debug builds only);
// matching edges trigger a bug! in `read_index`.
836 forbidden_edge: Option<EdgeFilter>,
838 // Anonymous DepNodes are nodes the ID of which we compute from the list of
839 // their edges. This has the beneficial side-effect that multiple anonymous
840 // nodes can be coalesced into one without changing the semantics of the
841 // dependency graph. However, the merging of nodes can lead to a subtle
842 // problem during red-green marking: The color of an anonymous node from
843 // the current session might "shadow" the color of the node with the same
844 // ID from the previous session. In order to side-step this problem, we make
845 // sure that anon-node IDs allocated in different sessions don't overlap.
846 // This is implemented by mixing a session-key into the ID fingerprint of
847 // each anon node. The session-key is just a random number generated when
848 // the DepGraph is created.
849 anon_id_seed: Fingerprint,
// Statistics for `edge_deduplication_data`.
851 total_read_count: u64,
852 total_duplicate_read_count: u64,
855 impl CurrentDepGraph {
856 fn new() -> CurrentDepGraph {
857 use std::time::{SystemTime, UNIX_EPOCH};
// Derive the per-session anon-id seed from the current wall-clock time
// in nanoseconds (see the `anon_id_seed` field comment above).
859 let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
860 let nanos = duration.as_secs() * 1_000_000_000 +
861 duration.subsec_nanos() as u64;
862 let mut stable_hasher = StableHasher::new();
863 nanos.hash(&mut stable_hasher);
// Debug builds may forbid specific edges via RUST_FORBID_DEP_GRAPH_EDGE;
// an unparsable filter is a hard error.
865 let forbidden_edge = if cfg!(debug_assertions) {
866 match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
868 match EdgeFilter::new(&s) {
870 Err(err) => bug!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
880 nodes: IndexVec::new(),
881 edges: IndexVec::new(),
882 node_to_node_index: FxHashMap(),
883 anon_id_seed: stable_hasher.finish(),
884 task_stack: Vec::new(),
887 total_duplicate_read_count: 0,
// Ignore frames: reads recorded while one is on top are discarded.
891 pub(super) fn push_ignore(&mut self) {
892 self.task_stack.push(OpenTask::Ignore);
895 pub(super) fn pop_ignore(&mut self) {
896 let popped_node = self.task_stack.pop().unwrap();
897 debug_assert_eq!(popped_node, OpenTask::Ignore);
// Opens a regular task frame for `key`; its reads accumulate until
// `pop_task` turns them into the node's edges.
900 pub(super) fn push_task(&mut self, key: DepNode) {
901 self.task_stack.push(OpenTask::Regular {
904 read_set: FxHashSet(),
// Closes the regular frame for `key` and allocates its node; `key` must
// match the frame that is popped.
908 pub(super) fn pop_task(&mut self, key: DepNode) -> DepNodeIndex {
909 let popped_node = self.task_stack.pop().unwrap();
911 if let OpenTask::Regular {
916 assert_eq!(node, key);
918 // If this is an input node, we expect that it either has no
919 // dependencies, or that it just depends on DepKind::CrateMetadata
920 // or DepKind::Krate. This happens for some "thin wrapper queries"
921 // like `crate_disambiguator` which sometimes have zero deps (for
922 // when called for LOCAL_CRATE) or they depend on a CrateMetadata
924 if cfg!(debug_assertions) {
925 if node.kind.is_input() && reads.len() > 0 &&
926 // FIXME(mw): Special case for DefSpan until Spans are handled
927 // better in general.
928 node.kind != DepKind::DefSpan &&
929 reads.iter().any(|&i| {
930 !(self.nodes[i].kind == DepKind::CrateMetadata ||
931 self.nodes[i].kind == DepKind::Krate)
934 bug!("Input node {:?} with unexpected reads: {:?}",
936 reads.iter().map(|&i| self.nodes[i]).collect::<Vec<_>>())
940 self.alloc_node(node, reads)
942 bug!("pop_task() - Expected regular task to be popped")
// Opens an anonymous frame; the resulting node's identity is derived from
// its reads rather than supplied by the caller.
946 fn push_anon_task(&mut self) {
947 self.task_stack.push(OpenTask::Anon {
949 read_set: FxHashSet(),
// Closes the anonymous frame, hashing the reads (plus the per-session
// anon_id_seed) into the node's fingerprint; coalesces with an existing
// identical anon node if one was already allocated.
953 fn pop_anon_task(&mut self, kind: DepKind) -> DepNodeIndex {
954 let popped_node = self.task_stack.pop().unwrap();
956 if let OpenTask::Anon {
960 debug_assert!(!kind.is_input());
962 let mut fingerprint = self.anon_id_seed;
963 let mut hasher = StableHasher::new();
965 for &read in reads.iter() {
966 let read_dep_node = self.nodes[read];
// Only the DepKind discriminant goes through the hasher; the node
// hash itself is combined directly (cheaper, see below).
968 ::std::mem::discriminant(&read_dep_node.kind).hash(&mut hasher);
970 // Fingerprint::combine() is faster than sending Fingerprint
971 // through the StableHasher (at least as long as StableHasher
973 fingerprint = fingerprint.combine(read_dep_node.hash);
976 fingerprint = fingerprint.combine(hasher.finish());
978 let target_dep_node = DepNode {
// Reuse an existing anon node with the same derived identity.
983 if let Some(&index) = self.node_to_node_index.get(&target_dep_node) {
986 self.alloc_node(target_dep_node, reads)
989 bug!("pop_anon_task() - Expected anonymous task to be popped")
// Eval-always frames: the node ignores its actual reads and instead gets a
// single edge to the Krate node, so it re-runs whenever anything changes.
993 fn push_eval_always_task(&mut self, key: DepNode) {
994 self.task_stack.push(OpenTask::EvalAlways { node: key });
997 fn pop_eval_always_task(&mut self, key: DepNode) -> DepNodeIndex {
998 let popped_node = self.task_stack.pop().unwrap();
1000 if let OpenTask::EvalAlways {
1003 debug_assert_eq!(node, key);
// The Krate node must already exist; indexing panics otherwise.
1004 let krate_idx = self.node_to_node_index[&DepNode::new_no_params(DepKind::Krate)];
1005 self.alloc_node(node, vec![krate_idx])
1007 bug!("pop_eval_always_task() - Expected eval always task to be popped");
// Records that the task on top of the stack read node `source`. Regular
// tasks deduplicate reads via their read_set and enforce the forbidden-edge
// filter (debug builds); anon tasks just deduplicate; ignore/eval-always
// frames and an empty stack record nothing.
1011 fn read_index(&mut self, source: DepNodeIndex) {
1012 match self.task_stack.last_mut() {
1013 Some(&mut OpenTask::Regular {
1018 self.total_read_count += 1;
// First time this task reads `source`.
1019 if read_set.insert(source) {
1022 if cfg!(debug_assertions) {
1023 if let Some(ref forbidden_edge) = self.forbidden_edge {
1024 let source = self.nodes[source];
1025 if forbidden_edge.test(&source, &target) {
1026 bug!("forbidden edge {:?} -> {:?} created",
1033 self.total_duplicate_read_count += 1;
1036 Some(&mut OpenTask::Anon {
1040 if read_set.insert(source) {
1044 Some(&mut OpenTask::Ignore) |
1045 Some(&mut OpenTask::EvalAlways { .. }) | None => {
// Appends a brand-new node with the given edges and registers it in the
// reverse-lookup map. The node must not already exist; the debug asserts
// check the three parallel structures stay in sync.
1051 fn alloc_node(&mut self,
1053 edges: Vec<DepNodeIndex>)
1055 debug_assert_eq!(self.edges.len(), self.nodes.len());
1056 debug_assert_eq!(self.node_to_node_index.len(), self.nodes.len());
1057 debug_assert!(!self.node_to_node_index.contains_key(&dep_node));
1058 let dep_node_index = DepNodeIndex::new(self.nodes.len());
1059 self.nodes.push(dep_node);
1060 self.node_to_node_index.insert(dep_node, dep_node_index);
1061 self.edges.push(edges);
// A frame on CurrentDepGraph::task_stack. Variant names are elided in this
// view; the two visible bodies (reads + read_set) belong to the Regular and
// Anon variants used by push_task / push_anon_task above.
1066 #[derive(Clone, Debug, PartialEq)]
1070 reads: Vec<DepNodeIndex>,
1071 read_set: FxHashSet<DepNodeIndex>,
1074 reads: Vec<DepNodeIndex>,
1075 read_set: FxHashSet<DepNodeIndex>,
1083 // A data structure that stores Option<DepNodeColor> values as a contiguous
1084 // array, using one u32 per entry.
1085 struct DepNodeColorMap {
1086 values: IndexVec<SerializedDepNodeIndex, u32>,
// Encoding: 0 = not yet colored, 1 = red, and any value >= 2 means green
// with current-session node index (value - 2).
1089 const COMPRESSED_NONE: u32 = 0;
1090 const COMPRESSED_RED: u32 = 1;
1091 const COMPRESSED_FIRST_GREEN: u32 = 2;
1093 impl DepNodeColorMap {
// Every previous-session node starts out uncolored.
1094 fn new(size: usize) -> DepNodeColorMap {
1096 values: IndexVec::from_elem_n(COMPRESSED_NONE, size)
// Decodes the u32 back into Option<DepNodeColor>.
1100 fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
1101 match self.values[index] {
1102 COMPRESSED_NONE => None,
1103 COMPRESSED_RED => Some(DepNodeColor::Red),
1104 value => Some(DepNodeColor::Green(DepNodeIndex(value - COMPRESSED_FIRST_GREEN)))
// Encodes a color; callers are expected not to overwrite an existing
// entry (see the debug_asserts at the call sites).
1108 fn insert(&mut self, index: SerializedDepNodeIndex, color: DepNodeColor) {
1109 self.values[index] = match color {
1110 DepNodeColor::Red => COMPRESSED_RED,
1111 DepNodeColor::Green(index) => index.0 + COMPRESSED_FIRST_GREEN,