use parking_lot::Mutex;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::{EventId, QueryInvocationId, SelfProfilerRef};
use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::steal::Steal;
use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
use rustc_index::vec::IndexVec;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use smallvec::{smallvec, SmallVec};
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::atomic::Ordering::Relaxed;
use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId};
use crate::ich::StableHashingContext;
use crate::query::{QueryContext, QuerySideEffects};

#[cfg(debug_assertions)]
use {super::debug::EdgeFilter, std::env};

#[derive(Clone)]
pub struct DepGraph<K: DepKind> {
    data: Option<Lrc<DepGraphData<K>>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Lrc<AtomicU32>,

    /// The cached event id for profiling node interning. This saves us
    /// from having to look up the event id every time we intern a node,
    /// which may incur too much overhead.
    /// This will be `None` if self-profiling is disabled.
    node_intern_event_id: Option<EventId>,
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex { .. }
}

impl DepNodeIndex {
    pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
    pub const SINGLETON_DEPENDENCYLESS_ANON_NODE: DepNodeIndex = DepNodeIndex::from_u32(0);
}

impl std::convert::From<DepNodeIndex> for QueryInvocationId {
    #[inline]
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}

#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DepNodeColor {
    Red,
    Green(DepNodeIndex),
}

impl DepNodeColor {
    pub fn is_green(self) -> bool {
        match self {
            DepNodeColor::Red => false,
            DepNodeColor::Green(_) => true,
        }
    }
}

struct DepGraphData<K: DepKind> {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: we don't merge the previous dep-graph into
    /// the current one anymore, but we do reference shared data to save space.
    current: CurrentDepGraph<K>,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: SerializedDepGraph<K>,

    colors: DepNodeColorMap,

    processed_side_effects: Mutex<FxHashSet<DepNodeIndex>>,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: FxHashMap<WorkProductId, WorkProduct>,

    dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,
}
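
/// Stable-hashes a query result into a fingerprint that can be compared
/// against the fingerprint from the previous session. `with_task` accepts any
/// `hash_result` function with this shape; ones that return `None` mark the
/// result as unhashable (see the `no_hash` case in `intern_node`).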
pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Option<Fingerprint>
where
    R: for<'a> HashStable<StableHashingContext<'a>>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);
    Some(stable_hasher.finish())
}

impl<K: DepKind> DepGraph<K> {
    pub fn new(
        profiler: &SelfProfilerRef,
        prev_graph: SerializedDepGraph<K>,
        prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
    ) -> DepGraph<K> {
        let prev_graph_node_count = prev_graph.node_count();

        let current =
            CurrentDepGraph::new(prev_graph_node_count, encoder, record_graph, record_stats);

        // Instantiate a dependency-less node only once for anonymous queries.
        let _green_node_index = current.intern_new_node(
            profiler,
            DepNode { kind: DepKind::NULL, hash: current.anon_id_seed.into() },
            smallvec![],
            Fingerprint::ZERO,
        );
        debug_assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE);

        let node_intern_event_id = profiler
            .get_or_alloc_cached_string("incr_comp_intern_dep_graph_node")
            .map(EventId::from_label);

        DepGraph {
            data: Some(Lrc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current,
                processed_side_effects: Default::default(),
                previous: prev_graph,
                colors: DepNodeColorMap::new(prev_graph_node_count),
            })),
            virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
            node_intern_event_id,
        }
    }

    pub fn new_disabled() -> DepGraph<K> {
        DepGraph {
            data: None,
            virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
            node_intern_event_id: None,
        }
    }

    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }

    pub fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) {
        if let Some(data) = &self.data {
            data.current.encoder.borrow().with_query(f)
        }
    }

    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            K::read_deps(|task_deps| {
                assert!(task_deps.is_none(), "expected no task dependency tracking");
            })
        }
    }

    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        K::with_deps(None, op)
    }

    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc dev guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the
    ///   `arg` parameter.
    ///
    /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
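    ///
    /// As a sketch, a call site has roughly this shape (the names below are
    /// illustrative placeholders, not items from this crate):
    ///
    /// ```ignore (illustrative)
    /// let (result, dep_node_index) = dep_graph.with_task(
    ///     dep_node,     // the `DepNode` identifying this unit of work
    ///     cx,           // context implementing `HasDepContext`
    ///     arg,          // e.g. a `DefId`
    ///     compute_fn,   // free function `fn(Ctxt, A) -> R`
    ///     hash_result,  // fingerprints the result for red/green comparison
    /// );
    /// ```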
    pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: fn(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex) {
        if self.is_fully_enabled() {
            self.with_task_impl(key, cx, arg, task, hash_result)
        } else {
            // Incremental compilation is turned off. We just execute the task
            // without tracking. We still provide a dep-node index that uniquely
            // identifies the task so that we have a cheap way of referring to
            // the query for self-profiling.
            (task(cx, arg), self.next_virtual_depnode_index())
        }
    }

    fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: fn(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex) {
        // This function is only called when the graph is enabled.
        let data = self.data.as_ref().unwrap();

        // If the following assertion triggers, it can have two reasons:
        // 1. Something is wrong with DepNode creation, either here or
        //    in `DepGraph::try_mark_green()`.
        // 2. Two distinct query keys get mapped to the same `DepNode`
        //    (see for example #48923).
        debug_assert!(
            !self.dep_node_exists(&key),
            "forcing query with already existing `DepNode`\n\
             - query-key: {:?}\n\
             - dep-node: {:?}",
            arg,
            key
        );

        let task_deps = if key.kind.is_eval_always() {
            None
        } else {
            Some(Lock::new(TaskDeps {
                #[cfg(debug_assertions)]
                node: Some(key),
                reads: SmallVec::new(),
                read_set: Default::default(),
                phantom_data: PhantomData,
            }))
        };
        let result = K::with_deps(task_deps.as_ref(), || task(cx, arg));
        let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);

        let dcx = cx.dep_context();
        let mut hcx = dcx.create_stable_hashing_context();
        let hashing_timer = dcx.profiler().incr_result_hashing();
        let current_fingerprint = hash_result(&mut hcx, &result);

        let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks;

        // Get timer for profiling `DepNode` interning
        let node_intern_timer =
            self.node_intern_event_id.map(|eid| dcx.profiler().generic_activity_with_event_id(eid));
        // Intern the new `DepNode`.
        let (dep_node_index, prev_and_color) = data.current.intern_node(
            dcx.profiler(),
            &data.previous,
            key,
            edges,
            current_fingerprint,
            print_status,
        );
        drop(node_intern_timer);

        hashing_timer.finish_with_query_invocation_id(dep_node_index.into());

        if let Some((prev_index, color)) = prev_and_color {
            debug_assert!(
                data.colors.get(prev_index).is_none(),
                "DepGraph::with_task() - Duplicate DepNodeColor \
                 insertion for {:?}",
                key
            );

            data.colors.insert(prev_index, color);
        }

        (result, dep_node_index)
    }

    /// Executes something within an "anonymous" task, that is, a task the
    /// `DepNode` of which is determined by the list of inputs it read from.
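    ///
    /// A sketch of a call site (the names are illustrative placeholders):
    ///
    /// ```ignore (illustrative)
    /// let (result, dep_node_index) =
    ///     dep_graph.with_anon_task(cx, dep_kind, || compute(cx));
    /// ```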
    pub fn with_anon_task<Ctxt: DepContext<DepKind = K>, OP, R>(
        &self,
        cx: Ctxt,
        dep_kind: K,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        debug_assert!(!dep_kind.is_eval_always());

        if let Some(ref data) = self.data {
            let task_deps = Lock::new(TaskDeps::default());
            let result = K::with_deps(Some(&task_deps), op);
            let task_deps = task_deps.into_inner();
            let task_deps = task_deps.reads;

            let dep_node_index = match task_deps.len() {
                0 => {
                    // Because the dep-node id of anon nodes is computed from the sets of its
                    // dependencies, we already know what the ID of this dependency-less node is
                    // going to be (i.e. equal to the precomputed
                    // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
                    // a `StableHasher` and sending the node through interning.
                    DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
                }
                1 => {
                    // When there is only one dependency, don't bother creating a node.
                    task_deps[0]
                }
                _ => {
                    // The dep node indices are hashed here instead of hashing the dep nodes of the
                    // dependencies. These indices may refer to different nodes per session, but this
                    // isn't a problem here because we ensure that the final dep node hash is
                    // per-session only, by combining it with the per-session random number
                    // `anon_id_seed`. This hash only needs to map the dependencies to a single
                    // value on a per-session basis.
                    let mut hasher = StableHasher::new();
                    task_deps.hash(&mut hasher);

                    let target_dep_node = DepNode {
                        kind: dep_kind,
                        // Fingerprint::combine() is faster than sending Fingerprint
                        // through the StableHasher (at least as long as StableHasher
                        // is so slow).
                        hash: data.current.anon_id_seed.combine(hasher.finish()).into(),
                    };

                    data.current.intern_new_node(
                        cx.profiler(),
                        target_dep_node,
                        task_deps,
                        Fingerprint::ZERO,
                    )
                }
            };

            (result, dep_node_index)
        } else {
            (op(), self.next_virtual_depnode_index())
        }
    }

    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            K::read_deps(|task_deps| {
                if let Some(task_deps) = task_deps {
                    let mut task_deps = task_deps.lock();
                    let task_deps = &mut *task_deps;
                    if cfg!(debug_assertions) {
                        data.current.total_read_count.fetch_add(1, Relaxed);
                    }

                    // As long as we only have a low number of reads we can avoid doing a hash
                    // insert and potentially allocating/reallocating the hashmap
                    let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
                        task_deps.reads.iter().all(|other| *other != dep_node_index)
                    } else {
                        task_deps.read_set.insert(dep_node_index)
                    };
                    if new_read {
                        task_deps.reads.push(dep_node_index);
                        if task_deps.reads.len() == TASK_DEPS_READS_CAP {
                            // Fill `read_set` with what we have so far so we can use the hashset
                            // next time
                            task_deps.read_set.extend(task_deps.reads.iter().copied());
                        }

                        #[cfg(debug_assertions)]
                        {
                            if let Some(target) = task_deps.node {
                                if let Some(ref forbidden_edge) = data.current.forbidden_edge {
                                    let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
                                    if forbidden_edge.test(&src, &target) {
                                        panic!("forbidden edge {:?} -> {:?} created", src, target)
                                    }
                                }
                            }
                        }
                    } else if cfg!(debug_assertions) {
                        data.current.total_duplicate_read_count.fetch_add(1, Relaxed);
                    }
                }
            })
        }
    }

    #[inline]
    pub fn dep_node_index_of(&self, dep_node: &DepNode<K>) -> DepNodeIndex {
        self.dep_node_index_of_opt(dep_node).unwrap()
    }

    #[inline]
    pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
        let data = self.data.as_ref().unwrap();
        let current = &data.current;

        if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
            current.prev_index_to_index.lock()[prev_index]
        } else {
            current.new_node_to_index.get_shard_by_value(dep_node).lock().get(dep_node).copied()
        }
    }

    #[inline]
    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
        self.data.is_some() && self.dep_node_index_of_opt(dep_node).is_some()
    }

    pub fn prev_fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> {
        self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
    }

    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
        &self.data.as_ref().unwrap().previous_work_products
    }

    #[inline(always)]
    pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode<K>, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        let debug_str = debug_str_gen();
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }

    fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
        if let Some(ref data) = self.data {
            if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
                return data.colors.get(prev_index);
            } else {
                // This is a node that did not exist in the previous compilation session.
                return None;
            }
        }

        None
    }

    /// Try to obtain a node index for `dep_node`.
    ///
    /// A node will have an index when it has already been marked green, or when we can mark it
    /// green ourselves. This function will mark the current task as a reader of the specified
    /// node when a node index can be found for that node.
    pub fn try_mark_green<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        dep_node: &DepNode<K>,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!dep_node.kind.is_eval_always());

        // Return None if the dep graph is disabled
        let data = self.data.as_ref()?;

        // Return None if the dep node didn't exist in the previous session
        let prev_index = data.previous.node_to_index_opt(dep_node)?;

        match data.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
            None => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(tcx, data, prev_index, &dep_node)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }

    fn try_mark_parent_green<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        data: &DepGraphData<K>,
        parent_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode<K>,
    ) -> Option<()> {
        let dep_dep_node_color = data.colors.get(parent_dep_node_index);
        let dep_dep_node = &data.previous.index_to_node(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                // This dependency has been marked as green before, we are
                // still fine and can continue with checking the other
                // dependencies.
                debug!(
                    "try_mark_previous_green({:?}) --- found dependency {:?} to \
                     be immediately green",
                    dep_node, dep_dep_node,
                );
                return Some(());
            }
            Some(DepNodeColor::Red) => {
                // We found a dependency the value of which has changed
                // compared to the previous compilation session. We cannot
                // mark the DepNode as green and also don't need to bother
                // with checking any of the other dependencies.
                debug!(
                    "try_mark_previous_green({:?}) - END - dependency {:?} was immediately red",
                    dep_node, dep_dep_node,
                );
                return None;
            }
            None => {}
        }

        // We don't know the state of this dependency. If it isn't
        // an eval_always node, let's try to mark it green recursively.
        if !dep_dep_node.kind.is_eval_always() {
            debug!(
                "try_mark_previous_green({:?}) --- state of dependency {:?} ({}) \
                 is unknown, trying to mark it green",
                dep_node, dep_dep_node, dep_dep_node.hash,
            );

            let node_index =
                self.try_mark_previous_green(tcx, data, parent_dep_node_index, dep_dep_node);
            if node_index.is_some() {
                debug!(
                    "try_mark_previous_green({:?}) --- managed to MARK dependency {:?} as green",
                    dep_node, dep_dep_node
                );
                return Some(());
            }
        }

        // We failed to mark it green, so we try to force the query.
        debug!(
            "try_mark_previous_green({:?}) --- trying to force dependency {:?}",
            dep_node, dep_dep_node
        );
        if !tcx.try_force_from_dep_node(dep_dep_node) {
            // The DepNode could not be forced.
            debug!(
                "try_mark_previous_green({:?}) - END - dependency {:?} could not be forced",
                dep_node, dep_dep_node
            );
            return None;
        }

        let dep_dep_node_color = data.colors.get(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                debug!(
                    "try_mark_previous_green({:?}) --- managed to FORCE dependency {:?} to green",
                    dep_node, dep_dep_node
                );
                return Some(());
            }
            Some(DepNodeColor::Red) => {
                debug!(
                    "try_mark_previous_green({:?}) - END - dependency {:?} was red after forcing",
                    dep_node, dep_dep_node
                );
                return None;
            }
            None => {}
        }

        if !tcx.dep_context().sess().has_errors_or_delayed_span_bugs() {
            panic!("try_mark_previous_green() - Forcing the DepNode should have set its color")
        }

        // If the query we just forced has resulted in
        // some kind of compilation error, we cannot rely on
        // the dep-node color having been properly updated.
        // This means that the query system has reached an
        // invalid state. We let the compiler continue (by
        // returning `None`) so it can emit error messages
        // and wind down, but rely on the fact that this
        // invalid state will not be persisted to the
        // incremental compilation cache because of
        // compilation errors being present.
        debug!(
            "try_mark_previous_green({:?}) - END - dependency {:?} resulted in compilation error",
            dep_node, dep_dep_node
        );
        None
    }

    /// Try to mark a dep-node which existed in the previous compilation session as green.
    fn try_mark_previous_green<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        data: &DepGraphData<K>,
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode<K>,
    ) -> Option<DepNodeIndex> {
        debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);

        #[cfg(not(parallel_compiler))]
        {
            debug_assert!(!self.dep_node_exists(dep_node));
            debug_assert!(data.colors.get(prev_dep_node_index).is_none());
        }

        // We never try to mark eval_always nodes as green
        debug_assert!(!dep_node.kind.is_eval_always());

        debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);

        let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);

        for &dep_dep_node_index in prev_deps {
            self.try_mark_parent_green(tcx, data, dep_dep_node_index, dep_node)?
        }

        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green.

        // There may be multiple threads trying to mark the same dep node green concurrently

        // We allocate an entry for the node in the current dependency graph and
        // add all the appropriate edges imported from the previous graph
        let dep_node_index = data.current.promote_node_and_deps_to_current(
            tcx.dep_context().profiler(),
            &data.previous,
            prev_dep_node_index,
        );

        // ... emitting any stored diagnostic ...

        // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
        // Maybe store a list on disk and encode this fact in the DepNodeState
        let side_effects = tcx.load_side_effects(prev_dep_node_index);

        #[cfg(not(parallel_compiler))]
        debug_assert!(
            data.colors.get(prev_dep_node_index).is_none(),
            "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
             insertion for {:?}",
            dep_node
        );

        if unlikely!(!side_effects.is_empty()) {
            self.emit_side_effects(tcx, data, dep_node_index, side_effects);
        }

        // ... and finally storing a "Green" entry in the color map.
        // Multiple threads can all write the same color here
        data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

        debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);

        Some(dep_node_index)
    }

    /// Atomically emits some loaded diagnostics.
    /// This may be called concurrently on multiple threads for the same dep node.
    #[cold]
    #[inline(never)]
    fn emit_side_effects<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        data: &DepGraphData<K>,
        dep_node_index: DepNodeIndex,
        side_effects: QuerySideEffects,
    ) {
        let mut processed = data.processed_side_effects.lock();

        if processed.insert(dep_node_index) {
            // We were the first to insert the node in the set so this thread
            // must process side effects

            // Promote the previous diagnostics to the current session.
            tcx.store_side_effects(dep_node_index, side_effects.clone());

            let handle = tcx.dep_context().sess().diagnostic();

            for diagnostic in side_effects.diagnostics {
                handle.emit_diagnostic(&diagnostic);
            }
        }
    }

    // Returns true if the given node has been marked as red during the
    // current compilation session. Used in various assertions
    pub fn is_red(&self, dep_node: &DepNode<K>) -> bool {
        self.node_color(dep_node) == Some(DepNodeColor::Red)
    }

    // Returns true if the given node has been marked as green during the
    // current compilation session. Used in various assertions
    pub fn is_green(&self, dep_node: &DepNode<K>) -> bool {
        self.node_color(dep_node).map_or(false, |c| c.is_green())
    }

    // This method loads all on-disk cacheable query results into memory, so
    // they can be written out to the new cache file again. Most query results
    // will already be in memory but in the case where we marked something as
    // green but then did not need the value, that value will never have been
    // loaded from disk.
    //
    // This method will only load queries that will end up in the disk cache.
    // Other queries will not be executed.
    pub fn exec_cache_promotions<Ctxt: QueryContext<DepKind = K>>(&self, qcx: Ctxt) {
        let tcx = qcx.dep_context();
        let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    qcx.try_load_from_on_disk_cache(&dep_node);
                }
                None | Some(DepNodeColor::Red) => {
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.
                }
            }
        }
    }

    pub fn print_incremental_info(&self) {
        if let Some(data) = &self.data {
            data.current.encoder.borrow().print_incremental_info(
                data.current.total_read_count.load(Relaxed),
                data.current.total_duplicate_read_count.load(Relaxed),
            )
        }
    }

    pub fn encode(&self, profiler: &SelfProfilerRef) -> FileEncodeResult {
        if let Some(data) = &self.data {
            data.current.encoder.steal().finish(profiler)
        } else {
            Ok(())
        }
    }

    pub(crate) fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
        DepNodeIndex::from_u32(index)
    }
}

/// A "work product" is an intermediate result that we save into the
/// incremental directory for later re-use. The primary example is
/// the object files that we save for each partition at code
/// generation time.
///
/// Each work product is associated with a dep-node, representing the
/// process that produced the work-product. If that dep-node is found
/// to be dirty when we load up, then we will delete the work-product
/// at load time. If the work-product is found to be clean, then we
/// will keep a record in the `previous_work_products` list.
///
/// In addition, work products have an associated hash. This hash is
/// an extra hash that can be used to decide if the work-product from
/// a previous compilation can be re-used (in addition to the dirty
/// edges check).
///
/// As the primary example, consider the object files we generate for
/// each partition. In the first run, we create partitions based on
/// the symbols that need to be compiled. For each partition P, we
/// hash the symbols in P and create a `WorkProduct` record associated
/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
/// in P.
///
/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
/// judged to be clean (which means none of the things we read to
/// generate the partition were found to be dirty), it will be loaded
/// into previous work products. We will then regenerate the set of
/// symbols in the partition P and hash them (note that new symbols
/// may be added -- for example, new monomorphizations -- even if
/// nothing in P changed!). We will compare that hash against the
/// previous hash. If it matches up, we can reuse the object file.
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct WorkProduct {
    pub cgu_name: String,
    /// Saved file associated with this CGU.
    pub saved_file: Option<String>,
}

// Index type for `DepNodeData`'s edges.
rustc_index::newtype_index! {
    struct EdgeIndex { .. }
}

/// `CurrentDepGraph` stores the dependency graph for the current session. It
/// will be populated as we run queries or tasks. We never remove nodes from the
/// graph: they are only added.
///
/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes
/// in memory. This is important, because these graph structures are some of the
/// largest in the compiler.
///
/// For this reason, we avoid storing `DepNode`s more than once as map
/// keys. The `new_node_to_index` map only contains nodes not in the previous
/// graph, and we map nodes in the previous graph to indices via a two-step
/// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
/// and the `prev_index_to_index` vector (which is more compact and faster than
/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
///
/// This struct uses three locks internally. The `data`, `new_node_to_index`,
/// and `prev_index_to_index` fields are locked separately. Operations that take
/// a `DepNodeIndex` typically just access the `data` field.
///
/// We only need to manipulate at most two locks simultaneously:
/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
/// first, and `data` second.
pub(super) struct CurrentDepGraph<K: DepKind> {
    encoder: Steal<GraphEncoder<K>>,
    new_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,
    prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    #[cfg(debug_assertions)]
    forbidden_edge: Option<EdgeFilter<K>>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: The color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is just a random number generated when
    /// the `DepGraph` is created.
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,
}

impl<K: DepKind> CurrentDepGraph<K> {
    fn new(
        prev_graph_node_count: usize,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
    ) -> CurrentDepGraph<K> {
        use std::time::{SystemTime, UNIX_EPOCH};

        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);

        #[cfg(debug_assertions)]
        let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
            Ok(s) => match EdgeFilter::new(&s) {
                Ok(f) => Some(f),
                Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
            },
            Err(_) => None,
        };

        // We store a large collection of these in `prev_index_to_index` during
        // non-full incremental builds, and want to ensure that the element size
        // doesn't inadvertently increase.
        static_assert_size!(Option<DepNodeIndex>, 4);
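
        // Estimate the number of nodes in this session at slightly more (+2%)
        // than in the previous one, plus constant headroom so tiny graphs
        // still get a useful initial capacity.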
        let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;

        CurrentDepGraph {
            encoder: Steal::new(GraphEncoder::new(
                encoder,
                prev_graph_node_count,
                record_graph,
                record_stats,
            )),
            new_node_to_index: Sharded::new(|| {
                FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate / sharded::SHARDS,
                    Default::default(),
                )
            }),
            prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)),
            anon_id_seed: stable_hasher.finish(),
            #[cfg(debug_assertions)]
            forbidden_edge,
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }

    #[cfg(debug_assertions)]
    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode<K>) {
        if let Some(forbidden_edge) = &self.forbidden_edge {
            forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
        }
    }

    /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
    /// Assumes that this is a node that has no equivalent in the previous dep-graph.
    fn intern_new_node(
        &self,
        profiler: &SelfProfilerRef,
        key: DepNode<K>,
        edges: EdgesVec,
        current_fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                let dep_node_index =
                    self.encoder.borrow().send(profiler, key, current_fingerprint, edges);
                entry.insert(dep_node_index);
                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key);
                dep_node_index
            }
        }
    }
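
    /// Interns a node produced by re-executing a task and determines its color:
    /// if the node existed in the previous session, an equal fingerprint makes
    /// it green, a different one makes it red, and a missing fingerprint
    /// (a `no_hash` query) is treated as red. Returns the node's index and,
    /// for nodes that existed previously, the color to record for them.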
    fn intern_node(
        &self,
        profiler: &SelfProfilerRef,
        prev_graph: &SerializedDepGraph<K>,
        key: DepNode<K>,
        edges: EdgesVec,
        fingerprint: Option<Fingerprint>,
        print_status: bool,
    ) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) {
        let print_status = cfg!(debug_assertions) && print_status;

        if let Some(prev_index) = prev_graph.node_to_index_opt(&key) {
            // Determine the color and index of the new `DepNode`.
            if let Some(fingerprint) = fingerprint {
                if fingerprint == prev_graph.fingerprint_by_index(prev_index) {
                    if print_status {
                        eprintln!("[task::green] {:?}", key);
                    }

                    // This is a green node: it existed in the previous compilation,
                    // its query was re-executed, and it has the same result as before.
                    let mut prev_index_to_index = self.prev_index_to_index.lock();

                    let dep_node_index = match prev_index_to_index[prev_index] {
                        Some(dep_node_index) => dep_node_index,
                        None => {
                            let dep_node_index =
                                self.encoder.borrow().send(profiler, key, fingerprint, edges);
                            prev_index_to_index[prev_index] = Some(dep_node_index);
                            dep_node_index
                        }
                    };

                    #[cfg(debug_assertions)]
                    self.record_edge(dep_node_index, key);
                    (dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index))))
                } else {
                    if print_status {
                        eprintln!("[task::red] {:?}", key);
                    }

                    // This is a red node: it existed in the previous compilation, its query
                    // was re-executed, but it has a different result from before.
                    let mut prev_index_to_index = self.prev_index_to_index.lock();

                    let dep_node_index = match prev_index_to_index[prev_index] {
                        Some(dep_node_index) => dep_node_index,
                        None => {
                            let dep_node_index =
                                self.encoder.borrow().send(profiler, key, fingerprint, edges);
                            prev_index_to_index[prev_index] = Some(dep_node_index);
                            dep_node_index
                        }
                    };

                    #[cfg(debug_assertions)]
                    self.record_edge(dep_node_index, key);
                    (dep_node_index, Some((prev_index, DepNodeColor::Red)))
                }
            } else {
                if print_status {
                    eprintln!("[task::unknown] {:?}", key);
                }

                // This is a red node, effectively: it existed in the previous compilation
                // session, its query was re-executed, but it doesn't compute a result hash
                // (i.e. it represents a `no_hash` query), so we have no way of determining
                // whether or not the result was the same as before.
                let mut prev_index_to_index = self.prev_index_to_index.lock();

                let dep_node_index = match prev_index_to_index[prev_index] {
                    Some(dep_node_index) => dep_node_index,
                    None => {
                        let dep_node_index =
                            self.encoder.borrow().send(profiler, key, Fingerprint::ZERO, edges);
                        prev_index_to_index[prev_index] = Some(dep_node_index);
                        dep_node_index
                    }
                };

                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key);
                (dep_node_index, Some((prev_index, DepNodeColor::Red)))
            }
        } else {
            if print_status {
                eprintln!("[task::new] {:?}", key);
            }

            let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO);

            // This is a new node: it didn't exist in the previous compilation session.
            let dep_node_index = self.intern_new_node(profiler, key, edges, fingerprint);

            (dep_node_index, None)
        }
    }
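
    /// Reuses a node from the previous graph in the current session: sends it
    /// to the encoder with its previous fingerprint and with its previous
    /// edges remapped to current indices. Assumes the node's dependencies have
    /// already been given `DepNodeIndex`es (their `prev_index_to_index`
    /// entries are `Some`), which `try_mark_previous_green` guarantees.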
    fn promote_node_and_deps_to_current(
        &self,
        profiler: &SelfProfilerRef,
        prev_graph: &SerializedDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
    ) -> DepNodeIndex {
        self.debug_assert_not_in_new_nodes(prev_graph, prev_index);

        let mut prev_index_to_index = self.prev_index_to_index.lock();

        match prev_index_to_index[prev_index] {
            Some(dep_node_index) => dep_node_index,
            None => {
                let key = prev_graph.index_to_node(prev_index);
                let dep_node_index = self.encoder.borrow().send(
                    profiler,
                    key,
                    prev_graph.fingerprint_by_index(prev_index),
                    prev_graph
                        .edge_targets_from(prev_index)
                        .iter()
                        .map(|i| prev_index_to_index[*i].unwrap())
                        .collect(),
                );
                prev_index_to_index[prev_index] = Some(dep_node_index);
                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key);
                dep_node_index
            }
        }
    }

    fn debug_assert_not_in_new_nodes(
        &self,
        prev_graph: &SerializedDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
    ) {
        let node = &prev_graph.index_to_node(prev_index);
        debug_assert!(
            !self.new_node_to_index.get_shard_by_value(node).lock().contains_key(node),
            "node from previous graph present in new node collection"
        );
    }
}

/// The capacity of the `reads` field `SmallVec`
const TASK_DEPS_READS_CAP: usize = 8;
type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>;
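
/// The dependencies recorded while a single task executes. `reads` stores the
/// edges in order of first read; once it grows to `TASK_DEPS_READS_CAP`
/// entries, `read_set` mirrors it so duplicate checks become hash lookups
/// instead of linear scans (see `DepGraph::read_index`).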
pub struct TaskDeps<K> {
    #[cfg(debug_assertions)]
    node: Option<DepNode<K>>,
    reads: EdgesVec,
    read_set: FxHashSet<DepNodeIndex>,
    phantom_data: PhantomData<DepNode<K>>,
}

impl<K> Default for TaskDeps<K> {
    fn default() -> Self {
        Self {
            #[cfg(debug_assertions)]
            node: None,
            reads: EdgesVec::new(),
            read_set: FxHashSet::default(),
            phantom_data: PhantomData,
        }
    }
}

// A data structure that stores Option<DepNodeColor> values as a contiguous
// array, using one u32 per entry.
struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;
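
// So `None` is stored as 0, `Red` as 1, and `Green(i)` as `i + 2`; for
// example, `Green(DepNodeIndex(5))` is stored as 7, and `get` recovers the
// index by subtracting `COMPRESSED_FIRST_GREEN`.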
impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
    }

    #[inline]
    fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
            value => {
                Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
            }
        }
    }

    fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
        self.values[index].store(
            match color {
                DepNodeColor::Red => COMPRESSED_RED,
                DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
            },
            Ordering::Release,
        )
    }
}