use parking_lot::Mutex;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::{EventId, QueryInvocationId, SelfProfilerRef};
use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::steal::Steal;
use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
use rustc_index::vec::IndexVec;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use smallvec::{smallvec, SmallVec};
use std::assert_matches::assert_matches;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::atomic::Ordering::Relaxed;

use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId};
use crate::ich::StableHashingContext;
use crate::query::{QueryContext, QuerySideEffects};

#[cfg(debug_assertions)]
use {super::debug::EdgeFilter, std::env};

pub struct DepGraph<K: DepKind> {
    data: Option<Lrc<DepGraphData<K>>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Lrc<AtomicU32>,
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex {}

    pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
    pub const SINGLETON_DEPENDENCYLESS_ANON_NODE: DepNodeIndex = DepNodeIndex::from_u32(0);
    pub const FOREVER_RED_NODE: DepNodeIndex = DepNodeIndex::from_u32(1);

impl From<DepNodeIndex> for QueryInvocationId {
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())

pub enum DepNodeColor {
    pub fn is_green(self) -> bool {
            DepNodeColor::Red => false,
            DepNodeColor::Green(_) => true,

struct DepGraphData<K: DepKind> {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: we don't merge the previous dep-graph into
    /// the current one anymore, but we do reference shared data to save space.
    current: CurrentDepGraph<K>,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: SerializedDepGraph<K>,

    colors: DepNodeColorMap,

    processed_side_effects: Mutex<FxHashSet<DepNodeIndex>>,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: FxHashMap<WorkProductId, WorkProduct>,

    dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,

    /// Used by incremental compilation tests to assert that
    /// a particular query result was decoded from disk
    /// (not just marked green).
    debug_loaded_from_disk: Lock<FxHashSet<DepNode<K>>>,
}

pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
where
    R: for<'a> HashStable<StableHashingContext<'a>>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);
    stable_hasher.finish()
}

impl<K: DepKind> DepGraph<K> {
        profiler: &SelfProfilerRef,
        prev_graph: SerializedDepGraph<K>,
        prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
        encoder: FileEncoder,
        let prev_graph_node_count = prev_graph.node_count();

        let current = CurrentDepGraph::new(
            prev_graph_node_count,

        let colors = DepNodeColorMap::new(prev_graph_node_count);

        // Instantiate a dependency-less node only once for anonymous queries.
        let _green_node_index = current.intern_new_node(
            DepNode { kind: DepKind::NULL, hash: current.anon_id_seed.into() },
        assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE);

        // Instantiate a dependency-less red node only once for anonymous queries.
        let (_red_node_index, _prev_and_index) = current.intern_node(
            DepNode { kind: DepKind::RED, hash: Fingerprint::ZERO.into() },
        assert_eq!(_red_node_index, DepNodeIndex::FOREVER_RED_NODE);
        assert!(matches!(_prev_and_index, None | Some((_, DepNodeColor::Red))));

            data: Some(Lrc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                processed_side_effects: Default::default(),
                previous: prev_graph,
                debug_loaded_from_disk: Default::default(),
            virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),

    pub fn new_disabled() -> DepGraph<K> {
        DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
    }

    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    pub fn is_fully_enabled(&self) -> bool {

    pub fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) {
        if let Some(data) = &self.data {
            data.current.encoder.borrow().with_query(f)

    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            K::read_deps(|task_deps| {
                    "expected no task dependency tracking"

    pub fn with_ignore<OP, R>(&self, op: OP) -> R
        K::with_deps(TaskDepsRef::Ignore, op)

    /// Used to wrap the deserialization of a query result from disk.
    /// This method enforces that no new `DepNodes` are created during
    /// query result deserialization.
    ///
    /// Enforcing this makes the query dep graph simpler - all nodes
    /// must be created during the query execution, and should be
    /// created from inside the 'body' of a query (the implementation
    /// provided by a particular compiler crate).
    ///
    /// Consider the case of three queries `A`, `B`, and `C`, where
    /// `A` invokes `B` and `B` invokes `C`:
    ///
    /// `A -> B -> C`
    ///
    /// Suppose that decoding the result of query `B` required re-computing
    /// the query `C`. If we did not create a fresh `TaskDeps` when
    /// decoding `B`, we would still be using the `TaskDeps` for query `A`
    /// (if we needed to re-execute `A`). This would cause us to create
    /// a new edge `A -> C`. If this edge did not previously
    /// exist in the `DepGraph`, then we could end up with a different
    /// `DepGraph` at the end of compilation, even if there were no
    /// meaningful changes to the overall program (e.g. a newline was added).
    /// In addition, this edge might cause a subsequent compilation run
    /// to try to force `C` before marking other necessary nodes green. If
    /// `C` did not exist in the new compilation session, then we could
    /// get an ICE. Normally, we would have tried (and failed) to mark
    /// some other query green (e.g. `item_children`) which was used
    /// to obtain `C`, which would prevent us from ever trying to force
    /// a non-existent `C`.
    ///
    /// It might be possible to enforce that all `DepNode`s read during
    /// deserialization already exist in the previous `DepGraph`. In
    /// the above example, we would invoke `C` during the deserialization
    /// of `B`. Since we correctly create a new `TaskDeps` from the decoding
    /// of `B`, this would result in an edge `B -> C`. If that edge already
    /// existed (with the same `DepPathHash`es), then it should be correct
    /// to allow the invocation of the query to proceed during deserialization
    /// of a query result. We would merely assert that the dep-graph fragment
    /// that would have been added by invoking `C` while decoding `B`
    /// is equivalent to the dep-graph fragment that we already instantiated for `B`
    /// (at the point where we successfully marked `B` as green).
    ///
    /// However, this would require additional complexity
    /// in the query infrastructure, and is not currently needed by the
    /// decoding of any query results. Should the need arise in the future,
    /// we should consider extending the query system with this functionality.
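    ///
    /// A sketch of the intended usage; `cache_decoder` and `decode_query_result`
    /// are hypothetical stand-ins for the real on-disk-cache machinery:
    ///
    /// ```ignore (illustrative)
    /// let value = tcx.dep_graph.with_query_deserialization(|| {
    ///     // Any dep-node read in here would panic via `TaskDepsRef::Forbid`.
    ///     decode_query_result(&mut cache_decoder)
    /// });
    /// ```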
    pub fn with_query_deserialization<OP, R>(&self, op: OP) -> R
        K::with_deps(TaskDepsRef::Forbid, op)

    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc dev guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the `arg` parameter.
    ///
    /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
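    ///
    /// A sketch of a call site; `dep_node`, `type_of`, and `hash_result` here are
    /// illustrative stand-ins for a real query key, provider function, and hashing
    /// callback:
    ///
    /// ```ignore (illustrative)
    /// let (result, dep_node_index) =
    ///     tcx.dep_graph.with_task(dep_node, tcx, def_id, type_of, Some(hash_result));
    /// ```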
    pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        if self.is_fully_enabled() {
            self.with_task_impl(key, cx, arg, task, hash_result)
            // Incremental compilation is turned off. We just execute the task
            // without tracking. We still provide a dep-node index that uniquely
            // identifies the task so that we have a cheap way of referring to
            // the query for self-profiling.
            (task(cx, arg), self.next_virtual_depnode_index())

    fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        // This function is only called when the graph is enabled.
        let data = self.data.as_ref().unwrap();

        // If the following assertion triggers, it can have two reasons:
        // 1. Something is wrong with DepNode creation, either here or
        //    in `DepGraph::try_mark_green()`.
        // 2. Two distinct query keys get mapped to the same `DepNode`
        //    (see for example #48923).
            !self.dep_node_exists(&key),
            "forcing query with already existing `DepNode`\n\

        let task_deps = if cx.dep_context().is_eval_always(key.kind) {
            Some(Lock::new(TaskDeps {
                #[cfg(debug_assertions)]
                node: Some(key),
                reads: SmallVec::new(),
                read_set: Default::default(),
                phantom_data: PhantomData,

        let task_deps_ref = match &task_deps {
            Some(deps) => TaskDepsRef::Allow(deps),
            None => TaskDepsRef::Ignore,

        let result = K::with_deps(task_deps_ref, || task(cx, arg));
        let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);

        let dcx = cx.dep_context();
        let hashing_timer = dcx.profiler().incr_result_hashing();
        let current_fingerprint =
            hash_result.map(|f| dcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, &result)));

        let print_status = cfg!(debug_assertions) && dcx.sess().opts.unstable_opts.dep_tasks;

        // Intern the new `DepNode`.
        let (dep_node_index, prev_and_color) = data.current.intern_node(

        hashing_timer.finish_with_query_invocation_id(dep_node_index.into());

        if let Some((prev_index, color)) = prev_and_color {
                data.colors.get(prev_index).is_none(),
                "DepGraph::with_task() - Duplicate DepNodeColor \

            data.colors.insert(prev_index, color);

        (result, dep_node_index)

    /// Executes something within an "anonymous" task, that is, a task whose
    /// `DepNode` is determined by the list of inputs it read.
    pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>(
    ) -> (R, DepNodeIndex)
        debug_assert!(!cx.is_eval_always(dep_kind));

        if let Some(ref data) = self.data {
            let task_deps = Lock::new(TaskDeps::default());
            let result = K::with_deps(TaskDepsRef::Allow(&task_deps), op);
            let task_deps = task_deps.into_inner();
            let task_deps = task_deps.reads;

            let dep_node_index = match task_deps.len() {
                    // Because the dep-node id of anon nodes is computed from the set of its
                    // dependencies, we already know what the ID of this dependency-less node is
                    // going to be (i.e. equal to the precomputed
                    // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
                    // a `StableHasher` and sending the node through interning.
                    DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
                    // When there is only one dependency, don't bother creating a node.
                    // The dep node indices are hashed here instead of hashing the dep nodes of the
                    // dependencies. These indices may refer to different nodes per session, but this
                    // isn't a problem here because we ensure that the final dep node hash is unique
                    // per session by combining it with the per-session random number `anon_id_seed`.
                    // This hash only needs to map the dependencies to a single value on a per-session
                    // basis.
                    let mut hasher = StableHasher::new();
                    task_deps.hash(&mut hasher);

                    let target_dep_node = DepNode {
                        // Fingerprint::combine() is faster than sending Fingerprint
                        // through the StableHasher (at least as long as StableHasher
                        // is so slow).
                        hash: data.current.anon_id_seed.combine(hasher.finish()).into(),

                    data.current.intern_new_node(

            (result, dep_node_index)
            (op(), self.next_virtual_depnode_index())

    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            K::read_deps(|task_deps| {
                let mut task_deps = match task_deps {
                    TaskDepsRef::Allow(deps) => deps.lock(),
                    TaskDepsRef::Ignore => return,
                    TaskDepsRef::Forbid => {
                        panic!("Illegal read of: {:?}", dep_node_index)
                let task_deps = &mut *task_deps;

                if cfg!(debug_assertions) {
                    data.current.total_read_count.fetch_add(1, Relaxed);

                // As long as we only have a low number of reads we can avoid doing a hash
                // insert and potentially allocating/reallocating the hashmap
                let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
                    task_deps.reads.iter().all(|other| *other != dep_node_index)
                    task_deps.read_set.insert(dep_node_index)

                    task_deps.reads.push(dep_node_index);
                    if task_deps.reads.len() == TASK_DEPS_READS_CAP {
                        // Fill `read_set` with what we have so far so we can use the hashset
                        task_deps.read_set.extend(task_deps.reads.iter().copied());

                    #[cfg(debug_assertions)]
                        if let Some(target) = task_deps.node {
                            if let Some(ref forbidden_edge) = data.current.forbidden_edge {
                                let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
                                if forbidden_edge.test(&src, &target) {
                                    panic!("forbidden edge {:?} -> {:?} created", src, target)
                } else if cfg!(debug_assertions) {
                    data.current.total_duplicate_read_count.fetch_add(1, Relaxed);

    /// Create a node when we force-feed a value into the query cache.
    /// This is used to remove cycles during type-checking const generic parameters.
    ///
    /// As usual in the query system, we consider that the current state of the calling
    /// query only depends on the list of dependencies recorded up to now. As a consequence,
    /// the value that this query gives us can only depend on those dependencies too.
    /// Therefore, it is sound to use the current dependency set for the created node.
    ///
    /// During replay, the order of the nodes is relevant in the dependency graph.
    /// So the unchanged replay will mark the caller query before trying to mark this one.
    /// If there is a change to report, the caller query will be re-executed before this one.
    ///
    /// FIXME: If the code is changed enough for this node to be marked before requiring the
    /// caller's node, we suppose that those changes will be enough to mark this node red and
    /// force a recomputation using the "normal" way.
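    ///
    /// An illustrative call shape (the names here are hypothetical, not a
    /// specific caller in the compiler):
    ///
    /// ```ignore (illustrative)
    /// let dep_node_index =
    ///     tcx.dep_graph().with_feed_task(node, tcx, key, &result, Some(hash_result));
    /// ```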
    pub fn with_feed_task<Ctxt: DepContext<DepKind = K>, A: Debug, R: Debug>(
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
        if let Some(data) = self.data.as_ref() {
            // The caller query has more dependencies than the node we are creating. We may
            // encounter a case where this created node is marked as green, but the caller query is
            // subsequently marked as red or recomputed. In this case, we will end up feeding a
            // value to an existing node.
            //
            // For sanity, we still check that the loaded stable hash and the new one match.
            if let Some(dep_node_index) = self.dep_node_index_of_opt(&node) {
                let _current_fingerprint =
                    crate::query::incremental_verify_ich(cx, result, &node, hash_result);

                #[cfg(debug_assertions)]
                if hash_result.is_some() {
                    data.current.record_edge(dep_node_index, node, _current_fingerprint);

                return dep_node_index;

            let mut edges = SmallVec::new();
            K::read_deps(|task_deps| match task_deps {
                TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
                TaskDepsRef::Ignore => {} // During HIR lowering, we have no dependencies.
                TaskDepsRef::Forbid => {
                    panic!("Cannot summarize when dependencies are not recorded.")

            let hashing_timer = cx.profiler().incr_result_hashing();
            let current_fingerprint = hash_result.map(|hash_result| {
                cx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result))

            let print_status = cfg!(debug_assertions) && cx.sess().opts.unstable_opts.dep_tasks;

            // Intern the new `DepNode` with the dependencies up-to-now.
            let (dep_node_index, prev_and_color) = data.current.intern_node(

            hashing_timer.finish_with_query_invocation_id(dep_node_index.into());

            if let Some((prev_index, color)) = prev_and_color {
                    data.colors.get(prev_index).is_none(),
                    "DepGraph::with_feed_task() - Duplicate DepNodeColor insertion for {key:?}",

                data.colors.insert(prev_index, color);
            // Incremental compilation is turned off. We just execute the task
            // without tracking. We still provide a dep-node index that uniquely
            // identifies the task so that we have a cheap way of referring to
            // the query for self-profiling.
            self.next_virtual_depnode_index()

    pub fn dep_node_index_of(&self, dep_node: &DepNode<K>) -> DepNodeIndex {
        self.dep_node_index_of_opt(dep_node).unwrap()
    }

    pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
        let data = self.data.as_ref().unwrap();
        let current = &data.current;

        if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
            current.prev_index_to_index.lock()[prev_index]
            current.new_node_to_index.get_shard_by_value(dep_node).lock().get(dep_node).copied()

    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
        self.data.is_some() && self.dep_node_index_of_opt(dep_node).is_some()
    }

    pub fn prev_fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> {
        self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
    }

    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
        &self.data.as_ref().unwrap().previous_work_products
    }

    pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode<K>) {
        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().insert(dep_node);
    }

    pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode<K>) -> bool {
        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
    }

    pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode<K>, debug_str_gen: F)
        F: FnOnce() -> String,
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {

        let debug_str = self.with_ignore(debug_str_gen);
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);

    pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }

    fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
        if let Some(ref data) = self.data {
            if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
                return data.colors.get(prev_index);
                // This is a node that did not exist in the previous compilation session.

    /// Try to mark a node index for the node `dep_node`.
    ///
    /// A node will have an index when it has already been marked green, or when we can mark
    /// it green. This function will mark the current task as a reader of the specified node
    /// when a node index can be found for that node.
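    ///
    /// Illustrative shape of the result at a call site:
    ///
    /// ```ignore (illustrative)
    /// match tcx.dep_graph.try_mark_green(qcx, &dep_node) {
    ///     // Green: the previous result can be reused; `prev_index` can be used
    ///     // to load a cached value from disk.
    ///     Some((prev_index, dep_node_index)) => { /* ... */ }
    ///     // Red, unknown, or no previous node: the query must be re-executed.
    ///     None => { /* ... */ }
    /// }
    /// ```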
    pub fn try_mark_green<Qcx: QueryContext<DepKind = K>>(
        dep_node: &DepNode<K>,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));

        // Return None if the dep graph is disabled
        let data = self.data.as_ref()?;

        // Return None if the dep node didn't exist in the previous session
        let prev_index = data.previous.node_to_index_opt(dep_node)?;

        match data.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(qcx, data, prev_index, &dep_node)
                    .map(|dep_node_index| (prev_index, dep_node_index))

    #[instrument(skip(self, qcx, data, parent_dep_node_index), level = "debug")]
    fn try_mark_parent_green<Qcx: QueryContext<DepKind = K>>(
        data: &DepGraphData<K>,
        parent_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode<K>,
        let dep_dep_node_color = data.colors.get(parent_dep_node_index);
        let dep_dep_node = &data.previous.index_to_node(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                // This dependency has been marked as green before, we are
                // still fine and can continue with checking the other
                // dependencies.
                debug!("dependency {dep_dep_node:?} was immediately green");
            Some(DepNodeColor::Red) => {
                // We found a dependency the value of which has changed
                // compared to the previous compilation session. We cannot
                // mark the DepNode as green and also don't need to bother
                // with checking any of the other dependencies.
                debug!("dependency {dep_dep_node:?} was immediately red");
                // We don't know the state of this dependency. If it isn't
                // an eval_always node, let's try to mark it green recursively.
                if !qcx.dep_context().is_eval_always(dep_dep_node.kind) {
                    debug!(
                        "state of dependency {:?} ({}) is unknown, trying to mark it green",
                        dep_dep_node, dep_dep_node.hash,
                    );
                    let node_index =
                        self.try_mark_previous_green(qcx, data, parent_dep_node_index, dep_dep_node);

                    if node_index.is_some() {
                        debug!("managed to MARK dependency {dep_dep_node:?} as green");

                // We failed to mark it green, so we try to force the query.
                debug!("trying to force dependency {dep_dep_node:?}");
                if !qcx.dep_context().try_force_from_dep_node(*dep_dep_node) {
                    // The DepNode could not be forced.
                    debug!("dependency {dep_dep_node:?} could not be forced");

        let dep_dep_node_color = data.colors.get(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                debug!("managed to FORCE dependency {dep_dep_node:?} to green");
            Some(DepNodeColor::Red) => {
                debug!("dependency {dep_dep_node:?} was red after forcing");

        if let None = qcx.dep_context().sess().has_errors_or_delayed_span_bugs() {
            panic!("try_mark_previous_green() - Forcing the DepNode should have set its color")

        // If the query we just forced has resulted in
        // some kind of compilation error, we cannot rely on
        // the dep-node color having been properly updated.
        // This means that the query system has reached an
        // invalid state. We let the compiler continue (by
        // returning `None`) so it can emit error messages
        // and wind down, but rely on the fact that this
        // invalid state will not be persisted to the
        // incremental compilation cache because of
        // compilation errors being present.
        debug!("dependency {dep_dep_node:?} resulted in compilation error");

    /// Try to mark a dep-node which existed in the previous compilation session as green.
    #[instrument(skip(self, qcx, data, prev_dep_node_index), level = "debug")]
    fn try_mark_previous_green<Qcx: QueryContext<DepKind = K>>(
        data: &DepGraphData<K>,
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode<K>,
    ) -> Option<DepNodeIndex> {
        #[cfg(not(parallel_compiler))]
        {
            debug_assert!(!self.dep_node_exists(dep_node));
            debug_assert!(data.colors.get(prev_dep_node_index).is_none());
        }

        // We never try to mark eval_always nodes as green
        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));

        debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);

        let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);

        for &dep_dep_node_index in prev_deps {
            self.try_mark_parent_green(qcx, data, dep_dep_node_index, dep_node)?

        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green.

        // There may be multiple threads trying to mark the same dep node green concurrently.

        // We allocate an entry for the node in the current dependency graph and
        // add all the appropriate edges imported from the previous graph.
        let dep_node_index = data.current.promote_node_and_deps_to_current(
            qcx.dep_context().profiler(),

        // ... emitting any stored diagnostic ...

        // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
        // Maybe store a list on disk and encode this fact in the DepNodeState
        let side_effects = qcx.load_side_effects(prev_dep_node_index);

        #[cfg(not(parallel_compiler))]
        debug_assert!(
            data.colors.get(prev_dep_node_index).is_none(),
            "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \

        if !side_effects.is_empty() {
            self.with_query_deserialization(|| {
                self.emit_side_effects(qcx, data, dep_node_index, side_effects)

        // ... and finally storing a "Green" entry in the color map.
        // Multiple threads can all write the same color here
        data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

        debug!("successfully marked {dep_node:?} as green");

    /// Atomically emits some loaded diagnostics.
    /// This may be called concurrently on multiple threads for the same dep node.
    fn emit_side_effects<Qcx: QueryContext<DepKind = K>>(
        data: &DepGraphData<K>,
        dep_node_index: DepNodeIndex,
        side_effects: QuerySideEffects,
        let mut processed = data.processed_side_effects.lock();

        if processed.insert(dep_node_index) {
            // We were the first to insert the node in the set so this thread
            // must process side effects

            // Promote the previous diagnostics to the current session.
            qcx.store_side_effects(dep_node_index, side_effects.clone());

            let handle = qcx.dep_context().sess().diagnostic();

            for mut diagnostic in side_effects.diagnostics {
                handle.emit_diagnostic(&mut diagnostic);

    /// Returns true if the given node has been marked as red during the
    /// current compilation session. Used in various assertions.
    pub fn is_red(&self, dep_node: &DepNode<K>) -> bool {
        self.node_color(dep_node) == Some(DepNodeColor::Red)
    }

    /// Returns true if the given node has been marked as green during the
    /// current compilation session. Used in various assertions.
    pub fn is_green(&self, dep_node: &DepNode<K>) -> bool {
        self.node_color(dep_node).map_or(false, |c| c.is_green())
    }

    /// This method loads all on-disk cacheable query results into memory, so
    /// they can be written out to the new cache file again. Most query results
    /// will already be in memory but in the case where we marked something as
    /// green but then did not need the value, that value will never have been
    /// loaded from disk.
    ///
    /// This method will only load queries that will end up in the disk cache.
    /// Other queries will not be executed.
    pub fn exec_cache_promotions<Tcx: DepContext<DepKind = K>>(&self, tcx: Tcx) {
        let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    tcx.try_load_from_on_disk_cache(dep_node);
                None | Some(DepNodeColor::Red) => {
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.

    pub fn print_incremental_info(&self) {
        if let Some(data) = &self.data {
            data.current.encoder.borrow().print_incremental_info(
                data.current.total_read_count.load(Relaxed),
                data.current.total_duplicate_read_count.load(Relaxed),

    pub fn encode(&self, profiler: &SelfProfilerRef) -> FileEncodeResult {
        if let Some(data) = &self.data {
            data.current.encoder.steal().finish(profiler)

    pub(crate) fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
        DepNodeIndex::from_u32(index)
    }
935 /// A "work product" is an intermediate result that we save into the
936 /// incremental directory for later re-use. The primary example are
937 /// the object files that we save for each partition at code
940 /// Each work product is associated with a dep-node, representing the
941 /// process that produced the work-product. If that dep-node is found
942 /// to be dirty when we load up, then we will delete the work-product
943 /// at load time. If the work-product is found to be clean, then we
944 /// will keep a record in the `previous_work_products` list.
946 /// In addition, work products have an associated hash. This hash is
947 /// an extra hash that can be used to decide if the work-product from
948 /// a previous compilation can be re-used (in addition to the dirty
951 /// As the primary example, consider the object files we generate for
952 /// each partition. In the first run, we create partitions based on
953 /// the symbols that need to be compiled. For each partition P, we
954 /// hash the symbols in P and create a `WorkProduct` record associated
955 /// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
958 /// The next time we compile, if the `DepNode::CodegenUnit(P)` is
959 /// judged to be clean (which means none of the things we read to
960 /// generate the partition were found to be dirty), it will be loaded
961 /// into previous work products. We will then regenerate the set of
962 /// symbols in the partition P and hash them (note that new symbols
963 /// may be added -- for example, new monomorphizations -- even if
964 /// nothing in P changed!). We will compare that hash against the
965 /// previous hash. If it matches up, we can reuse the object file.
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct WorkProduct {
    pub cgu_name: String,
    /// Saved files associated with this CGU. In each key/value pair, the value is the path to the
    /// saved file and the key is some identifier for the type of file being saved.
    ///
    /// By convention, file extensions are currently used as identifiers, i.e. the key "o" maps to
    /// the object file's path, and "dwo" to the dwarf object file's path.
    pub saved_files: FxHashMap<String, String>,
}

// Index type for `DepNodeData`'s edges.
rustc_index::newtype_index! {

/// `CurrentDepGraph` stores the dependency graph for the current session. It
/// will be populated as we run queries or tasks. We never remove nodes from the
/// graph: they are only added.
///
/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes
/// in memory. This is important, because these graph structures are some of the
/// largest in the compiler.
///
/// For this reason, we avoid storing `DepNode`s more than once as map
/// keys. The `new_node_to_index` map only contains nodes not in the previous
/// graph, and we map nodes in the previous graph to indices via a two-step
/// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
/// and the `prev_index_to_index` vector (which is more compact and faster than
/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
///
/// This struct uses three locks internally. The `data`, `new_node_to_index`,
/// and `prev_index_to_index` fields are locked separately. Operations that take
/// a `DepNodeIndex` typically just access the `data` field.
///
/// We only need to manipulate at most two locks simultaneously:
/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
/// first, and `data` second.
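///
/// The two-step mapping looks roughly like this, mirroring
/// `DepGraph::dep_node_index_of_opt` above:
///
/// ```ignore (illustrative)
/// let index: Option<DepNodeIndex> = match previous.node_to_index_opt(&dep_node) {
///     // The node existed in the previous session: go through `prev_index_to_index`.
///     Some(prev_index) => prev_index_to_index.lock()[prev_index],
///     // Otherwise it can only be a node newly created in this session.
///     None => new_node_to_index.get_shard_by_value(&dep_node).lock().get(&dep_node).copied(),
/// };
/// ```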
pub(super) struct CurrentDepGraph<K: DepKind> {
    encoder: Steal<GraphEncoder<K>>,
    new_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,
    prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,

    /// This is used to verify that fingerprints do not change between the creation of a node
    /// and its recomputation.
    #[cfg(debug_assertions)]
    fingerprints: Lock<FxHashMap<DepNode<K>, Fingerprint>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    #[cfg(debug_assertions)]
    forbidden_edge: Option<EdgeFilter<K>>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: The color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is just a random number generated when
    /// the `DepGraph` is created.
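    /// (Concretely, `with_anon_task` computes each anon node's hash as
    /// `anon_id_seed.combine(hasher.finish())`, where `hasher` has digested the
    /// node's dependency indices.)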
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,

    /// The cached event id for profiling node interning. This saves us
    /// from having to look up the event id every time we intern a node
    /// which may incur too much overhead.
    /// This will be None if self-profiling is disabled.
    node_intern_event_id: Option<EventId>,
}

impl<K: DepKind> CurrentDepGraph<K> {
        profiler: &SelfProfilerRef,
        prev_graph_node_count: usize,
        encoder: FileEncoder,
    ) -> CurrentDepGraph<K> {
        use std::time::{SystemTime, UNIX_EPOCH};

        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);
        let anon_id_seed = stable_hasher.finish();

        #[cfg(debug_assertions)]
        let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
            Ok(s) => match EdgeFilter::new(&s) {
                Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),

        // We store a large collection of these in `prev_index_to_index` during
        // non-full incremental builds, and want to ensure that the element size
        // doesn't inadvertently increase.
        static_assert_size!(Option<DepNodeIndex>, 4);

        let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;

        let node_intern_event_id = profiler
            .get_or_alloc_cached_string("incr_comp_intern_dep_graph_node")
            .map(EventId::from_label);

            encoder: Steal::new(GraphEncoder::new(
                prev_graph_node_count,
            new_node_to_index: Sharded::new(|| {
                FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate / sharded::SHARDS,
            prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)),
            anon_id_seed,
            #[cfg(debug_assertions)]
            forbidden_edge,
            #[cfg(debug_assertions)]
            fingerprints: Lock::new(Default::default()),
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
            node_intern_event_id,

    #[cfg(debug_assertions)]
    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode<K>, fingerprint: Fingerprint) {
        if let Some(forbidden_edge) = &self.forbidden_edge {
            forbidden_edge.index_to_node.lock().insert(dep_node_index, key);

        match self.fingerprints.lock().entry(key) {
            Entry::Vacant(v) => {
                v.insert(fingerprint);
            Entry::Occupied(o) => {
                assert_eq!(*o.get(), fingerprint, "Unstable fingerprints for {:?}", key);

    /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
    /// Assumes that this is a node that has no equivalent in the previous dep-graph.
        profiler: &SelfProfilerRef,
        current_fingerprint: Fingerprint,
        let dep_node_index = match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key)
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                let dep_node_index =
                    self.encoder.borrow().send(profiler, key, current_fingerprint, edges);
                entry.insert(dep_node_index);

        #[cfg(debug_assertions)]
        self.record_edge(dep_node_index, key, current_fingerprint);

        profiler: &SelfProfilerRef,
        prev_graph: &SerializedDepGraph<K>,
        fingerprint: Option<Fingerprint>,
    ) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) {
        let print_status = cfg!(debug_assertions) && print_status;

        // Get timer for profiling `DepNode` interning
        let _node_intern_timer =
            self.node_intern_event_id.map(|eid| profiler.generic_activity_with_event_id(eid));

        if let Some(prev_index) = prev_graph.node_to_index_opt(&key) {
            // Determine the color and index of the new `DepNode`.
            if let Some(fingerprint) = fingerprint {
                if fingerprint == prev_graph.fingerprint_by_index(prev_index) {
                        eprintln!("[task::green] {:?}", key);

                    // This is a green node: it existed in the previous compilation,
                    // its query was re-executed, and it has the same result as before.
                    let mut prev_index_to_index = self.prev_index_to_index.lock();

                    let dep_node_index = match prev_index_to_index[prev_index] {
                        Some(dep_node_index) => dep_node_index,
                            let dep_node_index =
                                self.encoder.borrow().send(profiler, key, fingerprint, edges);
                            prev_index_to_index[prev_index] = Some(dep_node_index);

                    #[cfg(debug_assertions)]
                    self.record_edge(dep_node_index, key, fingerprint);
                    (dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index))))
                        eprintln!("[task::red] {:?}", key);

                    // This is a red node: it existed in the previous compilation, its query
                    // was re-executed, but it has a different result from before.
                    let mut prev_index_to_index = self.prev_index_to_index.lock();

                    let dep_node_index = match prev_index_to_index[prev_index] {
                        Some(dep_node_index) => dep_node_index,
                            let dep_node_index =
                                self.encoder.borrow().send(profiler, key, fingerprint, edges);
                            prev_index_to_index[prev_index] = Some(dep_node_index);

                    #[cfg(debug_assertions)]
                    self.record_edge(dep_node_index, key, fingerprint);
                    (dep_node_index, Some((prev_index, DepNodeColor::Red)))
                    eprintln!("[task::unknown] {:?}", key);

                // This is a red node, effectively: it existed in the previous compilation
                // session, its query was re-executed, but it doesn't compute a result hash
                // (i.e. it represents a `no_hash` query), so we have no way of determining
                // whether or not the result was the same as before.
                let mut prev_index_to_index = self.prev_index_to_index.lock();

                let dep_node_index = match prev_index_to_index[prev_index] {
                    Some(dep_node_index) => dep_node_index,
                        let dep_node_index =
                            self.encoder.borrow().send(profiler, key, Fingerprint::ZERO, edges);
                        prev_index_to_index[prev_index] = Some(dep_node_index);

                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key, Fingerprint::ZERO);
                (dep_node_index, Some((prev_index, DepNodeColor::Red)))
                eprintln!("[task::new] {:?}", key);

            let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO);

            // This is a new node: it didn't exist in the previous compilation session.
            let dep_node_index = self.intern_new_node(profiler, key, edges, fingerprint);

            (dep_node_index, None)

    fn promote_node_and_deps_to_current(
        profiler: &SelfProfilerRef,
        prev_graph: &SerializedDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
        self.debug_assert_not_in_new_nodes(prev_graph, prev_index);

        let mut prev_index_to_index = self.prev_index_to_index.lock();

        match prev_index_to_index[prev_index] {
            Some(dep_node_index) => dep_node_index,
                let key = prev_graph.index_to_node(prev_index);
                let edges = prev_graph
                    .edge_targets_from(prev_index)
                    .map(|i| prev_index_to_index[*i].unwrap())
                let fingerprint = prev_graph.fingerprint_by_index(prev_index);
                let dep_node_index = self.encoder.borrow().send(profiler, key, fingerprint, edges);
                prev_index_to_index[prev_index] = Some(dep_node_index);

                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key, fingerprint);

    fn debug_assert_not_in_new_nodes(
        prev_graph: &SerializedDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
        let node = &prev_graph.index_to_node(prev_index);
        debug_assert!(
            !self.new_node_to_index.get_shard_by_value(node).lock().contains_key(node),
            "node from previous graph present in new node collection"

/// The capacity of the `reads` field `SmallVec`
const TASK_DEPS_READS_CAP: usize = 8;
type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>;
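
// With `TASK_DEPS_READS_CAP == 8`, `DepGraph::read_index` deduplicates the
// first 8 reads with a linear scan of `reads`; once the vector reaches the
// cap, `read_set` is filled with the reads collected so far and used for all
// further deduplication.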

#[derive(Debug, Clone, Copy)]
pub enum TaskDepsRef<'a, K: DepKind> {
    /// New dependencies can be added to the
    /// `TaskDeps`. This is used when executing a 'normal' query
    /// (no `eval_always` modifier)
    Allow(&'a Lock<TaskDeps<K>>),
    /// New dependencies are ignored. This is used when
    /// executing an `eval_always` query, since there's no
    /// need to track dependencies for a query that's always
    /// re-executed. This is also used for `dep_graph.with_ignore`.
    Ignore,
    /// Any attempt to add new dependencies will cause a panic.
    /// This is used when decoding a query result from disk,
    /// to ensure that the decoding process doesn't itself
    /// require the execution of any queries.
    Forbid,
}

pub struct TaskDeps<K: DepKind> {
    #[cfg(debug_assertions)]
    node: Option<DepNode<K>>,
    reads: EdgesVec,
    read_set: FxHashSet<DepNodeIndex>,
    phantom_data: PhantomData<DepNode<K>>,
}

impl<K: DepKind> Default for TaskDeps<K> {
    fn default() -> Self {
        Self {
            #[cfg(debug_assertions)]
            node: None,
            reads: EdgesVec::new(),
            read_set: FxHashSet::default(),
            phantom_data: PhantomData,
        }
    }
}

// A data structure that stores `Option<DepNodeColor>` values as a contiguous
// array, using one `u32` per entry.
struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;
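
// For example: `None` is stored as `0` (`COMPRESSED_NONE`), `Red` as `1`
// (`COMPRESSED_RED`), and `Green(DepNodeIndex(i))` as `i + 2`, so any stored
// value `v >= COMPRESSED_FIRST_GREEN` decodes to
// `Green(DepNodeIndex::from_u32(v - COMPRESSED_FIRST_GREEN))`.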

impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
    }

    fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
                Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))

    fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
        self.values[index].store(
            DepNodeColor::Red => COMPRESSED_RED,
            DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,