use crate::dep_graph::DepContext;
use crate::query::plumbing::CycleError;
use crate::query::{QueryContext, QueryStackFrame, SimpleDefKind};

use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, Handler, Level};
use rustc_session::Session;
use rustc_span::Span;

use std::convert::TryFrom;
use std::hash::Hash;
use std::num::NonZeroU32;

#[cfg(parallel_compiler)]
use {
    crate::dep_graph::DepKind,
    parking_lot::{Condvar, Mutex},
    rustc_data_structures::fx::FxHashSet,
    rustc_data_structures::sync::Lock,
    rustc_data_structures::sync::Lrc,
    rustc_data_structures::{jobserver, OnDrop},
    rustc_rayon_core as rayon_core,
    rustc_span::DUMMY_SP,
    std::iter::{self, FromIterator},
    std::{mem, process},
};

/// Represents a span and a query key.
#[derive(Clone, Debug)]
pub struct QueryInfo {
    /// The span corresponding to the reason for which this query was required.
    pub span: Span,
    pub query: QueryStackFrame,
}

pub type QueryMap<D> = FxHashMap<QueryJobId<D>, QueryJobInfo<D>>;

/// A value uniquely identifying an active query job within a shard in the query cache.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryShardJobId(pub NonZeroU32);

/// A value uniquely identifying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryJobId<D> {
    /// Which job within a shard is this.
    pub job: QueryShardJobId,

    /// In which shard is this job.
    pub shard: u16,

    /// What kind of query this job is.
    pub kind: D,
}

impl<D> QueryJobId<D>
where
    D: Copy + Clone + Eq + Hash,
{
    pub fn new(job: QueryShardJobId, shard: usize, kind: D) -> Self {
        QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind }
    }

    fn query(self, map: &QueryMap<D>) -> QueryStackFrame {
        map.get(&self).unwrap().query.clone()
    }

    #[cfg(parallel_compiler)]
    fn span(self, map: &QueryMap<D>) -> Span {
        map.get(&self).unwrap().job.span
    }

    #[cfg(parallel_compiler)]
    fn parent(self, map: &QueryMap<D>) -> Option<QueryJobId<D>> {
        map.get(&self).unwrap().job.parent
    }

    #[cfg(parallel_compiler)]
    fn latch<'a>(self, map: &'a QueryMap<D>) -> Option<&'a QueryLatch<D>> {
        map.get(&self).unwrap().job.latch.as_ref()
    }
}

pub struct QueryJobInfo<D> {
    pub query: QueryStackFrame,
    pub job: QueryJob<D>,
}

/// Represents an active query job.
#[derive(Clone)]
pub struct QueryJob<D> {
    pub id: QueryShardJobId,

    /// The span corresponding to the reason for which this query was required.
    pub span: Span,

    /// The parent query job which created this job and is implicitly waiting on it.
    pub parent: Option<QueryJobId<D>>,

    /// The latch that is used to wait on this job.
    #[cfg(parallel_compiler)]
    latch: Option<QueryLatch<D>>,
}

impl<D> QueryJob<D>
where
    D: Copy + Clone + Eq + Hash,
{
    /// Creates a new query job.
    pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<D>>) -> Self {
        QueryJob {
            id,
            span,
            parent,
            #[cfg(parallel_compiler)]
            latch: None,
        }
    }

    #[cfg(parallel_compiler)]
    pub(super) fn latch(&mut self) -> QueryLatch<D> {
        if self.latch.is_none() {
            self.latch = Some(QueryLatch::new());
        }
        self.latch.as_ref().unwrap().clone()
    }

    /// Signals to waiters that the query is complete.
    ///
    /// This does nothing for the single-threaded rustc,
    /// as there are no concurrent jobs which could be waiting on us.
    pub fn signal_complete(self) {
        #[cfg(parallel_compiler)]
        {
            if let Some(latch) = self.latch {
                latch.set();
            }
        }
    }
}

#[cfg(not(parallel_compiler))]
impl<D> QueryJobId<D>
where
    D: Copy + Clone + Eq + Hash,
{
    #[cold]
    #[inline(never)]
    pub(super) fn find_cycle_in_stack(
        &self,
        query_map: QueryMap<D>,
        current_job: &Option<QueryJobId<D>>,
        span: Span,
    ) -> CycleError {
        // Find the waitee amongst the parents of `current_job`.
        let mut cycle = Vec::new();
        let mut current_job = Option::clone(current_job);

        while let Some(job) = current_job {
            let info = query_map.get(&job).unwrap();
            cycle.push(QueryInfo { span: info.job.span, query: info.query.clone() });

            if job == *self {
                cycle.reverse();

                // This is the end of the cycle. The span entry we included was
                // for the usage of the cycle itself, and not part of the cycle;
                // replace it with the span which caused the cycle to form.
                cycle[0].span = span;
                // Find out why the cycle itself was used.
                let usage = info
                    .job
                    .parent
                    .as_ref()
                    .map(|parent| (info.job.span, parent.query(&query_map)));
                return CycleError { usage, cycle };
            }

            current_job = info.job.parent;
        }

        panic!("did not find a cycle")
    }
}
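
// A minimal standalone sketch of the walk above, with plain `u32` job ids
// and a `HashMap` standing in for `QueryMap` (hypothetical types, for
// illustration only):
#[cfg(test)]
mod find_cycle_sketch {
    use std::collections::HashMap;

    // Walk the parent chain from `current` until `target` (the waitee) is
    // found, returning the jobs on the cycle in the order they were entered.
    fn find_cycle(parents: &HashMap<u32, Option<u32>>, target: u32, current: u32) -> Vec<u32> {
        let mut cycle = Vec::new();
        let mut job = Some(current);
        while let Some(j) = job {
            cycle.push(j);
            if j == target {
                cycle.reverse();
                return cycle;
            }
            job = parents[&j];
        }
        panic!("did not find a cycle")
    }

    #[test]
    fn walks_parents_to_the_waitee() {
        // Job 3 was started by 2, which was started by 1; 3 now waits on 1.
        let parents = HashMap::from([(1, None), (2, Some(1)), (3, Some(2))]);
        assert_eq!(find_cycle(&parents, 1, 3), vec![1, 2, 3]);
    }
}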

#[cfg(parallel_compiler)]
struct QueryWaiter<D> {
    query: Option<QueryJobId<D>>,
    condvar: Condvar,
    span: Span,
    cycle: Lock<Option<CycleError>>,
}

#[cfg(parallel_compiler)]
impl<D> QueryWaiter<D> {
    fn notify(&self, registry: &rayon_core::Registry) {
        rayon_core::mark_unblocked(registry);
        self.condvar.notify_one();
    }
}

#[cfg(parallel_compiler)]
struct QueryLatchInfo<D> {
    complete: bool,
    waiters: Vec<Lrc<QueryWaiter<D>>>,
}

#[cfg(parallel_compiler)]
#[derive(Clone)]
pub(super) struct QueryLatch<D> {
    info: Lrc<Mutex<QueryLatchInfo<D>>>,
}

#[cfg(parallel_compiler)]
impl<D: Eq + Hash> QueryLatch<D> {
    fn new() -> Self {
        QueryLatch {
            info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
        }
    }
}

#[cfg(parallel_compiler)]
impl<D> QueryLatch<D> {
    /// Waits for the query job to complete.
    pub(super) fn wait_on(
        &self,
        query: Option<QueryJobId<D>>,
        span: Span,
    ) -> Result<(), CycleError> {
        let waiter =
            Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
        self.wait_on_inner(&waiter);
        // FIXME: Get rid of this lock. We have ownership of the QueryWaiter,
        // although another thread may still have an Lrc reference, so we cannot
        // use Lrc::get_mut.
        let mut cycle = waiter.cycle.lock();
        match cycle.take() {
            None => Ok(()),
            Some(cycle) => Err(cycle),
        }
    }

    /// Waits on this latch by blocking the current thread.
    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D>>) {
        let mut info = self.info.lock();
        if !info.complete {
            // We push the waiter on to the `waiters` list. It can be accessed inside
            // the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
            // Both of these will remove it from the `waiters` list before resuming
            // this thread.
            info.waiters.push(waiter.clone());

            // If this detects a deadlock and the deadlock handler wants to resume this
            // thread, we have to be in the `wait` call. This is ensured by the deadlock
            // handler grabbing the `self.info` lock.
            rayon_core::mark_blocked();
            jobserver::release_thread();
            waiter.condvar.wait(&mut info);
            // Release the lock before we potentially block in `acquire_thread`.
            mem::drop(info);
            jobserver::acquire_thread();
        }
    }

    /// Sets the latch and resumes all waiters on it.
    fn set(&self) {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        info.complete = true;
        let registry = rayon_core::Registry::current();
        for waiter in info.waiters.drain(..) {
            waiter.notify(&registry);
        }
    }

    /// Removes a single waiter from the list of waiters.
    /// This is used to break query cycles.
    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        // Remove the waiter from the list of waiters.
        info.waiters.remove(waiter)
    }
}
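
// A minimal sketch of the latch protocol above using only the standard
// library (illustrative; the real latch additionally participates in rayon's
// blocked-thread accounting, the jobserver, and cycle extraction):
#[cfg(test)]
mod latch_sketch {
    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;

    struct Latch {
        state: Mutex<bool>,
        condvar: Condvar,
    }

    impl Latch {
        fn new() -> Self {
            Latch { state: Mutex::new(false), condvar: Condvar::new() }
        }

        // Blocks until `set` flips the flag; re-checks it on every wakeup.
        fn wait(&self) {
            let mut complete = self.state.lock().unwrap();
            while !*complete {
                complete = self.condvar.wait(complete).unwrap();
            }
        }

        // Sets the flag and wakes every waiter.
        fn set(&self) {
            *self.state.lock().unwrap() = true;
            self.condvar.notify_all();
        }
    }

    #[test]
    fn set_releases_waiters() {
        let latch = Arc::new(Latch::new());
        let waiter = {
            let latch = Arc::clone(&latch);
            thread::spawn(move || latch.wait())
        };
        latch.set();
        waiter.join().unwrap();
    }
}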

/// A resumable waiter of a query. The usize is the index into waiters in the query's latch.
#[cfg(parallel_compiler)]
type Waiter<D> = (QueryJobId<D>, usize);

/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited.
/// `visit` is called for every waiter and is passed a query waiting on `query`
/// and a span indicating the reason the query waited on `query`.
/// If `visit` returns Some, this function returns.
/// For visits of non-resumable waiters it returns the return value of `visit`.
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter.
/// If all `visit` calls return None, this function also returns None.
#[cfg(parallel_compiler)]
fn visit_waiters<D, F>(
    query_map: &QueryMap<D>,
    query: QueryJobId<D>,
    mut visit: F,
) -> Option<Option<Waiter<D>>>
where
    D: Copy + Clone + Eq + Hash,
    F: FnMut(Span, QueryJobId<D>) -> Option<Option<Waiter<D>>>,
{
    // Visit the parent query which is a non-resumable waiter since it's on the same stack.
    if let Some(parent) = query.parent(query_map) {
        if let Some(cycle) = visit(query.span(query_map), parent) {
            return Some(cycle);
        }
    }

    // Visit the explicit waiters which use condvars and are resumable.
    if let Some(latch) = query.latch(query_map) {
        for (i, waiter) in latch.info.lock().waiters.iter().enumerate() {
            if let Some(waiter_query) = waiter.query {
                if visit(waiter.span, waiter_query).is_some() {
                    // Return a value which indicates that this waiter can be resumed.
                    return Some(Some((query, i)));
                }
            }
        }
    }

    None
}
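
// The core shape of `visit_waiters`, reduced to a sketch: a `FnMut` visitor
// which short-circuits the traversal by returning `Some` (hypothetical `u32`
// waiters, for illustration only):
#[cfg(test)]
mod visit_waiters_sketch {
    // Calls `visit` on each waiter; the first `Some` short-circuits the walk.
    fn visit_all<F, R>(waiters: &[u32], mut visit: F) -> Option<R>
    where
        F: FnMut(u32) -> Option<R>,
    {
        for &w in waiters {
            if let Some(r) = visit(w) {
                return Some(r);
            }
        }
        None
    }

    #[test]
    fn stops_at_first_match() {
        let mut seen = Vec::new();
        let found = visit_all(&[10, 11, 12], |w| {
            seen.push(w);
            (w == 11).then(|| w)
        });
        assert_eq!(found, Some(11));
        assert_eq!(seen, vec![10, 11]); // 12 is never visited
    }
}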

/// Looks for query cycles by doing a depth first search starting at `query`.
/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
fn cycle_check<D>(
    query_map: &QueryMap<D>,
    query: QueryJobId<D>,
    span: Span,
    stack: &mut Vec<(Span, QueryJobId<D>)>,
    visited: &mut FxHashSet<QueryJobId<D>>,
) -> Option<Option<Waiter<D>>>
where
    D: Copy + Clone + Eq + Hash,
{
    if !visited.insert(query) {
        return if let Some(p) = stack.iter().position(|q| q.1 == query) {
            // We detected a query cycle, fix up the initial span and return Some.

            // Remove previous stack entries.
            stack.drain(0..p);
            // Replace the span for the first query with the cycle cause.
            stack[0].0 = span;
            Some(None)
        } else {
            None
        };
    }

    // A query marked as visited is added to the stack.
    stack.push((span, query));

    // Visit all the waiters.
    let r = visit_waiters(query_map, query, |span, successor| {
        cycle_check(query_map, successor, span, stack, visited)
    });

    // Remove the entry in our stack if we didn't find a cycle.
    if r.is_none() {
        stack.pop();
    }

    r
}
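
// A standalone sketch of the same DFS, assuming a plain wait-for graph of
// `u32` node ids (illustrative only; the real version also threads spans
// through and reports which waiter can be resumed):
#[cfg(test)]
mod cycle_check_sketch {
    use std::collections::{HashMap, HashSet};

    fn cycle_check(
        graph: &HashMap<u32, Vec<u32>>,
        node: u32,
        stack: &mut Vec<u32>,
        visited: &mut HashSet<u32>,
    ) -> bool {
        if !visited.insert(node) {
            // Already seen: it is a cycle only if the node is still on our stack.
            if let Some(p) = stack.iter().position(|&n| n == node) {
                stack.drain(0..p); // drop the entries leading into the cycle
                return true;
            }
            return false;
        }
        stack.push(node);
        for &succ in graph.get(&node).into_iter().flatten() {
            if cycle_check(graph, succ, stack, visited) {
                return true;
            }
        }
        stack.pop(); // this node is not part of a cycle
        false
    }

    #[test]
    fn finds_the_cycle() {
        // 1 -> 2 -> 3 -> 2: the cycle is [2, 3].
        let graph = HashMap::from([(1, vec![2]), (2, vec![3]), (3, vec![2])]);
        let (mut stack, mut visited) = (Vec::new(), HashSet::new());
        assert!(cycle_check(&graph, 1, &mut stack, &mut visited));
        assert_eq!(stack, vec![2, 3]);
    }
}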

/// Finds out if there's a path to the compiler root (i.e. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
fn connected_to_root<D>(
    query_map: &QueryMap<D>,
    query: QueryJobId<D>,
    visited: &mut FxHashSet<QueryJobId<D>>,
) -> bool
where
    D: Copy + Clone + Eq + Hash,
{
    // We already visited this or we're deliberately ignoring it.
    if !visited.insert(query) {
        return false;
    }

    // This query is connected to the root (it has no query parent), return true.
    if query.parent(query_map).is_none() {
        return true;
    }

    visit_waiters(query_map, query, |_, successor| {
        connected_to_root(query_map, successor, visited).then_some(None)
    })
    .is_some()
}
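
// A sketch of the same reachability test: can `node` reach a root without
// touching anything already in `visited`? Pre-seeding `visited` is how a
// caller blocks off the rest of the cycle (hypothetical graph, for
// illustration only):
#[cfg(test)]
mod connected_to_root_sketch {
    use std::collections::{HashMap, HashSet};

    fn connected_to_root(
        waiters: &HashMap<u32, Vec<u32>>,
        roots: &HashSet<u32>,
        node: u32,
        visited: &mut HashSet<u32>,
    ) -> bool {
        if !visited.insert(node) {
            return false; // already visited, or deliberately blocked
        }
        if roots.contains(&node) {
            return true;
        }
        waiters
            .get(&node)
            .into_iter()
            .flatten()
            .any(|&succ| connected_to_root(waiters, roots, succ, visited))
    }

    #[test]
    fn blocked_nodes_cut_the_path() {
        // 1 -> 2 -> 3, where 3 is a root.
        let waiters = HashMap::from([(1, vec![2]), (2, vec![3])]);
        let roots = HashSet::from([3]);
        assert!(connected_to_root(&waiters, &roots, 1, &mut HashSet::new()));
        // Pre-marking 2 as visited blocks the only path to the root.
        let mut blocked = HashSet::from([2]);
        assert!(!connected_to_root(&waiters, &roots, 1, &mut blocked));
    }
}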

// Deterministically pick a query from a list.
#[cfg(parallel_compiler)]
fn pick_query<'a, D, T, F>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T
where
    D: Copy + Clone + Eq + Hash,
    F: Fn(&T) -> (Span, QueryJobId<D>),
{
    // Deterministically pick an entry point.
    // FIXME: Sort this instead
    queries
        .iter()
        .min_by_key(|v| {
            let (span, query) = f(v);
            let hash = query.query(query_map).hash;
            // Prefer entry points which have valid spans for nicer error messages.
            // We add an integer to the tuple ensuring that entry points
            // with valid spans are picked first.
            let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
            (span_cmp, hash)
        })
        .unwrap()
}
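
// The deterministic pick in miniature: `min_by_key` over a tuple whose first
// component prefers entries with a usable span, with a hash as a tie-breaker
// (spans are modeled as `Option<&str>` here, for illustration only):
#[cfg(test)]
mod pick_query_sketch {
    fn pick<'a>(entries: &'a [(Option<&'a str>, u64)]) -> &'a (Option<&'a str>, u64) {
        entries
            .iter()
            .min_by_key(|(span, hash)| {
                // Entries with a valid span sort before those without;
                // the hash breaks the remaining ties.
                let span_cmp = if span.is_none() { 1 } else { 0 };
                (span_cmp, *hash)
            })
            .unwrap()
    }

    #[test]
    fn prefers_valid_spans_then_lowest_hash() {
        let entries = [(None, 1), (Some("lib.rs:10"), 7), (Some("lib.rs:4"), 3)];
        assert_eq!(pick(&entries), &(Some("lib.rs:4"), 3));
    }
}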

/// Looks for query cycles starting from the last query in `jobs`.
/// If a cycle is found, all queries in the cycle are removed from `jobs` and
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
fn remove_cycle<D: DepKind>(
    query_map: &QueryMap<D>,
    jobs: &mut Vec<QueryJobId<D>>,
    wakelist: &mut Vec<Lrc<QueryWaiter<D>>>,
) -> bool {
    let mut visited = FxHashSet::default();
    let mut stack = Vec::new();
    // Look for a cycle starting with the last query in `jobs`.
    if let Some(waiter) =
        cycle_check(query_map, jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited)
    {
        // The stack is a vector of pairs of spans and queries; reverse it so that
        // the earlier entries require later entries.
        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();

        // Shift the spans so that queries are matched with the span for their waitee.
        spans.rotate_right(1);

        // Zip them back together.
        let mut stack: Vec<_> = iter::zip(spans, queries).collect();

        // Remove the queries in our cycle from the list of jobs to look at.
        for r in &stack {
            if let Some(pos) = jobs.iter().position(|j| j == &r.1) {
                jobs.remove(pos);
            }
        }

        // Find the queries in the cycle which are
        // connected to queries outside the cycle.
        let entry_points = stack
            .iter()
            .filter_map(|&(span, query)| {
                if query.parent(query_map).is_none() {
                    // This query is connected to the root (it has no query parent).
                    Some((span, query, None))
                } else {
                    let mut waiters = Vec::new();
                    // Find all the direct waiters which lead to the root.
                    visit_waiters(query_map, query, |span, waiter| {
                        // Mark all the other queries in the cycle as already visited.
                        let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1));

                        if connected_to_root(query_map, waiter, &mut visited) {
                            waiters.push((span, waiter));
                        }

                        None
                    });
                    if waiters.is_empty() {
                        None
                    } else {
                        // Deterministically pick one of the waiters to show to the user.
                        let waiter = *pick_query(query_map, &waiters, |s| *s);
                        Some((span, query, Some(waiter)))
                    }
                }
            })
            .collect::<Vec<(Span, QueryJobId<D>, Option<(Span, QueryJobId<D>)>)>>();

        // Deterministically pick an entry point.
        let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));

        // Shift the stack so that our entry point is first.
        let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
        if let Some(pos) = entry_point_pos {
            stack.rotate_left(pos);
        }

        let usage = usage.as_ref().map(|(span, query)| (*span, query.query(query_map)));

        // Create the cycle error.
        let error = CycleError {
            usage,
            cycle: stack
                .iter()
                .map(|&(s, ref q)| QueryInfo { span: s, query: q.query(query_map) })
                .collect(),
        };

        // We unwrap `waiter` here since there must always be one
        // edge which is resumable / waited using a query latch.
        let (waitee_query, waiter_idx) = waiter.unwrap();

        // Extract the waiter we want to resume.
        let waiter = waitee_query.latch(query_map).unwrap().extract_waiter(waiter_idx);

        // Set the cycle error so it will be picked up when resumed.
        *waiter.cycle.lock() = Some(error);

        // Put the waiter on the list of things to resume.
        wakelist.push(waiter);

        true
    } else {
        false
    }
}
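
// What the two rotations above do, in isolation: `rotate_right(1)` re-pairs
// each query with the span of the entry before it (its waitee), and
// `rotate_left(pos)` brings the chosen entry point to the front without
// breaking the cycle order (toy data, for illustration only):
#[cfg(test)]
mod stack_rotation_sketch {
    #[test]
    fn rotations_realign_and_reorder() {
        let queries = ["a", "b", "c"];
        let mut spans = ["span_of_a", "span_of_b", "span_of_c"];

        // Pair each query with its waitee's span.
        spans.rotate_right(1);
        let mut stack: Vec<_> = spans.iter().zip(queries.iter()).collect();
        assert_eq!(stack[0], (&"span_of_c", &"a"));

        // Put the entry point ("b") first, preserving the cycle order.
        let pos = stack.iter().position(|&(_, q)| q == &"b").unwrap();
        stack.rotate_left(pos);
        assert_eq!(stack[0].1, &"b");
    }
}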

/// Detects query cycles by using depth first search over all active query jobs.
/// If a query cycle is found it will break the cycle by finding an edge which
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
pub fn deadlock<CTX: QueryContext>(tcx: CTX, registry: &rayon_core::Registry) {
    let on_panic = OnDrop(|| {
        eprintln!("deadlock handler panicked, aborting process");
        process::abort();
    });

    let mut wakelist = Vec::new();
    let query_map = tcx.try_collect_active_jobs().unwrap();
    let mut jobs: Vec<QueryJobId<CTX::DepKind>> = query_map.keys().cloned().collect();

    let mut found_cycle = false;

    while jobs.len() > 0 {
        if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
            found_cycle = true;
        }
    }

    // Check that a cycle was found. It is possible for a deadlock to occur without
    // a query cycle if a query which can be waited on uses Rayon to do multithreading
    // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
    // wait using Rayon on B. Rayon may then switch to executing another query (Y)
    // which in turn will wait on X causing a deadlock. We have a false dependency from
    // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
    // only considers the true dependency and won't detect a cycle.
    assert!(found_cycle);

    // FIXME: Ensure this won't cause a deadlock before we return.
    for waiter in wakelist.into_iter() {
        waiter.notify(registry);
    }

    on_panic.disable();
}

#[inline(never)]
#[cold]
pub(crate) fn report_cycle<'a>(
    sess: &'a Session,
    CycleError { usage, cycle: stack }: CycleError,
) -> DiagnosticBuilder<'a> {
    assert!(!stack.is_empty());

    let fix_span = |span: Span, query: &QueryStackFrame| {
        sess.source_map().guess_head_span(query.default_span(span))
    };

    let span = fix_span(stack[1 % stack.len()].span, &stack[0].query);
    let mut err =
        struct_span_err!(sess, span, E0391, "cycle detected when {}", stack[0].query.description);

    for i in 1..stack.len() {
        let query = &stack[i].query;
        let span = fix_span(stack[(i + 1) % stack.len()].span, query);
        err.span_note(span, &format!("...which requires {}...", query.description));
    }

    if stack.len() == 1 {
        err.note(&format!("...which immediately requires {} again", stack[0].query.description));
    } else {
        err.note(&format!(
            "...which again requires {}, completing the cycle",
            stack[0].query.description
        ));
    }

    if stack.iter().all(|entry| {
        entry.query.def_kind.map_or(false, |def_kind| {
            matches!(def_kind, SimpleDefKind::TyAlias | SimpleDefKind::TraitAlias)
        })
    }) {
        if stack.iter().all(|entry| {
            entry
                .query
                .def_kind
                .map_or(false, |def_kind| matches!(def_kind, SimpleDefKind::TyAlias))
        }) {
            err.note("type aliases cannot be recursive");
            err.help("consider using a struct, enum, or union instead to break the cycle");
            err.help("see <https://doc.rust-lang.org/reference/types.html#recursive-types> for more information");
        } else {
            err.note("trait aliases cannot be recursive");
        }
    }

    if let Some((span, query)) = usage {
        err.span_note(fix_span(span, &query), &format!("cycle used when {}", query.description));
    }

    err
}
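
// The `(i + 1) % stack.len()` indexing above pairs each cycle entry with the
// span of the next entry, wrapping around to the first one (toy data, for
// illustration only):
#[cfg(test)]
mod cycle_span_pairing_sketch {
    #[test]
    fn wraps_around_to_the_first_entry() {
        let stack = ["a", "b", "c"];
        let pairs: Vec<_> =
            (0..stack.len()).map(|i| (stack[i], stack[(i + 1) % stack.len()])).collect();
        assert_eq!(pairs, vec![("a", "b"), ("b", "c"), ("c", "a")]);
    }
}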

pub fn print_query_stack<CTX: QueryContext>(
    tcx: CTX,
    mut current_query: Option<QueryJobId<CTX::DepKind>>,
    handler: &Handler,
    num_frames: Option<usize>,
) -> usize {
    // Be careful relying on global state here: this code is called from
    // a panic hook, which means that the global `Handler` may be in a weird
    // state if it was responsible for triggering the panic.
    let mut i = 0;
    let query_map = tcx.try_collect_active_jobs();

    while let Some(query) = current_query {
        if Some(i) == num_frames {
            break;
        }
        let query_info = if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query)) {
            info
        } else {
            break;
        };
        let mut diag = Diagnostic::new(
            Level::FailureNote,
            &format!("#{} [{}] {}", i, query_info.query.name, query_info.query.description),
        );
        diag.span =
            tcx.dep_context().sess().source_map().guess_head_span(query_info.job.span).into();
        handler.force_print_diagnostic(diag);

        current_query = query_info.job.parent;
        i += 1;
    }

    i
}