use crate::dep_graph::DepKind;
use crate::error::CycleStack;
use crate::query::plumbing::CycleError;
use crate::query::{QueryContext, QueryStackFrame};
use core::marker::PhantomData;

use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{
    Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, IntoDiagnostic, Level,
};
use rustc_hir::def::DefKind;
use rustc_session::Session;
use rustc_span::Span;

use std::num::NonZeroU64;

#[cfg(parallel_compiler)]
use {
    parking_lot::{Condvar, Mutex},
    rustc_data_structures::fx::FxHashSet,
    rustc_data_structures::sync::Lock,
    rustc_data_structures::sync::Lrc,
    rustc_data_structures::{jobserver, OnDrop},
    rustc_rayon_core as rayon_core,
    rustc_span::DUMMY_SP,
    std::iter,
    std::process,
};
/// Represents a span and a query key.
#[derive(Clone, Debug)]
pub struct QueryInfo<D: DepKind> {
    /// The span corresponding to the reason for which this query was required.
    pub span: Span,
    pub query: QueryStackFrame<D>,
}

pub type QueryMap<D> = FxHashMap<QueryJobId, QueryJobInfo<D>>;
/// A value uniquely identifying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryJobId(pub NonZeroU64);

impl QueryJobId {
    fn query<D: DepKind>(self, map: &QueryMap<D>) -> QueryStackFrame<D> {
        map.get(&self).unwrap().query.clone()
    }

    #[cfg(parallel_compiler)]
    fn span<D: DepKind>(self, map: &QueryMap<D>) -> Span {
        map.get(&self).unwrap().job.span
    }

    #[cfg(parallel_compiler)]
    fn parent<D: DepKind>(self, map: &QueryMap<D>) -> Option<QueryJobId> {
        map.get(&self).unwrap().job.parent
    }

    #[cfg(parallel_compiler)]
    fn latch<D: DepKind>(self, map: &QueryMap<D>) -> Option<&QueryLatch<D>> {
        map.get(&self).unwrap().job.latch.as_ref()
    }
}
#[derive(Clone)]
pub struct QueryJobInfo<D: DepKind> {
    pub query: QueryStackFrame<D>,
    pub job: QueryJob<D>,
}

/// Represents an active query job.
#[derive(Clone)]
pub struct QueryJob<D: DepKind> {
    pub id: QueryJobId,

    /// The span corresponding to the reason for which this query was required.
    pub span: Span,

    /// The parent query job which created this job and is implicitly waiting on it.
    pub parent: Option<QueryJobId>,

    /// The latch that is used to wait on this job.
    #[cfg(parallel_compiler)]
    latch: Option<QueryLatch<D>>,
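    /// Zero-sized marker that keeps the `D` parameter in use even when
    /// `parallel_compiler` is off and the `latch` field above compiles away.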
    spooky: core::marker::PhantomData<D>,
}
impl<D: DepKind> QueryJob<D> {
    /// Creates a new query job.
    pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
        QueryJob {
            id,
            span,
            parent,
            #[cfg(parallel_compiler)]
            latch: None,
            spooky: PhantomData,
        }
    }

    #[cfg(parallel_compiler)]
    pub(super) fn latch(&mut self) -> QueryLatch<D> {
        if self.latch.is_none() {
            self.latch = Some(QueryLatch::new());
        }
        self.latch.as_ref().unwrap().clone()
    }

    /// Signals to waiters that the query is complete.
    ///
    /// This does nothing for single threaded rustc,
    /// as there are no concurrent jobs which could be waiting on us.
    pub fn signal_complete(self) {
        #[cfg(parallel_compiler)]
        {
            if let Some(latch) = self.latch {
                latch.set();
            }
        }
    }
}
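// Informal lifecycle sketch: when a query begins executing, the caller
// allocates a fresh `QueryJobId`, records a `QueryJob` for it (with the
// current job as `parent`) in the active job map, and calls
// `signal_complete` once the query finishes so that, on the parallel
// compiler, any threads blocked on the latch are woken.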
impl QueryJobId {
    #[cfg(not(parallel_compiler))]
    pub(super) fn find_cycle_in_stack<D: DepKind>(
        &self,
        query_map: QueryMap<D>,
        current_job: &Option<QueryJobId>,
        span: Span,
    ) -> CycleError<D> {
        // Find the waitee amongst `current_job`'s parents.
        let mut cycle = Vec::new();
        let mut current_job = Option::clone(current_job);

        while let Some(job) = current_job {
            let info = query_map.get(&job).unwrap();
            cycle.push(QueryInfo { span: info.job.span, query: info.query.clone() });

            if job == *self {
                cycle.reverse();

                // This is the end of the cycle. The span entry we included was
                // for the usage of the cycle itself, and not part of the cycle.
                // Replace it with the span which caused the cycle to form.
                cycle[0].span = span;
                // Find out why the cycle itself was used.
                let usage = info
                    .job
                    .parent
                    .as_ref()
                    .map(|parent| (info.job.span, parent.query(&query_map)));
                return CycleError { usage, cycle };
            }

            current_job = info.job.parent;
        }

        panic!("did not find a cycle")
    }
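
    // Worked example for `find_cycle_in_stack`: suppose query A is demanded
    // again while C executes, with the parent chain C -> B -> A. The walk
    // pushes C, then B, then A; on reaching `self` (A) it reverses the path
    // to [A, B, C], patches A's span with the use that closed the cycle, and
    // reports A's own parent (if any) as the usage site.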
    pub fn try_find_layout_root<D: DepKind>(
        &self,
        query_map: QueryMap<D>,
    ) -> Option<(QueryJobInfo<D>, usize)> {
        let mut last_layout = None;
        let mut current_id = Some(*self);
        let mut depth = 0;

        while let Some(id) = current_id {
            let info = query_map.get(&id).unwrap();
            // FIXME: This string comparison should probably not be done.
            if format!("{:?}", info.query.dep_kind) == "layout_of" {
                depth += 1;
                last_layout = Some((info.clone(), depth));
            }
            current_id = info.job.parent;
        }
        last_layout
    }
}
#[cfg(parallel_compiler)]
struct QueryWaiter<D: DepKind> {
    query: Option<QueryJobId>,
    condvar: Condvar,
    span: Span,
    cycle: Lock<Option<CycleError<D>>>,
}

#[cfg(parallel_compiler)]
impl<D: DepKind> QueryWaiter<D> {
    fn notify(&self, registry: &rayon_core::Registry) {
        rayon_core::mark_unblocked(registry);
        self.condvar.notify_one();
    }
}

#[cfg(parallel_compiler)]
struct QueryLatchInfo<D: DepKind> {
    complete: bool,
    waiters: Vec<Lrc<QueryWaiter<D>>>,
}
#[cfg(parallel_compiler)]
#[derive(Clone)]
pub(super) struct QueryLatch<D: DepKind> {
    info: Lrc<Mutex<QueryLatchInfo<D>>>,
}

#[cfg(parallel_compiler)]
impl<D: DepKind> QueryLatch<D> {
    fn new() -> Self {
        QueryLatch {
            info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
        }
    }
    /// Awaits for the query job to complete.
    pub(super) fn wait_on(
        &self,
        query: Option<QueryJobId>,
        span: Span,
    ) -> Result<(), CycleError<D>> {
        let waiter =
            Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
        self.wait_on_inner(&waiter);
        // FIXME: Get rid of this lock. We have ownership of the QueryWaiter
        // although another thread may still have a Lrc reference so we cannot
        // use Lrc::get_mut.
        let mut cycle = waiter.cycle.lock();
        match cycle.take() {
            None => Ok(()),
            Some(cycle) => Err(cycle),
        }
    }
    /// Awaits the caller on this latch by blocking the current thread.
    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D>>) {
        let mut info = self.info.lock();
        if !info.complete {
            // We push the waiter on to the `waiters` list. It can be accessed inside
            // the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
            // Both of these will remove it from the `waiters` list before resuming
            // this thread.
            info.waiters.push(waiter.clone());

            // If this detects a deadlock and the deadlock handler wants to resume this thread
            // we have to be in the `wait` call. This is ensured by the deadlock handler
            // getting the self.info lock.
            rayon_core::mark_blocked();
            jobserver::release_thread();
            waiter.condvar.wait(&mut info);
            // Release the lock before we potentially block in `acquire_thread`.
            drop(info);
            jobserver::acquire_thread();
        }
    }
    /// Sets the latch and resumes all waiters on it.
    fn set(&self) {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        info.complete = true;
        let registry = rayon_core::Registry::current();
        for waiter in info.waiters.drain(..) {
            waiter.notify(&registry);
        }
    }

    /// Removes a single waiter from the list of waiters.
    /// This is used to break query cycles.
    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        // Remove the waiter from the list of waiters.
        info.waiters.remove(waiter)
    }
}
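// A hypothetical timeline, assuming two worker threads: thread A executes
// query X and blocks in `wait_on` for query Y, while thread B finishes Y and
// calls `QueryJob::signal_complete`, whose `set` drains the waiter list and
// wakes A through its condvar. The `mark_blocked`/`mark_unblocked` calls keep
// rustc-rayon's count of blocked threads accurate, so its deadlock detection
// only fires once every worker thread really is blocked.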
/// A resumable waiter of a query. The usize is the index into waiters in the query's latch.
#[cfg(parallel_compiler)]
type Waiter = (QueryJobId, usize);
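// The `Option<Option<Waiter>>` values threaded through `visit_waiters` and
// `cycle_check` below encode three states: `None` means "no cycle found, keep
// searching", `Some(None)` means "a cycle was found, but only via
// non-resumable parent edges so far", and `Some(Some(waiter))` means "a cycle
// was found and this resumable waiter edge can be used to break it".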
/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited.
/// `visit` is called for every waiter and is passed a query waiting on `query_ref`
/// and a span indicating the reason the query waited on `query_ref`.
/// If `visit` returns Some, this function returns.
/// For visits of non-resumable waiters it returns the return value of `visit`.
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter.
/// If all `visit` calls return None, this function also returns None.
#[cfg(parallel_compiler)]
fn visit_waiters<F, D>(
    query_map: &QueryMap<D>,
    query: QueryJobId,
    mut visit: F,
) -> Option<Option<Waiter>>
where
    F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
    D: DepKind,
{
    // Visit the parent query which is a non-resumable waiter since it's on the same stack.
    if let Some(parent) = query.parent(query_map) {
        if let Some(cycle) = visit(query.span(query_map), parent) {
            return Some(cycle);
        }
    }

    // Visit the explicit waiters which use condvars and are resumable.
    if let Some(latch) = query.latch(query_map) {
        for (i, waiter) in latch.info.lock().waiters.iter().enumerate() {
            if let Some(waiter_query) = waiter.query {
                if visit(waiter.span, waiter_query).is_some() {
                    // Return a value which indicates that this waiter can be resumed.
                    return Some(Some((query, i)));
                }
            }
        }
    }

    None
}
/// Looks for query cycles by doing a depth first search starting at `query`.
/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
fn cycle_check<D: DepKind>(
    query_map: &QueryMap<D>,
    query: QueryJobId,
    span: Span,
    stack: &mut Vec<(Span, QueryJobId)>,
    visited: &mut FxHashSet<QueryJobId>,
) -> Option<Option<Waiter>> {
    if !visited.insert(query) {
        return if let Some(p) = stack.iter().position(|q| q.1 == query) {
            // We detected a query cycle, fix up the initial span and return Some.

            // Remove previous stack entries.
            stack.drain(0..p);
            // Replace the span for the first query with the cycle cause.
            stack[0].0 = span;
            Some(None)
        } else {
            None
        };
    }

    // A query marked as visited is added to the stack.
    stack.push((span, query));

    // Visit all the waiters.
    let r = visit_waiters(query_map, query, |span, successor| {
        cycle_check(query_map, successor, span, stack, visited)
    });

    // Remove the entry in our stack if we didn't find a cycle.
    if r.is_none() {
        stack.pop();
    }

    r
}
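// For example, if the DFS starts at a query R that waits on a cycle
// A -> B -> C -> A, the stack holds [R, A, B, C] when A is reached a second
// time; `drain(0..p)` removes R, leaving exactly the cycle [A, B, C], and
// A's span is replaced by the span of the wait that closed the cycle.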
/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
fn connected_to_root<D: DepKind>(
    query_map: &QueryMap<D>,
    query: QueryJobId,
    visited: &mut FxHashSet<QueryJobId>,
) -> bool {
    // We already visited this or we're deliberately ignoring it.
    if !visited.insert(query) {
        return false;
    }

    // This query is connected to the root (it has no query parent), return true.
    if query.parent(query_map).is_none() {
        return true;
    }

    visit_waiters(query_map, query, |_, successor| {
        connected_to_root(query_map, successor, visited).then_some(None)
    })
    .is_some()
}
// Deterministically pick a query from a list.
#[cfg(parallel_compiler)]
fn pick_query<'a, T, F, D>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T
where
    F: Fn(&T) -> (Span, QueryJobId),
    D: DepKind,
{
    // Deterministically pick an entry point.
    // FIXME: Sort this instead.
    queries
        .iter()
        .min_by_key(|v| {
            let (span, query) = f(v);
            let hash = query.query(query_map).hash;
            // Prefer entry points which have valid spans for nicer error messages.
            // We add an integer to the tuple ensuring that entry points
            // with valid spans are picked first.
            let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
            (span_cmp, hash)
        })
        .unwrap()
}
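// Using `min_by_key` over `(span_cmp, hash)` makes the choice a pure function
// of the query set rather than of thread scheduling, so the same deadlock
// produces the same cycle error on every run.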
/// Looks for query cycles starting from the last query in `jobs`.
/// If a cycle is found, all queries in the cycle are removed from `jobs` and
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
fn remove_cycle<D: DepKind>(
    query_map: &QueryMap<D>,
    jobs: &mut Vec<QueryJobId>,
    wakelist: &mut Vec<Lrc<QueryWaiter<D>>>,
) -> bool {
    let mut visited = FxHashSet::default();
    let mut stack = Vec::new();
    // Look for a cycle starting with the last query in `jobs`.
    if let Some(waiter) =
        cycle_check(query_map, jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited)
    {
        // The stack is a vector of pairs of spans and queries; reverse it so that
        // the earlier entries require later entries.
        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();

        // Shift the spans so that queries are matched with the span for their waitee.
        spans.rotate_right(1);

        // Zip them back together.
        let mut stack: Vec<_> = iter::zip(spans, queries).collect();

        // Remove the queries in our cycle from the list of jobs to look at.
        for r in &stack {
            if let Some(pos) = jobs.iter().position(|j| j == &r.1) {
                jobs.remove(pos);
            }
        }

        // Find the queries in the cycle which are
        // connected to queries outside the cycle.
        let entry_points = stack
            .iter()
            .filter_map(|&(span, query)| {
                if query.parent(query_map).is_none() {
                    // This query is connected to the root (it has no query parent).
                    Some((span, query, None))
                } else {
                    let mut waiters = Vec::new();
                    // Find all the direct waiters who lead to the root.
                    visit_waiters(query_map, query, |span, waiter| {
                        // Mark all the other queries in the cycle as already visited.
                        let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1));

                        if connected_to_root(query_map, waiter, &mut visited) {
                            waiters.push((span, waiter));
                        }

                        None
                    });
                    if waiters.is_empty() {
                        None
                    } else {
                        // Deterministically pick one of the waiters to show to the user.
                        let waiter = *pick_query(query_map, &waiters, |s| *s);
                        Some((span, query, Some(waiter)))
                    }
                }
            })
            .collect::<Vec<(Span, QueryJobId, Option<(Span, QueryJobId)>)>>();

        // Deterministically pick an entry point.
        let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));

        // Shift the stack so that our entry point is first.
        let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
        if let Some(pos) = entry_point_pos {
            stack.rotate_left(pos);
        }

        let usage = usage.as_ref().map(|(span, query)| (*span, query.query(query_map)));

        // Create the cycle error.
        let error = CycleError {
            usage,
            cycle: stack
                .iter()
                .map(|&(s, ref q)| QueryInfo { span: s, query: q.query(query_map) })
                .collect(),
        };

        // We unwrap `waiter` here since there must always be one
        // edge which is resumable / waited using a query latch.
        let (waitee_query, waiter_idx) = waiter.unwrap();

        // Extract the waiter we want to resume.
        let waiter = waitee_query.latch(query_map).unwrap().extract_waiter(waiter_idx);

        // Set the cycle error so it will be picked up when resumed.
        *waiter.cycle.lock() = Some(error);

        // Put the waiter on the list of things to resume.
        wakelist.push(waiter);

        true
    } else {
        false
    }
}
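// To summarize, a successful `remove_cycle` has three effects: the cycle's
// jobs are removed from `jobs`, the chosen resumable waiter gets a
// `CycleError` stored in its `cycle` slot, and that waiter is pushed onto
// `wakelist` so the caller can resume every broken cycle at once.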
/// Detects query cycles by using depth first search over all active query jobs.
/// If a query cycle is found it will break the cycle by finding an edge which
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Registry) {
    let on_panic = OnDrop(|| {
        eprintln!("deadlock handler panicked, aborting process");
        process::abort();
    });

    let mut wakelist = Vec::new();
    let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();

    let mut found_cycle = false;

    while jobs.len() > 0 {
        if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
            found_cycle = true;
        }
    }

    // Check that a cycle was found. It is possible for a deadlock to occur without
    // a query cycle if a query which can be waited on uses Rayon to do multithreading
    // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
    // wait using Rayon on B. Rayon may then switch to executing another query (Y)
    // which in turn will wait on X causing a deadlock. We have a false dependency from
    // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
    // only considers the true dependency and won't detect a cycle.
    assert!(found_cycle);

    // FIXME: Ensure this won't cause a deadlock before we return.
    for waiter in wakelist.into_iter() {
        waiter.notify(registry);
    }

    on_panic.disable();
}
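// This function is meant to run on its own thread once rustc-rayon has
// determined that every worker thread is blocked; in rustc the wiring that
// installs it as the thread pool's deadlock handler lives in
// `rustc_interface` (an assumption about the caller, not enforced here).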
pub(crate) fn report_cycle<'a, D: DepKind>(
    sess: &'a Session,
    CycleError { usage, cycle: stack }: &CycleError<D>,
) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
    assert!(!stack.is_empty());

    let span = stack[0].query.default_span(stack[1 % stack.len()].span);

    let mut cycle_stack = Vec::new();

    use crate::error::StackCount;
    let stack_count = if stack.len() == 1 { StackCount::Single } else { StackCount::Multiple };

    for i in 1..stack.len() {
        let query = &stack[i].query;
        let span = query.default_span(stack[(i + 1) % stack.len()].span);
        cycle_stack.push(CycleStack { span, desc: query.description.to_owned() });
    }

    let mut cycle_usage = None;
    if let Some((span, ref query)) = *usage {
        cycle_usage = Some(crate::error::CycleUsage {
            span: query.default_span(span),
            usage: query.description.to_string(),
        });
    }

    let alias = if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TyAlias)) {
        Some(crate::error::Alias::Ty)
    } else if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TraitAlias)) {
        Some(crate::error::Alias::Trait)
    } else {
        None
    };

    let cycle_diag = crate::error::Cycle {
        span,
        cycle_stack,
        stack_bottom: stack[0].query.description.to_owned(),
        alias,
        cycle_usage,
        stack_count,
    };

    cycle_diag.into_diagnostic(&sess.parse_sess.span_diagnostic)
}
pub fn print_query_stack<Qcx: QueryContext>(
    qcx: Qcx,
    mut current_query: Option<QueryJobId>,
    handler: &Handler,
    num_frames: Option<usize>,
) -> usize {
    // Be careful relying on global state here: this code is called from
    // a panic hook, which means that the global `Handler` may be in a weird
    // state if it was responsible for triggering the panic.
    let mut i = 0;
    let query_map = qcx.try_collect_active_jobs();

    while let Some(query) = current_query {
        if Some(i) == num_frames {
            break;
        }
        let Some(query_info) = query_map.as_ref().and_then(|map| map.get(&query)) else {
            break;
        };
        let mut diag = Diagnostic::new(
            Level::FailureNote,
            &format!("#{} [{:?}] {}", i, query_info.query.dep_kind, query_info.query.description),
        );
        diag.span = query_info.job.span.into();
        handler.force_print_diagnostic(diag);

        current_query = query_info.job.parent;
        i += 1;
    }

    i
}
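
// The returned frame count lets the caller report whether the query stack was
// truncated. Passing `num_frames: None` prints every active frame, while a
// small limit such as `Some(2)` (plausibly what an ICE hook would use for
// shortened backtraces) stops early.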