use crate::error::CycleStack;
use crate::query::plumbing::CycleError;
use crate::query::{QueryContext, QueryStackFrame};

use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, Level};
use rustc_hir::def::DefKind;
use rustc_session::{Session, SessionDiagnostic};
use rustc_span::Span;

use std::num::NonZeroU64;

#[cfg(parallel_compiler)]
use {
    parking_lot::{Condvar, Mutex},
    rustc_data_structures::fx::FxHashSet,
    rustc_data_structures::sync::Lock,
    rustc_data_structures::sync::Lrc,
    rustc_data_structures::{jobserver, OnDrop},
    rustc_rayon_core as rayon_core,
    rustc_span::DUMMY_SP,
    std::iter::{self, FromIterator},
    std::{mem, process},
};
/// Represents a span and a query key.
#[derive(Clone, Debug)]
pub struct QueryInfo {
    /// The span corresponding to the reason for which this query was required.
    pub span: Span,
    pub query: QueryStackFrame,
}
pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>;

/// A value uniquely identifying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryJobId(pub NonZeroU64);
impl QueryJobId {
    fn query(self, map: &QueryMap) -> QueryStackFrame {
        map.get(&self).unwrap().query.clone()
    }

    #[cfg(parallel_compiler)]
    fn span(self, map: &QueryMap) -> Span {
        map.get(&self).unwrap().job.span
    }

    #[cfg(parallel_compiler)]
    fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
        map.get(&self).unwrap().job.parent
    }

    #[cfg(parallel_compiler)]
    fn latch<'a>(self, map: &'a QueryMap) -> Option<&'a QueryLatch> {
        map.get(&self).unwrap().job.latch.as_ref()
    }
}
#[derive(Clone)]
pub struct QueryJobInfo {
    pub query: QueryStackFrame,
    pub job: QueryJob,
}
/// Represents an active query job.
#[derive(Clone)]
pub struct QueryJob {
    pub id: QueryJobId,

    /// The span corresponding to the reason for which this query was required.
    pub span: Span,

    /// The parent query job which created this job and is implicitly waiting on it.
    pub parent: Option<QueryJobId>,

    /// The latch that is used to wait on this job.
    #[cfg(parallel_compiler)]
    latch: Option<QueryLatch>,
}
impl QueryJob {
    /// Creates a new query job.
    pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
        QueryJob {
            id,
            span,
            parent,
            #[cfg(parallel_compiler)]
            latch: None,
        }
    }

    #[cfg(parallel_compiler)]
    pub(super) fn latch(&mut self) -> QueryLatch {
        if self.latch.is_none() {
            self.latch = Some(QueryLatch::new());
        }
        self.latch.as_ref().unwrap().clone()
    }

    /// Signals to waiters that the query is complete.
    ///
    /// This does nothing for single threaded rustc,
    /// as there are no concurrent jobs which could be waiting on us.
    pub fn signal_complete(self) {
        #[cfg(parallel_compiler)]
        {
            if let Some(latch) = self.latch {
                latch.set();
            }
        }
    }
}
impl QueryJobId {
    #[cfg(not(parallel_compiler))]
    pub(super) fn find_cycle_in_stack(
        &self,
        query_map: QueryMap,
        current_job: &Option<QueryJobId>,
        span: Span,
    ) -> CycleError {
        // Find the waitee amongst the parents of `current_job`.
        let mut cycle = Vec::new();
        let mut current_job = Option::clone(current_job);

        while let Some(job) = current_job {
            let info = query_map.get(&job).unwrap();
            cycle.push(QueryInfo { span: info.job.span, query: info.query.clone() });

            if job == *self {
                cycle.reverse();

                // This is the end of the cycle. The span entry we included was
                // for the usage of the cycle itself, and not part of the cycle;
                // replace it with the span which caused the cycle to form.
                cycle[0].span = span;
                // Find out why the cycle itself was used.
                let usage = info
                    .job
                    .parent
                    .as_ref()
                    .map(|parent| (info.job.span, parent.query(&query_map)));
                return CycleError { usage, cycle };
            }

            current_job = info.job.parent;
        }

        panic!("did not find a cycle")
    }
    pub fn try_find_layout_root(&self, query_map: QueryMap) -> Option<(QueryJobInfo, usize)> {
        let mut last_layout = None;
        let mut current_id = Some(*self);
        let mut depth = 0;

        while let Some(id) = current_id {
            let info = query_map.get(&id).unwrap();
            if info.query.name == "layout_of" {
                depth += 1;
                last_layout = Some((info.clone(), depth));
            }
            current_id = info.job.parent;
        }
        last_layout
    }
}
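
// A minimal sketch of the single-threaded search above: with one thread there
// is exactly one query stack, so a cycle is found simply by walking `parent`
// links until the waited-on query reappears. Hypothetical test code for
// illustration, not part of the compiler.
#[cfg(test)]
mod stack_cycle_sketch {
    #[test]
    fn finds_waitee_in_parent_chain() {
        // parents[i] is the parent frame of query i; query 0 is the root.
        let parents = [None, Some(0), Some(1)];
        // Query 2 tries to re-enter query 1: walk upwards from 2 and record
        // the frames until we meet the waitee, as `find_cycle_in_stack` does.
        let waitee = 1;
        let mut cycle = Vec::new();
        let mut current = Some(2);
        while let Some(job) = current {
            cycle.push(job);
            if job == waitee {
                break;
            }
            current = parents[job];
        }
        cycle.reverse(); // innermost frame last, as in the code above
        assert_eq!(cycle, vec![1, 2]);
    }
}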
#[cfg(parallel_compiler)]
struct QueryWaiter {
    query: Option<QueryJobId>,
    condvar: Condvar,
    span: Span,
    cycle: Lock<Option<CycleError>>,
}
#[cfg(parallel_compiler)]
impl QueryWaiter {
    fn notify(&self, registry: &rayon_core::Registry) {
        rayon_core::mark_unblocked(registry);
        self.condvar.notify_one();
    }
}
#[cfg(parallel_compiler)]
struct QueryLatchInfo {
    complete: bool,
    waiters: Vec<Lrc<QueryWaiter>>,
}
#[cfg(parallel_compiler)]
#[derive(Clone)]
pub(super) struct QueryLatch {
    info: Lrc<Mutex<QueryLatchInfo>>,
}
#[cfg(parallel_compiler)]
impl QueryLatch {
    fn new() -> Self {
        QueryLatch {
            info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
        }
    }
    /// Waits for the query job to complete.
    pub(super) fn wait_on(&self, query: Option<QueryJobId>, span: Span) -> Result<(), CycleError> {
        let waiter =
            Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
        self.wait_on_inner(&waiter);
        // FIXME: Get rid of this lock. We have ownership of the QueryWaiter,
        // although another thread may still have a Lrc reference, so we cannot
        // use Lrc::get_mut.
        let mut cycle = waiter.cycle.lock();
        match cycle.take() {
            None => Ok(()),
            Some(cycle) => Err(cycle),
        }
    }
    /// Awaits the caller on this latch by blocking the current thread.
    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter>) {
        let mut info = self.info.lock();
        if !info.complete {
            // We push the waiter on to the `waiters` list. It can be accessed inside
            // the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
            // Both of these will remove it from the `waiters` list before resuming
            // this thread.
            info.waiters.push(waiter.clone());

            // If this detects a deadlock and the deadlock handler wants to resume this thread,
            // we have to be in the `wait` call. This is ensured by the deadlock handler
            // taking the `self.info` lock.
            rayon_core::mark_blocked();
            jobserver::release_thread();
            waiter.condvar.wait(&mut info);
            // Release the lock before we potentially block in `acquire_thread`.
            mem::drop(info);
            jobserver::acquire_thread();
        }
    }
    /// Sets the latch and resumes all waiters on it.
    fn set(&self) {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        info.complete = true;
        let registry = rayon_core::Registry::current();
        for waiter in info.waiters.drain(..) {
            waiter.notify(&registry);
        }
    }
    /// Removes a single waiter from the list of waiters.
    /// This is used to break query cycles.
    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter> {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        // Remove the waiter from the list of waiters.
        info.waiters.remove(waiter)
    }
}
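
// A minimal, self-contained sketch of the latch pattern above, using `std`
// primitives in place of `parking_lot` and without the rayon/jobserver
// bookkeeping. The module and its names are hypothetical illustration, not
// part of the compiler.
#[cfg(test)]
mod latch_sketch {
    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;

    struct Latch {
        // Mirrors `QueryLatchInfo::complete`; waiters are implicit in the condvar.
        complete: Mutex<bool>,
        condvar: Condvar,
    }

    impl Latch {
        fn new() -> Self {
            Latch { complete: Mutex::new(false), condvar: Condvar::new() }
        }

        // Block until `set` is called, as `wait_on_inner` does.
        fn wait(&self) {
            let mut complete = self.complete.lock().unwrap();
            while !*complete {
                complete = self.condvar.wait(complete).unwrap();
            }
        }

        // Mark the latch complete and wake every waiter, as `set` does.
        fn set(&self) {
            *self.complete.lock().unwrap() = true;
            self.condvar.notify_all();
        }
    }

    #[test]
    fn latch_wakes_waiter() {
        let latch = Arc::new(Latch::new());
        let waiter = {
            let latch = Arc::clone(&latch);
            thread::spawn(move || latch.wait())
        };
        latch.set();
        waiter.join().unwrap();
    }
}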
/// A resumable waiter of a query. The `usize` is the index into `waiters` in the query's latch.
#[cfg(parallel_compiler)]
type Waiter = (QueryJobId, usize);
/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited.
/// `visit` is called for every waiter and is passed a query waiting on `query_ref`
/// and a span indicating the reason the query waited on `query_ref`.
/// If `visit` returns Some, this function returns.
/// For visits of non-resumable waiters it returns the return value of `visit`.
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter.
/// If all `visit` calls return None, this function also returns None.
#[cfg(parallel_compiler)]
fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
where
    F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
{
    // Visit the parent query which is a non-resumable waiter since it's on the same stack.
    if let Some(parent) = query.parent(query_map) {
        if let Some(cycle) = visit(query.span(query_map), parent) {
            return Some(cycle);
        }
    }

    // Visit the explicit waiters which use condvars and are resumable.
    if let Some(latch) = query.latch(query_map) {
        for (i, waiter) in latch.info.lock().waiters.iter().enumerate() {
            if let Some(waiter_query) = waiter.query {
                if visit(waiter.span, waiter_query).is_some() {
                    // Return a value which indicates that this waiter can be resumed.
                    return Some(Some((query, i)));
                }
            }
        }
    }

    None
}
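
// Decoding the return protocol of `visit_waiters`:
//   None               -> no call to `visit` found anything
//   Some(None)         -> a non-resumable (parent) edge completed the search
//   Some(Some(waiter)) -> a resumable latch edge completed the search, and
//                         `waiter` identifies the latch entry to wake up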
/// Look for query cycles by doing a depth first search starting at `query`.
/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
fn cycle_check(
    query_map: &QueryMap,
    query: QueryJobId,
    span: Span,
    stack: &mut Vec<(Span, QueryJobId)>,
    visited: &mut FxHashSet<QueryJobId>,
) -> Option<Option<Waiter>> {
    if !visited.insert(query) {
        return if let Some(p) = stack.iter().position(|q| q.1 == query) {
            // We detected a query cycle; fix up the initial span and return Some.

            // Remove previous stack entries.
            stack.drain(0..p);
            // Replace the span for the first query with the cycle cause.
            stack[0].0 = span;
            Some(None)
        } else {
            None
        };
    }

    // A query marked as visited is added to the stack.
    stack.push((span, query));

    // Visit all the waiters.
    let r = visit_waiters(query_map, query, |span, successor| {
        cycle_check(query_map, successor, span, stack, visited)
    });

    // Remove the entry in our stack if we didn't find a cycle.
    if r.is_none() {
        stack.pop();
    }

    r
}
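
// A minimal sketch of the same idea on a plain adjacency list, showing the
// `visited`/`stack` interplay without the query machinery. Hypothetical test
// code for illustration, not part of the compiler.
#[cfg(test)]
mod cycle_check_sketch {
    use std::collections::HashSet;

    // Depth first search; on success `stack` holds exactly the cycle.
    fn find_cycle(
        edges: &[Vec<usize>],
        node: usize,
        stack: &mut Vec<usize>,
        visited: &mut HashSet<usize>,
    ) -> bool {
        if !visited.insert(node) {
            // Already seen: it is a cycle only if the node is still on the stack.
            if let Some(p) = stack.iter().position(|&n| n == node) {
                stack.drain(0..p); // drop the acyclic prefix, as `cycle_check` does
                return true;
            }
            return false;
        }
        stack.push(node);
        for &succ in &edges[node] {
            if find_cycle(edges, succ, stack, visited) {
                return true;
            }
        }
        stack.pop(); // no cycle through this node
        false
    }

    #[test]
    fn detects_a_cycle() {
        // 0 -> 1 -> 2 -> 1: the cycle is [1, 2].
        let edges = vec![vec![1], vec![2], vec![1]];
        let (mut stack, mut visited) = (Vec::new(), HashSet::new());
        assert!(find_cycle(&edges, 0, &mut stack, &mut visited));
        assert_eq!(stack, vec![1, 2]);
    }
}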
/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
fn connected_to_root(
    query_map: &QueryMap,
    query: QueryJobId,
    visited: &mut FxHashSet<QueryJobId>,
) -> bool {
    // We already visited this or we're deliberately ignoring it.
    if !visited.insert(query) {
        return false;
    }

    // This query is connected to the root (it has no query parent); return true.
    if query.parent(query_map).is_none() {
        return true;
    }

    visit_waiters(query_map, query, |_, successor| {
        connected_to_root(query_map, successor, visited).then_some(None)
    })
    .is_some()
}
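
// Note on `then_some(None)` above: a successful recursive call maps to
// `Some(None)`, which makes `visit_waiters` stop early, and `is_some()`
// then collapses the `Option<Option<Waiter>>` back into a plain bool.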
// Deterministically pick a query from a list.
#[cfg(parallel_compiler)]
fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
where
    F: Fn(&T) -> (Span, QueryJobId),
{
    // Deterministically pick an entry point.
    // FIXME: Sort this instead.
    queries
        .iter()
        .min_by_key(|v| {
            let (span, query) = f(v);
            let hash = query.query(query_map).hash;
            // Prefer entry points which have valid spans for nicer error messages.
            // We add an integer to the tuple ensuring that entry points
            // with valid spans are picked first.
            let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
            (span_cmp, hash)
        })
        .unwrap()
}
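
// Tuples compare lexicographically, so `(0, hash)` (a real span) always sorts
// before `(1, hash)` (DUMMY_SP); the query hash then breaks ties, keeping the
// choice stable across runs.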
/// Looks for query cycles starting from the last query in `jobs`.
/// If a cycle is found, all queries in the cycle are removed from `jobs` and
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
fn remove_cycle(
    query_map: &QueryMap,
    jobs: &mut Vec<QueryJobId>,
    wakelist: &mut Vec<Lrc<QueryWaiter>>,
) -> bool {
    let mut visited = FxHashSet::default();
    let mut stack = Vec::new();
    // Look for a cycle starting with the last query in `jobs`.
    if let Some(waiter) =
        cycle_check(query_map, jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited)
    {
        // The stack is a vector of pairs of spans and queries; reverse it so that
        // the earlier entries require later entries.
        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();

        // Shift the spans so that queries are matched with the span for their waitee.
        spans.rotate_right(1);

        // Zip them back together.
        let mut stack: Vec<_> = iter::zip(spans, queries).collect();

        // Remove the queries in our cycle from the list of jobs to look at.
        for r in &stack {
            if let Some(pos) = jobs.iter().position(|j| j == &r.1) {
                jobs.remove(pos);
            }
        }

        // Find the queries in the cycle which are
        // connected to queries outside the cycle.
        let entry_points = stack
            .iter()
            .filter_map(|&(span, query)| {
                if query.parent(query_map).is_none() {
                    // This query is connected to the root (it has no query parent).
                    Some((span, query, None))
                } else {
                    let mut waiters = Vec::new();
                    // Find all the direct waiters who lead to the root.
                    visit_waiters(query_map, query, |span, waiter| {
                        // Mark all the other queries in the cycle as already visited.
                        let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1));

                        if connected_to_root(query_map, waiter, &mut visited) {
                            waiters.push((span, waiter));
                        }

                        None
                    });
                    if waiters.is_empty() {
                        None
                    } else {
                        // Deterministically pick one of the waiters to show to the user.
                        let waiter = *pick_query(query_map, &waiters, |s| *s);
                        Some((span, query, Some(waiter)))
                    }
                }
            })
            .collect::<Vec<(Span, QueryJobId, Option<(Span, QueryJobId)>)>>();

        // Deterministically pick an entry point.
        let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));

        // Shift the stack so that our entry point is first.
        let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
        if let Some(pos) = entry_point_pos {
            stack.rotate_left(pos);
        }

        let usage = usage.as_ref().map(|(span, query)| (*span, query.query(query_map)));

        // Create the cycle error.
        let error = CycleError {
            usage,
            cycle: stack
                .iter()
                .map(|&(s, ref q)| QueryInfo { span: s, query: q.query(query_map) })
                .collect(),
        };

        // We unwrap `waiter` here since there must always be one
        // edge which is resumable / waited using a query latch.
        let (waitee_query, waiter_idx) = waiter.unwrap();

        // Extract the waiter we want to resume.
        let waiter = waitee_query.latch(query_map).unwrap().extract_waiter(waiter_idx);

        // Set the cycle error so it will be picked up when resumed.
        *waiter.cycle.lock() = Some(error);

        // Put the waiter on the list of things to resume.
        wakelist.push(waiter);

        true
    } else {
        false
    }
}
/// Detects query cycles by using depth first search over all active query jobs.
/// If a query cycle is found it will break the cycle by finding an edge which
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
pub fn deadlock(query_map: QueryMap, registry: &rayon_core::Registry) {
    let on_panic = OnDrop(|| {
        eprintln!("deadlock handler panicked, aborting process");
        process::abort();
    });

    let mut wakelist = Vec::new();
    let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();

    let mut found_cycle = false;

    while !jobs.is_empty() {
        if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
            found_cycle = true;
        }
    }

    // Check that a cycle was found. It is possible for a deadlock to occur without
    // a query cycle if a query which can be waited on uses Rayon to do multithreading
    // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
    // wait using Rayon on B. Rayon may then switch to executing another query (Y)
    // which in turn will wait on X causing a deadlock. We have a false dependency from
    // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
    // only considers the true dependency and won't detect a cycle.
    assert!(found_cycle);

    // FIXME: Ensure this won't cause a deadlock before we return.
    for waiter in wakelist.into_iter() {
        waiter.notify(registry);
    }

    on_panic.disable();
}
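
// For context, a hedged sketch of how a handler like `deadlock` gets wired up.
// The real registration lives outside this crate, in the compiler's interface
// code, and `collect_active_jobs` below is a hypothetical stand-in for however
// the caller snapshots the active query jobs:
//
//     rustc_rayon::ThreadPoolBuilder::new()
//         .deadlock_handler(|| {
//             deadlock(collect_active_jobs(), &rayon_core::Registry::current())
//         })
//         .build_global();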
pub(crate) fn report_cycle<'a>(
    sess: &'a Session,
    CycleError { usage, cycle: stack }: CycleError,
) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
    assert!(!stack.is_empty());

    let span = stack[0].query.default_span(stack[1 % stack.len()].span);

    let mut cycle_stack = Vec::new();

    use crate::error::StackCount;
    let stack_count = if stack.len() == 1 { StackCount::Single } else { StackCount::Multiple };

    for i in 1..stack.len() {
        let query = &stack[i].query;
        let span = query.default_span(stack[(i + 1) % stack.len()].span);
        cycle_stack.push(CycleStack { span, desc: query.description.to_owned() });
    }

    let mut cycle_usage = None;
    if let Some((span, query)) = usage {
        cycle_usage = Some(crate::error::CycleUsage {
            span: query.default_span(span),
            usage: query.description,
        });
    }

    let alias = if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TyAlias)) {
        Some(crate::error::Alias::Ty)
    } else if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TraitAlias)) {
        Some(crate::error::Alias::Trait)
    } else {
        None
    };

    let cycle_diag = crate::error::Cycle {
        span,
        cycle_stack,
        stack_bottom: stack[0].query.description.to_owned(),
        alias,
        cycle_usage,
        stack_count,
    };

    cycle_diag.into_diagnostic(&sess.parse_sess.span_diagnostic)
}
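
// Example of the wrap-around indexing above: for a two-entry cycle [a, b],
// the span for `a` is derived from `b` (index 1 % 2) and the span for `b`
// wraps back to `a` (index (1 + 1) % 2 == 0). For a self-cycle [a], both
// indices collapse to 0, so the entry points at itself.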
pub fn print_query_stack<CTX: QueryContext>(
    tcx: CTX,
    mut current_query: Option<QueryJobId>,
    handler: &Handler,
    num_frames: Option<usize>,
) -> usize {
    // Be careful relying on global state here: this code is called from
    // a panic hook, which means that the global `Handler` may be in a weird
    // state if it was responsible for triggering the panic.
    let mut i = 0;
    let query_map = tcx.try_collect_active_jobs();

    while let Some(query) = current_query {
        if Some(i) == num_frames {
            break;
        }
        let Some(query_info) = query_map.as_ref().and_then(|map| map.get(&query)) else {
            break;
        };
        let mut diag = Diagnostic::new(
            Level::FailureNote,
            &format!("#{} [{}] {}", i, query_info.query.name, query_info.query.description),
        );
        diag.span = query_info.job.span.into();
        handler.force_print_diagnostic(diag);

        current_query = query_info.job.parent;
        i += 1;
    }

    i
}
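
// For context, a hedged sketch of a caller: the compiler's panic hook invokes
// this with a frame limit and can compare the returned count against that
// limit to report truncation. Hypothetical wiring, not part of this crate:
//
//     let depth = print_query_stack(tcx, tcx.current_query_job(), handler, Some(limit));
//     if depth == limit { /* some frames were omitted */ }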