use crate::ty::context::TyCtxt;
use crate::ty::query::plumbing::CycleError;
use crate::ty::query::Query;
use crate::ty::tls;

use rustc_data_structures::sync::Lrc;
use rustc_span::Span;

#[cfg(not(parallel_compiler))]
use std::ptr;

#[cfg(parallel_compiler)]
use {
    parking_lot::{Condvar, Mutex},
    rustc_data_structures::fx::FxHashSet,
    rustc_data_structures::stable_hasher::{HashStable, StableHasher},
    rustc_data_structures::sync::Lock,
    rustc_data_structures::{jobserver, OnDrop},
    rustc_rayon_core as rayon_core,
    rustc_span::DUMMY_SP,
    std::iter::FromIterator,
    std::{mem, process, thread},
};
/// Represents a span and a query key.
#[derive(Clone, Debug)]
pub struct QueryInfo<'tcx> {
    /// The span corresponding to the reason for which this query was required.
    pub span: Span,
    pub query: Query<'tcx>,
}
/// Represents an active query job.
pub struct QueryJob<'tcx> {
    pub info: QueryInfo<'tcx>,

    /// The parent query job which created this job and is implicitly waiting on it.
    pub parent: Option<Lrc<QueryJob<'tcx>>>,

    /// The latch that is used to wait on this job.
    #[cfg(parallel_compiler)]
    latch: QueryLatch<'tcx>,
}
impl<'tcx> QueryJob<'tcx> {
    /// Creates a new query job.
    pub fn new(info: QueryInfo<'tcx>, parent: Option<Lrc<QueryJob<'tcx>>>) -> Self {
        QueryJob {
            info,
            parent,
            #[cfg(parallel_compiler)]
            latch: QueryLatch::new(),
        }
    }
    /// Waits for the query job to complete.
    #[cfg(parallel_compiler)]
    pub(super) fn r#await(&self, tcx: TyCtxt<'tcx>, span: Span) -> Result<(), CycleError<'tcx>> {
        tls::with_related_context(tcx, move |icx| {
            let waiter = Lrc::new(QueryWaiter {
                query: icx.query.clone(),
                span,
                cycle: Lock::new(None),
                condvar: Condvar::new(),
            });
            self.latch.r#await(&waiter);
            // FIXME: Get rid of this lock. We have ownership of the QueryWaiter,
            // although another thread may still have a Lrc reference, so we cannot
            // use Lrc::get_mut.
            let mut cycle = waiter.cycle.lock();
            match cycle.take() {
                None => Ok(()),
                Some(cycle) => Err(cycle),
            }
        })
    }
    #[cfg(not(parallel_compiler))]
    pub(super) fn find_cycle_in_stack(&self, tcx: TyCtxt<'tcx>, span: Span) -> CycleError<'tcx> {
        // Get the currently executing query (waiter) and find the waitee amongst its parents.
        let mut current_job = tls::with_related_context(tcx, |icx| icx.query.clone());
        let mut cycle = Vec::new();

        while let Some(job) = current_job {
            cycle.push(job.info.clone());

            if ptr::eq(&*job, self) {
                cycle.reverse();

                // This is the end of the cycle. The span entry we included was for
                // the usage of the cycle itself, and not part of the cycle.
                // Replace it with the span which caused the cycle to form.
                cycle[0].span = span;
                // Find out why the cycle itself was used.
                let usage =
                    job.parent.as_ref().map(|parent| (job.info.span, parent.info.query.clone()));
                return CycleError { usage, cycle };
            }

            current_job = job.parent.clone();
        }

        panic!("did not find a cycle")
    }
    /// Signals to waiters that the query is complete.
    ///
    /// This does nothing for single-threaded rustc,
    /// as there are no concurrent jobs which could be waiting on us.
    pub fn signal_complete(&self) {
        #[cfg(parallel_compiler)]
        self.latch.set();
    }

    #[cfg(parallel_compiler)]
    fn as_ptr(&self) -> *const QueryJob<'tcx> {
        self as *const _
    }
}
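
// A standalone sketch of the parent-chain walk in `find_cycle_in_stack` above:
// the currently executing query follows `parent` links until it meets the job
// it is trying to wait on, which closes the cycle. Toy types only; nothing
// here is a rustc API.
#[cfg(test)]
mod parent_chain_demo {
    use std::rc::Rc;

    struct Job {
        name: &'static str,
        parent: Option<Rc<Job>>,
    }

    /// Walks the parent chain from `start`, collecting names until `target` is found.
    fn find_cycle(start: &Rc<Job>, target: &Rc<Job>) -> Option<Vec<&'static str>> {
        let mut current = Some(start.clone());
        let mut cycle = Vec::new();
        while let Some(job) = current {
            cycle.push(job.name);
            if Rc::ptr_eq(&job, target) {
                return Some(cycle);
            }
            current = job.parent.clone();
        }
        None
    }

    #[test]
    fn finds_the_waitee_among_parents() {
        let a = Rc::new(Job { name: "a", parent: None });
        let b = Rc::new(Job { name: "b", parent: Some(a.clone()) });
        let c = Rc::new(Job { name: "c", parent: Some(b.clone()) });
        // `c` is executing and tries to wait on `a`: the chain c -> b -> a closes a cycle.
        assert_eq!(find_cycle(&c, &a), Some(vec!["c", "b", "a"]));
    }
}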
#[cfg(parallel_compiler)]
struct QueryWaiter<'tcx> {
    query: Option<Lrc<QueryJob<'tcx>>>,
    condvar: Condvar,
    span: Span,
    cycle: Lock<Option<CycleError<'tcx>>>,
}
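
// A minimal, std-only sketch of how the `cycle` slot above is used: a blocked
// waiter owns a slot, and whoever wakes it may first deposit an error, which
// the waiter inspects once it resumes (this is how `r#await` learns about a
// broken cycle). All names here are illustrative, not rustc APIs.
#[cfg(test)]
mod waiter_slot_demo {
    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;

    struct Waiter {
        slot: Mutex<(bool, Option<String>)>, // (woken, injected error)
        condvar: Condvar,
    }

    #[test]
    fn error_deposited_before_wakeup_is_observed() {
        let waiter = Arc::new(Waiter { slot: Mutex::new((false, None)), condvar: Condvar::new() });
        let w = waiter.clone();
        let handle = thread::spawn(move || {
            let mut guard = w.slot.lock().unwrap();
            // Re-check in a loop to guard against spurious wakeups.
            while !guard.0 {
                guard = w.condvar.wait(guard).unwrap();
            }
            // Resumed: report the injected error, if any.
            guard.1.take()
        });
        // Deposit a "cycle error", then wake the waiter.
        let mut guard = waiter.slot.lock().unwrap();
        guard.1 = Some("cycle".to_string());
        guard.0 = true;
        drop(guard);
        waiter.condvar.notify_one();
        assert_eq!(handle.join().unwrap(), Some("cycle".to_string()));
    }
}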
#[cfg(parallel_compiler)]
impl<'tcx> QueryWaiter<'tcx> {
    fn notify(&self, registry: &rayon_core::Registry) {
        rayon_core::mark_unblocked(registry);
        self.condvar.notify_one();
    }
}
#[cfg(parallel_compiler)]
struct QueryLatchInfo<'tcx> {
    complete: bool,
    waiters: Vec<Lrc<QueryWaiter<'tcx>>>,
}

#[cfg(parallel_compiler)]
struct QueryLatch<'tcx> {
    info: Mutex<QueryLatchInfo<'tcx>>,
}
#[cfg(parallel_compiler)]
impl<'tcx> QueryLatch<'tcx> {
    fn new() -> Self {
        QueryLatch { info: Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() }) }
    }

    /// Awaits the caller on this latch by blocking the current thread.
    fn r#await(&self, waiter: &Lrc<QueryWaiter<'tcx>>) {
        let mut info = self.info.lock();
        if !info.complete {
            // We push the waiter on to the `waiters` list. It can be accessed inside
            // the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
            // Both of these will remove it from the `waiters` list before resuming
            // this thread.
            info.waiters.push(waiter.clone());

            // If this detects a deadlock and the deadlock handler wants to resume this
            // thread, we have to be in the `wait` call. This is ensured by the deadlock
            // handler getting the `self.info` lock.
            rayon_core::mark_blocked();
            jobserver::release_thread();
            waiter.condvar.wait(&mut info);
            // Release the lock before we potentially block in `acquire_thread`.
            mem::drop(info);
            jobserver::acquire_thread();
        }
    }

    /// Sets the latch and resumes all waiters on it.
    fn set(&self) {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        info.complete = true;
        let registry = rayon_core::Registry::current();
        for waiter in info.waiters.drain(..) {
            waiter.notify(&registry);
        }
    }

    /// Removes a single waiter from the list of waiters.
    /// This is used to break query cycles.
    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<'tcx>> {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        // Remove the waiter from the list of waiters.
        info.waiters.remove(waiter)
    }
}
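
// The latch above is essentially a one-shot event: `r#await` blocks until `set`
// flips `complete` and wakes every waiter. A minimal std-only sketch of that
// idea, omitting the rayon/jobserver bookkeeping that the real code needs for
// deadlock detection. Illustrative only; not a rustc API.
#[cfg(test)]
mod latch_demo {
    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;

    /// A one-shot latch: `wait` blocks until some thread calls `set`.
    struct Latch {
        state: Mutex<bool>,
        condvar: Condvar,
    }

    impl Latch {
        fn new() -> Self {
            Latch { state: Mutex::new(false), condvar: Condvar::new() }
        }

        fn wait(&self) {
            let mut complete = self.state.lock().unwrap();
            // Re-check in a loop to guard against spurious wakeups.
            while !*complete {
                complete = self.condvar.wait(complete).unwrap();
            }
        }

        fn set(&self) {
            *self.state.lock().unwrap() = true;
            self.condvar.notify_all();
        }
    }

    #[test]
    fn waiter_resumes_after_set() {
        let latch = Arc::new(Latch::new());
        let l = latch.clone();
        let waiter = thread::spawn(move || l.wait());
        latch.set();
        waiter.join().unwrap();
    }
}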
/// A resumable waiter of a query. The usize is the index into waiters in the query's latch.
#[cfg(parallel_compiler)]
type Waiter<'tcx> = (Lrc<QueryJob<'tcx>>, usize);

/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited.
/// `visit` is called for every waiter and is passed a query waiting on `query_ref`
/// and a span indicating the reason the query waited on `query_ref`.
/// If `visit` returns Some, this function returns.
/// For visits of non-resumable waiters it returns the return value of `visit`.
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter.
/// If all `visit` calls return None, this function also returns None.
#[cfg(parallel_compiler)]
fn visit_waiters<'tcx, F>(query: Lrc<QueryJob<'tcx>>, mut visit: F) -> Option<Option<Waiter<'tcx>>>
where
    F: FnMut(Span, Lrc<QueryJob<'tcx>>) -> Option<Option<Waiter<'tcx>>>,
{
    // Visit the parent query, which is a non-resumable waiter since it's on the same stack.
    if let Some(ref parent) = query.parent {
        if let Some(cycle) = visit(query.info.span, parent.clone()) {
            return Some(cycle);
        }
    }

    // Visit the explicit waiters which use condvars and are resumable.
    for (i, waiter) in query.latch.info.lock().waiters.iter().enumerate() {
        if let Some(ref waiter_query) = waiter.query {
            if visit(waiter.span, waiter_query.clone()).is_some() {
                // Return a value which indicates that this waiter can be resumed.
                return Some(Some((query.clone(), i)));
            }
        }
    }

    None
}
/// Looks for query cycles by doing a depth first search starting at `query`.
/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
fn cycle_check<'tcx>(
    query: Lrc<QueryJob<'tcx>>,
    span: Span,
    stack: &mut Vec<(Span, Lrc<QueryJob<'tcx>>)>,
    visited: &mut FxHashSet<*const QueryJob<'tcx>>,
) -> Option<Option<Waiter<'tcx>>> {
    if !visited.insert(query.as_ptr()) {
        return if let Some(p) = stack.iter().position(|q| q.1.as_ptr() == query.as_ptr()) {
            // We detected a query cycle; fix up the initial span and return Some.

            // Remove the previous stack entries which are not part of the cycle.
            stack.drain(0..p);
            // Replace the span for the first query with the cycle cause.
            stack[0].0 = span;
            Some(None)
        } else {
            None
        };
    }

    // A query marked as visited is added to the stack.
    stack.push((span, query.clone()));

    // Visit all the waiters.
    let r = visit_waiters(query, |span, successor| cycle_check(successor, span, stack, visited));

    // Remove the entry from our stack if we didn't find a cycle.
    if r.is_none() {
        stack.pop();
    }

    r
}
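
// A self-contained analogue of `cycle_check` on a plain adjacency list: a DFS
// with an explicit path `stack`, where revisiting a node that is still on the
// stack means a cycle. As in the real code, a node in `visited` but off the
// stack yields None, which the deadlock handler compensates for by restarting
// the search from every active job. Toy code, not a rustc API.
#[cfg(test)]
mod cycle_check_demo {
    use std::collections::HashSet;

    /// DFS from `node`; returns the cycle as a node list if one is reachable.
    fn cycle_check(
        node: usize,
        edges: &[Vec<usize>],
        stack: &mut Vec<usize>,
        visited: &mut HashSet<usize>,
    ) -> Option<Vec<usize>> {
        if !visited.insert(node) {
            // Already seen: it's a cycle only if the node is still on our stack.
            return stack.iter().position(|&n| n == node).map(|p| stack[p..].to_vec());
        }
        stack.push(node);
        for &succ in &edges[node] {
            if let Some(cycle) = cycle_check(succ, edges, stack, visited) {
                return Some(cycle);
            }
        }
        // No cycle through this node; remove it from the path.
        stack.pop();
        None
    }

    #[test]
    fn detects_a_cycle() {
        // 0 -> 1 -> 2 -> 1, so the cycle is [1, 2].
        let edges = vec![vec![1], vec![2], vec![1]];
        let mut stack = Vec::new();
        let mut visited = HashSet::new();
        assert_eq!(cycle_check(0, &edges, &mut stack, &mut visited), Some(vec![1, 2]));
    }
}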
/// Finds out if there's a path to the compiler root (i.e., code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
fn connected_to_root<'tcx>(
    query: Lrc<QueryJob<'tcx>>,
    visited: &mut FxHashSet<*const QueryJob<'tcx>>,
) -> bool {
    // We already visited this, or we're deliberately ignoring it.
    if !visited.insert(query.as_ptr()) {
        return false;
    }

    // This query is connected to the root (it has no query parent), return true.
    if query.parent.is_none() {
        return true;
    }

    visit_waiters(query, |_, successor| connected_to_root(successor, visited).then_some(None))
        .is_some()
}
// Deterministically pick a query from a list.
#[cfg(parallel_compiler)]
fn pick_query<'a, 'tcx, T, F: Fn(&T) -> (Span, Lrc<QueryJob<'tcx>>)>(
    tcx: TyCtxt<'tcx>,
    queries: &'a [T],
    f: F,
) -> &'a T {
    // Deterministically pick an entry point.
    // FIXME: Sort this instead.
    let mut hcx = tcx.create_stable_hashing_context();
    queries
        .iter()
        .min_by_key(|v| {
            let (span, query) = f(v);
            let mut stable_hasher = StableHasher::new();
            query.info.query.hash_stable(&mut hcx, &mut stable_hasher);
            // Prefer entry points which have valid spans for nicer error messages.
            // We add an integer to the tuple to ensure that entry points
            // with valid spans are picked first.
            let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
            (span_cmp, stable_hasher.finish::<u64>())
        })
        .unwrap()
}
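
// A std-only sketch of the deterministic pick above. `StableHasher` is rustc's
// hasher with output that is stable across sessions; std's `DefaultHasher::new()`
// only promises that all hashers created via `new` behave identically, which is
// enough to illustrate the (prefer-valid-span, hash) ordering trick. The names
// below are illustrative, not rustc APIs.
#[cfg(test)]
mod pick_query_demo {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    /// Picks the entry with the smallest hash, preferring entries with a "valid
    /// span" (modelled here as a boolean) by making that the first tuple field.
    fn pick<'a>(entries: &'a [(bool, &'a str)]) -> &'a (bool, &'a str) {
        entries
            .iter()
            .min_by_key(|&&(has_span, name)| {
                let mut hasher = DefaultHasher::new();
                name.hash(&mut hasher);
                // `false < true`, so entries with a valid span sort first.
                (!has_span, hasher.finish())
            })
            .unwrap()
    }

    #[test]
    fn prefers_entries_with_spans() {
        let entries = [(false, "no_span"), (true, "with_span")];
        assert_eq!(pick(&entries).1, "with_span");
    }
}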
/// Looks for query cycles starting from the last query in `jobs`.
/// If a cycle is found, all queries in the cycle are removed from `jobs` and
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
fn remove_cycle<'tcx>(
    jobs: &mut Vec<Lrc<QueryJob<'tcx>>>,
    wakelist: &mut Vec<Lrc<QueryWaiter<'tcx>>>,
    tcx: TyCtxt<'tcx>,
) -> bool {
    let mut visited = FxHashSet::default();
    let mut stack = Vec::new();
    // Look for a cycle starting with the last query in `jobs`.
    if let Some(waiter) = cycle_check(jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited) {
        // The stack is a vector of pairs of spans and queries; reverse it so that
        // the earlier entries require later entries.
        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();

        // Shift the spans so that queries are matched with the span for their waitee.
        spans.rotate_right(1);

        // Zip them back together.
        let mut stack: Vec<_> = spans.into_iter().zip(queries).collect();

        // Remove the queries in our cycle from the list of jobs to look at.
        for r in &stack {
            if let Some(pos) = jobs.iter().position(|j| j.as_ptr() == r.1.as_ptr()) {
                jobs.remove(pos);
            }
        }

        // Find the queries in the cycle which are
        // connected to queries outside the cycle.
        let entry_points = stack
            .iter()
            .filter_map(|(span, query)| {
                if query.parent.is_none() {
                    // This query is connected to the root (it has no query parent).
                    Some((*span, query.clone(), None))
                } else {
                    let mut waiters = Vec::new();
                    // Find all the direct waiters which lead to the root.
                    visit_waiters(query.clone(), |span, waiter| {
                        // Mark all the other queries in the cycle as already visited.
                        let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1.as_ptr()));

                        if connected_to_root(waiter.clone(), &mut visited) {
                            waiters.push((span, waiter));
                        }

                        None
                    });
                    if waiters.is_empty() {
                        None
                    } else {
                        // Deterministically pick one of the waiters to show to the user.
                        let waiter = pick_query(tcx, &waiters, |s| s.clone()).clone();
                        Some((*span, query.clone(), Some(waiter)))
                    }
                }
            })
            .collect::<Vec<(Span, Lrc<QueryJob<'tcx>>, Option<(Span, Lrc<QueryJob<'tcx>>)>)>>();

        // Deterministically pick an entry point.
        let (_, entry_point, usage) = pick_query(tcx, &entry_points, |e| (e.0, e.1.clone()));

        // Shift the stack so that our entry point is first.
        let entry_point_pos =
            stack.iter().position(|(_, query)| query.as_ptr() == entry_point.as_ptr());
        if let Some(pos) = entry_point_pos {
            stack.rotate_left(pos);
        }

        let usage = usage.as_ref().map(|(span, query)| (*span, query.info.query.clone()));

        // Create the cycle error.
        let error = CycleError {
            usage,
            cycle: stack
                .iter()
                .map(|&(s, ref q)| QueryInfo { span: s, query: q.info.query.clone() })
                .collect(),
        };

        // We unwrap `waiter` here since there must always be one
        // edge which is resumable / waited on using a query latch.
        let (waitee_query, waiter_idx) = waiter.unwrap();

        // Extract the waiter we want to resume.
        let waiter = waitee_query.latch.extract_waiter(waiter_idx);

        // Set the cycle error so it will be picked up when resumed.
        *waiter.cycle.lock() = Some(error);

        // Put the waiter on the list of things to resume.
        wakelist.push(waiter);

        true
    } else {
        false
    }
}
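
// The unzip / `rotate_right(1)` / zip dance above is easy to get backwards; a
// tiny worked example with strings in place of spans and queries:
#[cfg(test)]
mod span_shift_demo {
    #[test]
    fn rotate_aligns_spans_with_waitees() {
        // Each pair is (span of the edge leading *into* the query, query name).
        let stack = vec![("s0", "a"), ("s1", "b"), ("s2", "c")];
        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().unzip();
        // After the shift, each query is paired with the span for its waitee,
        // i.e. the reason the next query in the cycle was needed.
        spans.rotate_right(1);
        let stack: Vec<_> = spans.into_iter().zip(queries).collect();
        assert_eq!(stack, vec![("s2", "a"), ("s0", "b"), ("s1", "c")]);
    }
}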
/// Creates a new thread and forwards information in thread locals to it.
/// The new thread runs the deadlock handler.
/// Must only be called when a deadlock is about to happen.
#[cfg(parallel_compiler)]
pub unsafe fn handle_deadlock() {
    let registry = rayon_core::Registry::current();

    let gcx_ptr = tls::GCX_PTR.with(|gcx_ptr| gcx_ptr as *const _);
    let gcx_ptr = &*gcx_ptr;

    let rustc_span_globals =
        rustc_span::GLOBALS.with(|rustc_span_globals| rustc_span_globals as *const _);
    let rustc_span_globals = &*rustc_span_globals;
    let syntax_globals = syntax::attr::GLOBALS.with(|syntax_globals| syntax_globals as *const _);
    let syntax_globals = &*syntax_globals;
    thread::spawn(move || {
        tls::GCX_PTR.set(gcx_ptr, || {
            syntax::attr::GLOBALS.set(syntax_globals, || {
                rustc_span::GLOBALS
                    .set(rustc_span_globals, || tls::with_global(|tcx| deadlock(tcx, &registry)))
            });
        })
    });
}
/// Detects query cycles by using depth first search over all active query jobs.
/// If a query cycle is found it will break the cycle by finding an edge which
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
fn deadlock(tcx: TyCtxt<'_>, registry: &rayon_core::Registry) {
    let on_panic = OnDrop(|| {
        eprintln!("deadlock handler panicked, aborting process");
        process::abort();
    });

    let mut wakelist = Vec::new();
    let mut jobs: Vec<_> = tcx.queries.collect_active_jobs();

    let mut found_cycle = false;

    while !jobs.is_empty() {
        if remove_cycle(&mut jobs, &mut wakelist, tcx) {
            found_cycle = true;
        }
    }

    // Check that a cycle was found. It is possible for a deadlock to occur without
    // a query cycle if a query which can be waited on uses Rayon to do multithreading
    // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
    // wait using Rayon on B. Rayon may then switch to executing another query (Y)
    // which in turn will wait on X causing a deadlock. We have a false dependency from
    // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
    // only considers the true dependency and won't detect a cycle.
    assert!(found_cycle);

    // FIXME: Ensure this won't cause a deadlock before we return.
    for waiter in wakelist.into_iter() {
        waiter.notify(registry);
    }

    on_panic.disable();
}
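
// The `on_panic` guard above aborts the process if the deadlock handler itself
// panics. A minimal sketch of that guard pattern; rustc's real `OnDrop` lives
// in rustc_data_structures, and this standalone version only illustrates the
// idea.
#[cfg(test)]
mod on_drop_demo {
    use std::mem;
    use std::sync::atomic::{AtomicUsize, Ordering};

    /// Runs a closure when dropped, unless `disable` defuses it first.
    struct OnDrop<F: Fn()>(F);

    impl<F: Fn()> OnDrop<F> {
        /// Defuses the guard: forget it so `Drop` never runs.
        fn disable(self) {
            mem::forget(self);
        }
    }

    impl<F: Fn()> Drop for OnDrop<F> {
        fn drop(&mut self) {
            (self.0)();
        }
    }

    #[test]
    fn runs_on_drop_but_not_when_disabled() {
        static FIRED: AtomicUsize = AtomicUsize::new(0);

        let guard = OnDrop(|| {
            FIRED.fetch_add(1, Ordering::SeqCst);
        });
        guard.disable(); // defused: the closure must not run
        assert_eq!(FIRED.load(Ordering::SeqCst), 0);

        let guard = OnDrop(|| {
            FIRED.fetch_add(1, Ordering::SeqCst);
        });
        drop(guard); // dropped normally: the closure runs
        assert_eq!(FIRED.load(Ordering::SeqCst), 1);
    }
}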