// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::sync::{Lock, Lrc};
use rustc_data_structures::OnDrop;
use syntax_pos::Span;
use ty::tls;
use ty::query::Query;
use ty::query::plumbing::CycleError;
use ty::context::TyCtxt;
use errors::Diagnostic;
use std::ptr;

#[cfg(parallel_queries)]
use {
    rayon_core,
    parking_lot::{Mutex, Condvar},
    std::iter,
    std::iter::FromIterator,
    std::process,
    std::thread,
    syntax,
    syntax_pos,
    syntax_pos::DUMMY_SP,
    rustc_data_structures::stable_hasher::{StableHasher, HashStable},
};
/// Indicates the state of a query for a given key in a query map.
pub(super) enum QueryResult<'tcx> {
    /// An already executing query. The query job can be used to await its completion.
    Started(Lrc<QueryJob<'tcx>>),

    /// The query panicked. Queries trying to wait on this will raise a fatal error or
    /// silently panic.
    Poisoned,
}

/// A span and a query key.
#[derive(Clone, Debug)]
pub struct QueryInfo<'tcx> {
    /// The span corresponding to the reason this query was required.
    pub span: Span,
    pub query: Query<'tcx>,
}
/// An object representing an active query job.
pub struct QueryJob<'tcx> {
    pub info: QueryInfo<'tcx>,

    /// The parent query job which created this job and is implicitly waiting on it.
    pub parent: Option<Lrc<QueryJob<'tcx>>>,

    /// Diagnostic messages which are emitted while the query executes.
    pub diagnostics: Lock<Vec<Diagnostic>>,

    /// The latch used to wait on this job.
    #[cfg(parallel_queries)]
    latch: QueryLatch<'tcx>,
}

impl<'tcx> QueryJob<'tcx> {
    /// Creates a new query job.
    pub fn new(info: QueryInfo<'tcx>, parent: Option<Lrc<QueryJob<'tcx>>>) -> Self {
        QueryJob {
            diagnostics: Lock::new(Vec::new()),
            info,
            parent,
            #[cfg(parallel_queries)]
            latch: QueryLatch::new(),
        }
    }
    /// Awaits the completion of the query job.
    ///
    /// For single threaded rustc there are no concurrent jobs running, so waiting on any
    /// query means there is a query cycle; thus this always returns a cycle error.
    pub(super) fn await<'lcx>(
        &self,
        tcx: TyCtxt<'_, 'tcx, 'lcx>,
        span: Span,
    ) -> Result<(), CycleError<'tcx>> {
        #[cfg(not(parallel_queries))]
        {
            self.find_cycle_in_stack(tcx, span)
        }

        #[cfg(parallel_queries)]
        {
            tls::with_related_context(tcx, move |icx| {
                let mut waiter = Lrc::new(QueryWaiter {
                    query: icx.query.clone(),
                    span,
                    cycle: Lock::new(None),
                    condvar: Condvar::new(),
                });
                self.latch.await(&waiter);

                // The query either completed normally, or the deadlock handler resumed
                // us and stored a cycle error for us to return.
                match Lrc::get_mut(&mut waiter).unwrap().cycle.get_mut().take() {
                    None => Ok(()),
                    Some(cycle) => Err(cycle),
                }
            })
        }
    }
    #[cfg(not(parallel_queries))]
    fn find_cycle_in_stack<'lcx>(
        &self,
        tcx: TyCtxt<'_, 'tcx, 'lcx>,
        span: Span,
    ) -> Result<(), CycleError<'tcx>> {
        // Get the current executing query (waiter) and find the waitee amongst its parents.
        let mut current_job = tls::with_related_context(tcx, |icx| icx.query.clone());
        let mut cycle = Vec::new();

        while let Some(job) = current_job {
            cycle.push(job.info.clone());

            if ptr::eq(&*job, self) {
                cycle.reverse();

                // This is the end of the cycle. The span entry we included was for
                // the usage of the cycle itself, and not part of the cycle.
                // Replace it with the span which caused the cycle to form.
                cycle[0].span = span;
                // Find out why the cycle itself was used.
                let usage = job.parent.as_ref().map(|parent| {
                    (job.info.span, parent.info.query.clone())
                });
                return Err(CycleError { usage, cycle });
            }

            current_job = job.parent.clone();
        }

        panic!("did not find a cycle")
    }
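
    // For intuition, a hypothetical stand-alone version of this parent-chain
    // walk (illustrative code, not rustc's types): frames form a singly linked
    // list via `parent`, and we search for a frame by address.
    //
    //     struct Frame {
    //         parent: Option<Box<Frame>>,
    //     }
    //
    //     fn is_on_stack(start: &Frame, target: *const Frame) -> bool {
    //         let mut cur = Some(start);
    //         while let Some(frame) = cur {
    //             if std::ptr::eq(frame, target) {
    //                 return true;
    //             }
    //             cur = frame.parent.as_ref().map(|b| &**b);
    //         }
    //         false
    //     }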
    /// Signals to waiters that the query is complete.
    ///
    /// This does nothing for single threaded rustc,
    /// as there are no concurrent jobs which could be waiting on us.
    pub fn signal_complete(&self) {
        #[cfg(parallel_queries)]
        self.latch.set();
    }

    fn as_ptr(&self) -> *const QueryJob<'tcx> {
        self as *const _
    }
}
#[cfg(parallel_queries)]
struct QueryWaiter<'tcx> {
    query: Option<Lrc<QueryJob<'tcx>>>,
    condvar: Condvar,
    span: Span,
    cycle: Lock<Option<CycleError<'tcx>>>,
}

#[cfg(parallel_queries)]
impl<'tcx> QueryWaiter<'tcx> {
    fn notify(&self, registry: &rayon_core::Registry) {
        rayon_core::mark_unblocked(registry);
        self.condvar.notify_one();
    }
}
#[cfg(parallel_queries)]
struct QueryLatchInfo<'tcx> {
    complete: bool,
    waiters: Vec<Lrc<QueryWaiter<'tcx>>>,
}

#[cfg(parallel_queries)]
struct QueryLatch<'tcx> {
    info: Mutex<QueryLatchInfo<'tcx>>,
}

#[cfg(parallel_queries)]
impl<'tcx> QueryLatch<'tcx> {
    fn new() -> Self {
        QueryLatch {
            info: Mutex::new(QueryLatchInfo {
                complete: false,
                waiters: Vec::new(),
            }),
        }
    }
    /// Awaits the caller on this latch by blocking the current thread.
    fn await(&self, waiter: &Lrc<QueryWaiter<'tcx>>) {
        let mut info = self.info.lock();
        if !info.complete {
            // We push the waiter on to the `waiters` list. It can be accessed inside
            // the `wait` call below, by 1) the `set` method or 2) the deadlock handler.
            // Both of these will remove it from the `waiters` list before resuming
            // this thread.
            info.waiters.push(waiter.clone());

            // If rayon detects a deadlock and the deadlock handler wants to resume this
            // thread, we have to already be inside the `wait` call. This is ensured by
            // the deadlock handler taking the `self.info` lock.
            rayon_core::mark_blocked();
            waiter.condvar.wait(&mut info);
        }
    }
    /// Sets the latch and resumes all waiters on it.
    fn set(&self) {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        info.complete = true;
        let registry = rayon_core::Registry::current();
        for waiter in info.waiters.drain(..) {
            waiter.notify(&registry);
        }
    }
    /// Removes a single waiter from the list of waiters.
    /// This is used to break query cycles.
    fn extract_waiter(
        &self,
        waiter: usize,
    ) -> Lrc<QueryWaiter<'tcx>> {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        // Remove the waiter from the list of waiters.
        info.waiters.remove(waiter)
    }
}
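
// A minimal, self-contained sketch of the same one-shot latch protocol
// (hypothetical code built on `std::sync`; rustc's `QueryLatch` additionally
// integrates with rayon's blocked-thread bookkeeping and keeps per-waiter
// state so individual waiters can be extracted to break cycles):
//
//     use std::sync::{Condvar, Mutex};
//
//     struct OneShotLatch {
//         complete: Mutex<bool>,
//         condvar: Condvar,
//     }
//
//     impl OneShotLatch {
//         fn wait(&self) {
//             let mut done = self.complete.lock().unwrap();
//             while !*done {
//                 done = self.condvar.wait(done).unwrap();
//             }
//         }
//
//         fn set(&self) {
//             *self.complete.lock().unwrap() = true;
//             self.condvar.notify_all();
//         }
//     }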
/// A resumable waiter of a query. The usize is the index into `waiters` in the query's latch.
#[cfg(parallel_queries)]
type Waiter<'tcx> = (Lrc<QueryJob<'tcx>>, usize);
/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited.
/// `visit` is called for every waiter and is passed a query waiting on `query`
/// and a span indicating the reason the query waited on `query`.
/// If `visit` returns Some, this function returns.
/// For visits of non-resumable waiters it returns the return value of `visit`.
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter.
/// If all `visit` calls return None, this function also returns None.
#[cfg(parallel_queries)]
fn visit_waiters<'tcx, F>(query: Lrc<QueryJob<'tcx>>, mut visit: F) -> Option<Option<Waiter<'tcx>>>
where
    F: FnMut(Span, Lrc<QueryJob<'tcx>>) -> Option<Option<Waiter<'tcx>>>
{
    // Visit the parent query which is a non-resumable waiter since it's on the same stack.
    if let Some(ref parent) = query.parent {
        if let Some(cycle) = visit(query.info.span, parent.clone()) {
            return Some(cycle);
        }
    }

    // Visit the explicit waiters which use condvars and are resumable.
    for (i, waiter) in query.latch.info.lock().waiters.iter().enumerate() {
        if let Some(ref waiter_query) = waiter.query {
            if visit(waiter.span, waiter_query.clone()).is_some() {
                // Return a value which indicates that this waiter can be resumed.
                return Some(Some((query.clone(), i)));
            }
        }
    }

    None
}
/// Looks for query cycles by doing a depth first search starting at `query`.
/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_queries)]
fn cycle_check<'tcx>(query: Lrc<QueryJob<'tcx>>,
                     span: Span,
                     stack: &mut Vec<(Span, Lrc<QueryJob<'tcx>>)>,
                     visited: &mut FxHashSet<*const QueryJob<'tcx>>
) -> Option<Option<Waiter<'tcx>>> {
    if visited.contains(&query.as_ptr()) {
        return if let Some(p) = stack.iter().position(|q| q.1.as_ptr() == query.as_ptr()) {
            // We detected a query cycle, fix up the initial span and return Some.

            // Remove previous stack entries which are not part of the cycle.
            stack.splice(0..p, iter::empty());
            // Replace the span for the first query with the cycle cause.
            stack[0].0 = span;
            Some(None)
        } else {
            None
        }
    }

    // Mark this query as visited and add it to the stack.
    visited.insert(query.as_ptr());
    stack.push((span, query.clone()));

    // Visit all the waiters.
    let r = visit_waiters(query, |span, successor| {
        cycle_check(successor, span, stack, visited)
    });

    // Remove the entry in our stack if we didn't find a cycle.
    if r.is_none() {
        stack.pop();
    }

    r
}
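
// For intuition, the same algorithm over a plain successor graph (hypothetical
// code, indices instead of `Lrc<QueryJob>` pointers): a depth first search
// which keeps the current path in `stack`; revisiting a node that is still on
// the stack means the path from that node onwards forms a cycle.
//
//     use std::collections::HashSet;
//
//     fn find_cycle(
//         node: usize,
//         graph: &[Vec<usize>],
//         stack: &mut Vec<usize>,
//         visited: &mut HashSet<usize>,
//     ) -> bool {
//         if visited.contains(&node) {
//             if let Some(p) = stack.iter().position(|&n| n == node) {
//                 stack.drain(0..p); // keep only the cycle itself
//                 return true;
//             }
//             return false; // already fully explored, no cycle through here
//         }
//         visited.insert(node);
//         stack.push(node);
//         for &succ in &graph[node] {
//             if find_cycle(succ, graph, stack, visited) {
//                 return true;
//             }
//         }
//         stack.pop(); // no cycle found through this node
//         false
//     }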
/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_queries)]
fn connected_to_root<'tcx>(
    query: Lrc<QueryJob<'tcx>>,
    visited: &mut FxHashSet<*const QueryJob<'tcx>>
) -> bool {
    // We already visited this or we're deliberately ignoring it.
    if visited.contains(&query.as_ptr()) {
        return false;
    }

    // This query is connected to the root (it has no query parent), return true.
    if query.parent.is_none() {
        return true;
    }

    visited.insert(query.as_ptr());

    visit_waiters(query, |_, successor| {
        if connected_to_root(successor, visited) {
            Some(None)
        } else {
            None
        }
    }).is_some()
}
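
// A hypothetical plain-graph version of the same reachability test: depth
// first search for any node without a parent, treating `visited` both as a
// cycle guard and as a way to mask out nodes we deliberately ignore.
//
//     use std::collections::HashSet;
//
//     fn reaches_root(
//         node: usize,
//         parents: &[Option<usize>],
//         waiters: &[Vec<usize>], // outgoing edges, including the parent edge
//         visited: &mut HashSet<usize>,
//     ) -> bool {
//         if visited.contains(&node) {
//             return false; // already seen or masked out
//         }
//         if parents[node].is_none() {
//             return true; // no parent: this node is connected to the root
//         }
//         visited.insert(node);
//         waiters[node].iter().any(|&w| reaches_root(w, parents, waiters, visited))
//     }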
// Deterministically pick a query from a list.
#[cfg(parallel_queries)]
fn pick_query<'a, 'tcx, T, F: Fn(&T) -> (Span, Lrc<QueryJob<'tcx>>)>(
    tcx: TyCtxt<'_, 'tcx, '_>,
    queries: &'a [T],
    f: F
) -> &'a T {
    // Deterministically pick an entry point.
    // FIXME: Sort this instead.
    let mut hcx = tcx.create_stable_hashing_context();
    queries.iter().min_by_key(|v| {
        let (span, query) = f(v);
        let mut stable_hasher = StableHasher::<u64>::new();
        query.info.query.hash_stable(&mut hcx, &mut stable_hasher);
        // Prefer entry points which have valid spans for nicer error messages.
        // We add an integer to the tuple ensuring that entry points
        // with valid spans are picked first.
        let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
        (span_cmp, stable_hasher.finish())
    }).unwrap()
}
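
// The key used by `min_by_key` above is a tuple: entries with a real span sort
// before `DUMMY_SP` ones, and the stable hash breaks the remaining ties the
// same way in every compilation session. A hypothetical equivalent over plain
// data:
//
//     // Prefer entries with a name (a "valid span"), then the smallest id.
//     let items = [(None, 7u64), (Some("a"), 9), (Some("b"), 3)];
//     let picked = items
//         .iter()
//         .min_by_key(|&&(name, id)| (name.is_none() as u8, id))
//         .unwrap();
//     assert_eq!(*picked, (Some("b"), 3));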
/// Looks for query cycles starting from the last query in `jobs`.
/// If a cycle is found, all queries in the cycle are removed from `jobs` and
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_queries)]
fn remove_cycle<'tcx>(
    jobs: &mut Vec<Lrc<QueryJob<'tcx>>>,
    wakelist: &mut Vec<Lrc<QueryWaiter<'tcx>>>,
    tcx: TyCtxt<'_, 'tcx, '_>
) -> bool {
    let mut visited = FxHashSet::default();
    let mut stack = Vec::new();
    // Look for a cycle starting with the last query in `jobs`.
    if let Some(waiter) = cycle_check(jobs.pop().unwrap(),
                                      DUMMY_SP,
                                      &mut stack,
                                      &mut visited) {
        // Reverse the stack so earlier entries require later entries.
        stack.reverse();

        // The stack is a vector of pairs of spans and queries;
        // split it into separate vectors of spans and queries.
        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().unzip();

        // Shift the spans so that queries are matched with the span for their waitee.
        spans.rotate_right(1);

        // Zip them back together.
        let mut stack: Vec<_> = spans.into_iter().zip(queries).collect();
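
        // Example of what `rotate_right(1)` does here (hypothetical data): if
        // the cycle is A -> B -> C (and back to A), each query starts out
        // paired with the span of the edge it *uses*; after the rotation each
        // query is paired with the span of the edge *pointing at it*, i.e. the
        // reason it was required:
        //
        //     let mut spans = vec!["a->b", "b->c", "c->a"];
        //     spans.rotate_right(1);
        //     assert_eq!(spans, ["c->a", "a->b", "b->c"]);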
        // Remove the queries in our cycle from the list of jobs to look at.
        for r in &stack {
            if let Some(pos) = jobs.iter().position(|j| j.as_ptr() == r.1.as_ptr()) {
                jobs.remove(pos);
            }
        }

        // Find the queries in the cycle which are
        // connected to queries outside the cycle.
        let entry_points: Vec<_> = stack.iter().filter_map(|(span, query)| {
            if query.parent.is_none() {
                // This query is connected to the root (it has no query parent).
                Some((*span, query.clone(), None))
            } else {
                let mut waiters = Vec::new();
                // Find all the direct waiters which lead to the root.
                visit_waiters(query.clone(), |span, waiter| {
                    // Mark all the other queries in the cycle as already visited.
                    let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1.as_ptr()));

                    if connected_to_root(waiter.clone(), &mut visited) {
                        waiters.push((span, waiter));
                    }

                    None
                });
                if waiters.is_empty() {
                    None
                } else {
                    // Deterministically pick one of the waiters to show to the user.
                    let waiter = pick_query(tcx, &waiters, |s| s.clone()).clone();
                    Some((*span, query.clone(), Some(waiter)))
                }
            }
        }).collect();
        // Spell out the type of the entry points for clarity.
        let entry_points: Vec<(Span, Lrc<QueryJob<'tcx>>, Option<(Span, Lrc<QueryJob<'tcx>>)>)>
            = entry_points;

        // Deterministically pick an entry point.
        let (_, entry_point, usage) = pick_query(tcx, &entry_points, |e| (e.0, e.1.clone()));

        // Shift the stack so that our entry point is first.
        let entry_point_pos = stack.iter().position(|(_, query)| {
            query.as_ptr() == entry_point.as_ptr()
        });
        if let Some(pos) = entry_point_pos {
            stack.rotate_left(pos);
        }

        let usage = usage.as_ref().map(|(span, query)| (*span, query.info.query.clone()));

        // Create the cycle error.
        let error = CycleError {
            usage,
            cycle: stack.iter().map(|&(s, ref q)| QueryInfo {
                span: s,
                query: q.info.query.clone(),
            }).collect(),
        };

        // We unwrap `waiter` here since there must always be one
        // edge which is resumable / waited using a query latch.
        let (waitee_query, waiter_idx) = waiter.unwrap();

        // Extract the waiter we want to resume.
        let waiter = waitee_query.latch.extract_waiter(waiter_idx);

        // Set the cycle error so it will be picked up when resumed.
        *waiter.cycle.lock() = Some(error);

        // Put the waiter on the list of things to resume.
        wakelist.push(waiter);

        true
    } else {
        false
    }
}
/// Creates a new thread and forwards information in thread locals to it.
/// The new thread runs the deadlock handler.
/// Must only be called when a deadlock is about to happen.
#[cfg(parallel_queries)]
pub unsafe fn handle_deadlock() {
    let registry = rayon_core::Registry::current();

    let gcx_ptr = tls::GCX_PTR.with(|gcx_ptr| {
        gcx_ptr as *const _
    });
    let gcx_ptr = &*gcx_ptr;

    let syntax_globals = syntax::GLOBALS.with(|syntax_globals| {
        syntax_globals as *const _
    });
    let syntax_globals = &*syntax_globals;

    let syntax_pos_globals = syntax_pos::GLOBALS.with(|syntax_pos_globals| {
        syntax_pos_globals as *const _
    });
    let syntax_pos_globals = &*syntax_pos_globals;
    thread::spawn(move || {
        tls::GCX_PTR.set(gcx_ptr, || {
            syntax::GLOBALS.set(syntax_globals, || {
                syntax_pos::GLOBALS.set(syntax_pos_globals, || {
                    tls::with_thread_locals(|| {
                        tls::with_global(|tcx| deadlock(tcx, &registry))
                    })
                })
            })
        })
    });
}
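
// The `GLOBALS.set(..., || ...)` calls above are "scoped TLS": a thread-local
// is installed for the duration of a closure and read back deeper in the call
// stack. A minimal hypothetical version of that pattern:
//
//     use std::cell::Cell;
//
//     thread_local!(static CURRENT: Cell<usize> = Cell::new(0));
//
//     fn set<R>(value: usize, f: impl FnOnce() -> R) -> R {
//         CURRENT.with(|c| c.set(value));
//         let result = f();
//         CURRENT.with(|c| c.set(0)); // restore the default on the way out
//         result
//     }
//
//     fn with<R>(f: impl FnOnce(usize) -> R) -> R {
//         CURRENT.with(|c| f(c.get()))
//     }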
/// Detects query cycles by using depth first search over all active query jobs.
/// If a query cycle is found it will break the cycle by finding an edge which
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_queries)]
fn deadlock(tcx: TyCtxt<'_, '_, '_>, registry: &rayon_core::Registry) {
    let on_panic = OnDrop(|| {
        eprintln!("deadlock handler panicked, aborting process");
        process::abort();
    });

    let mut wakelist = Vec::new();
    let mut jobs: Vec<_> = tcx.queries.collect_active_jobs();

    let mut found_cycle = false;

    while !jobs.is_empty() {
        if remove_cycle(&mut jobs, &mut wakelist, tcx) {
            found_cycle = true;
        }
    }

    // Check that a cycle was found. It is possible for a deadlock to occur without
    // a query cycle if a query which can be waited on uses Rayon to do multithreading
    // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
    // wait using Rayon on B. Rayon may then switch to executing another query (Y)
    // which in turn will wait on X causing a deadlock. We have a false dependency from
    // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
    // only considers the true dependency and won't detect a cycle.
    assert!(found_cycle);

    // FIXME: Ensure this won't cause a deadlock before we return.
    for waiter in wakelist.into_iter() {
        waiter.notify(registry);
    }

    on_panic.disable();
}