//! This module defines types which are thread safe if cfg!(parallel_compiler) is true.
//!
//! `Lrc` is an alias of `Arc` if cfg!(parallel_compiler) is true, `Rc` otherwise.
//!
//! `Lock` is a mutex.
//! It internally uses `parking_lot::Mutex` if cfg!(parallel_compiler) is true,
//! `RefCell` otherwise.
//!
//! `RwLock` is a read-write lock.
//! It internally uses `parking_lot::RwLock` if cfg!(parallel_compiler) is true,
//! `RefCell` otherwise.
//!
//! `MTLock` is a mutex which disappears if cfg!(parallel_compiler) is false.
//!
//! `MTRef` is an immutable reference if cfg!(parallel_compiler) is true,
//! and a mutable reference otherwise.
//!
//! `rustc_erase_owner!` erases an `OwningRef` owner into `Erased` or
//! `Erased + Send + Sync` depending on the value of cfg!(parallel_compiler).
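//!
//! As a quick illustrative sketch (hypothetical caller code, not part of this
//! module): the same source works under both configurations, since the serial
//! and parallel types share one API.
//!
//! ```ignore
//! use rustc_data_structures::sync::{Lock, Lrc};
//!
//! let shared = Lrc::new(Lock::new(Vec::new()));
//! shared.lock().push(1);
//! assert_eq!(shared.lock().len(), 1);
//! ```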

use crate::owning_ref::{Erased, OwningRef};
use std::collections::HashMap;
use std::hash::{BuildHasher, Hash};
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};

pub use std::sync::atomic::Ordering;
pub use std::sync::atomic::Ordering::SeqCst;

cfg_if! {
    if #[cfg(not(parallel_compiler))] {
        pub auto trait Send {}
        pub auto trait Sync {}

        impl<T: ?Sized> Send for T {}
        impl<T: ?Sized> Sync for T {}

        #[macro_export]
        macro_rules! rustc_erase_owner {
            ($v:expr) => {
                $v.erase_owner()
            }
        }

        use std::ops::Add;
        use std::panic::{resume_unwind, catch_unwind, AssertUnwindSafe};

        /// This is a single threaded variant of `AtomicCell` provided by crossbeam.
        /// Unlike `Atomic` this is intended for all `Copy` types,
        /// but it lacks the explicit ordering arguments.
        #[derive(Debug)]
        pub struct AtomicCell<T: Copy>(Cell<T>);

        impl<T: Copy> AtomicCell<T> {
            pub fn new(v: T) -> Self {
                AtomicCell(Cell::new(v))
            }

            pub fn get_mut(&mut self) -> &mut T {
                self.0.get_mut()
            }
        }

        impl<T: Copy> AtomicCell<T> {
            pub fn into_inner(self) -> T {
                self.0.into_inner()
            }

            pub fn load(&self) -> T {
                self.0.get()
            }

            pub fn store(&self, val: T) {
                self.0.set(val)
            }

            pub fn swap(&self, val: T) -> T {
                self.0.replace(val)
            }
        }
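
        // Illustrative sketch (not from the original source): this mirrors
        // crossbeam's `AtomicCell` API, so callers can write the same code in
        // both configurations:
        //
        //     let c = AtomicCell::new(1u32);
        //     assert_eq!(c.swap(2), 1);
        //     assert_eq!(c.load(), 2);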

        /// This is a single threaded variant of `AtomicU64`, `AtomicUsize`, etc.
        /// It differs from `AtomicCell` in that it has explicit ordering arguments
        /// and is only intended for use with the native atomic types.
        /// You should use this type through the `AtomicU64`, `AtomicUsize`, etc, type aliases
        /// as it's not intended to be used separately.
        #[derive(Debug)]
        pub struct Atomic<T: Copy>(Cell<T>);

        impl<T: Copy> Atomic<T> {
            pub fn new(v: T) -> Self {
                Atomic(Cell::new(v))
            }
        }

        impl<T: Copy> Atomic<T> {
            pub fn into_inner(self) -> T {
                self.0.into_inner()
            }

            pub fn load(&self, _: Ordering) -> T {
                self.0.get()
            }

            pub fn store(&self, val: T, _: Ordering) {
                self.0.set(val)
            }

            pub fn swap(&self, val: T, _: Ordering) -> T {
                self.0.replace(val)
            }
        }

        impl<T: Copy + PartialEq> Atomic<T> {
            pub fn compare_exchange(&self,
                                    current: T,
                                    new: T,
                                    _: Ordering,
                                    _: Ordering)
                                    -> Result<T, T> {
                let read = self.0.get();
                if read == current {
                    self.0.set(new);
                    Ok(read)
                } else {
                    Err(read)
                }
            }
        }

        impl<T: Add<Output=T> + Copy> Atomic<T> {
            pub fn fetch_add(&self, val: T, _: Ordering) -> T {
                let old = self.0.get();
                self.0.set(old + val);
                old
            }
        }

        pub type AtomicUsize = Atomic<usize>;
        pub type AtomicBool = Atomic<bool>;
        pub type AtomicU32 = Atomic<u32>;
        pub type AtomicU64 = Atomic<u64>;
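
        // Illustrative sketch (not from the original source): the ordering
        // argument is accepted and ignored, matching the std atomics' signatures:
        //
        //     let n = AtomicUsize::new(0);
        //     n.fetch_add(2, SeqCst);
        //     assert_eq!(n.load(SeqCst), 2);
        //     assert_eq!(n.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));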

        pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
            where A: FnOnce() -> RA,
                  B: FnOnce() -> RB
        {
            (oper_a(), oper_b())
        }

        pub struct SerialScope;

        impl SerialScope {
            pub fn spawn<F>(&self, f: F)
                where F: FnOnce(&SerialScope)
            {
                f(self)
            }
        }

        pub fn scope<F, R>(f: F) -> R
            where F: FnOnce(&SerialScope) -> R
        {
            f(&SerialScope)
        }
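
        // Illustrative sketch (not from the original source): the serial `join`
        // and `scope` keep Rayon's shape while running everything sequentially:
        //
        //     let (a, b) = join(|| 1 + 1, || 2 + 2);
        //     assert_eq!((a, b), (2, 4));
        //
        //     scope(|s| {
        //         s.spawn(|_| { /* runs immediately on this thread */ });
        //     });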

        #[macro_export]
        macro_rules! parallel {
            ($($blocks:tt),*) => {
                // We catch panics here ensuring that all the blocks execute.
                // This makes behavior consistent with the parallel compiler.
                let mut panic = None;
                $(
                    if let Err(p) = ::std::panic::catch_unwind(
                        ::std::panic::AssertUnwindSafe(|| $blocks)
                    ) {
                        if panic.is_none() {
                            panic = Some(p);
                        }
                    }
                )*
                if let Some(panic) = panic {
                    ::std::panic::resume_unwind(panic);
                }
            }
        }

        pub use std::iter::Iterator as ParallelIterator;

        pub fn par_iter<T: IntoIterator>(t: T) -> T::IntoIter {
            t.into_iter()
        }

        pub fn par_for_each_in<T: IntoIterator>(t: T, for_each: impl Fn(T::Item) + Sync + Send) {
            // We catch panics here ensuring that all the loop iterations execute.
            // This makes behavior consistent with the parallel compiler.
            let mut panic = None;
            t.into_iter().for_each(|i| {
                if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
                    if panic.is_none() {
                        panic = Some(p);
                    }
                }
            });
            if let Some(panic) = panic {
                resume_unwind(panic);
            }
        }
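
        // Illustrative sketch (not from the original source): even if one
        // iteration panics, the remaining iterations still run and the first
        // panic is resumed afterwards, matching the parallel behavior:
        //
        //     par_for_each_in(0..3, |i| {
        //         process(i); // `process` is a hypothetical helper
        //     });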

        pub type MetadataRef = OwningRef<Box<dyn Erased>, [u8]>;

        pub use std::rc::Rc as Lrc;
        pub use std::rc::Weak as Weak;
        pub use std::cell::Ref as ReadGuard;
        pub use std::cell::Ref as MappedReadGuard;
        pub use std::cell::RefMut as WriteGuard;
        pub use std::cell::RefMut as MappedWriteGuard;
        pub use std::cell::RefMut as LockGuard;
        pub use std::cell::RefMut as MappedLockGuard;

        use std::cell::RefCell as InnerRwLock;
        use std::cell::RefCell as InnerLock;

        use std::cell::Cell;

        #[derive(Debug)]
        pub struct WorkerLocal<T>(OneThread<T>);

        impl<T> WorkerLocal<T> {
            /// Creates a new worker local where the closure computes the
            /// value this worker local should take for each thread in the thread pool.
            pub fn new<F: FnMut(usize) -> T>(mut f: F) -> WorkerLocal<T> {
                WorkerLocal(OneThread::new(f(0)))
            }

            /// Returns the worker-local value for each thread.
            pub fn into_inner(self) -> Vec<T> {
                vec![OneThread::into_inner(self.0)]
            }
        }

        impl<T> Deref for WorkerLocal<T> {
            type Target = T;

            fn deref(&self) -> &T {
                &*self.0
            }
        }
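
        // Illustrative sketch (not from the original source): in the serial
        // case there is exactly one "worker", so the closure is called once
        // with index 0:
        //
        //     let local = WorkerLocal::new(|i| vec![i]);
        //     assert_eq!(*local, vec![0]);
        //     assert_eq!(local.into_inner(), vec![vec![0]]);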

        pub type MTRef<'a, T> = &'a mut T;

        #[derive(Debug, Default)]
        pub struct MTLock<T>(T);

        impl<T> MTLock<T> {
            pub fn new(inner: T) -> Self {
                MTLock(inner)
            }

            pub fn into_inner(self) -> T {
                self.0
            }

            pub fn get_mut(&mut self) -> &mut T {
                &mut self.0
            }

            pub fn lock(&self) -> &T {
                &self.0
            }

            pub fn lock_mut(&mut self) -> &mut T {
                &mut self.0
            }
        }

        // FIXME: Probably a bad idea (in the threaded case)
        impl<T: Clone> Clone for MTLock<T> {
            fn clone(&self) -> Self {
                MTLock(self.0.clone())
            }
        }
    } else {
        pub use std::marker::Send as Send;
        pub use std::marker::Sync as Sync;

        pub use parking_lot::RwLockReadGuard as ReadGuard;
        pub use parking_lot::MappedRwLockReadGuard as MappedReadGuard;
        pub use parking_lot::RwLockWriteGuard as WriteGuard;
        pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;

        pub use parking_lot::MutexGuard as LockGuard;
        pub use parking_lot::MappedMutexGuard as MappedLockGuard;

        pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};

        pub use crossbeam_utils::atomic::AtomicCell;

        pub use std::sync::Arc as Lrc;
        pub use std::sync::Weak as Weak;

        pub type MTRef<'a, T> = &'a T;

        #[derive(Debug, Default)]
        pub struct MTLock<T>(Lock<T>);

        impl<T> MTLock<T> {
            pub fn new(inner: T) -> Self {
                MTLock(Lock::new(inner))
            }

            pub fn into_inner(self) -> T {
                self.0.into_inner()
            }

            pub fn get_mut(&mut self) -> &mut T {
                self.0.get_mut()
            }

            pub fn lock(&self) -> LockGuard<'_, T> {
                self.0.lock()
            }

            pub fn lock_mut(&self) -> LockGuard<'_, T> {
                self.lock()
            }
        }

        use parking_lot::Mutex as InnerLock;
        use parking_lot::RwLock as InnerRwLock;

        use std::thread;
        pub use rayon::{join, scope};

        /// Runs a list of blocks in parallel. The first block is executed immediately on
        /// the current thread. Use that for the longest running block.
        #[macro_export]
        macro_rules! parallel {
            (impl $fblock:tt [$($c:tt,)*] [$block:tt $(, $rest:tt)*]) => {
                parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
            };
            (impl $fblock:tt [$($blocks:tt,)*] []) => {
                ::rustc_data_structures::sync::scope(|s| {
                    $(
                        s.spawn(|_| $blocks);
                    )*
                    $fblock;
                })
            };
            ($fblock:tt, $($blocks:tt),*) => {
                // Reverse the order of the later blocks since Rayon executes them in reverse order
                // when using a single thread. This ensures the execution order matches that
                // of a single threaded rustc.
                parallel!(impl $fblock [] [$($blocks),*]);
            };
        }
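
        // Illustrative sketch (not from the original source): the first block
        // stays on the current thread while the rest are spawned onto the
        // Rayon scope, e.g.:
        //
        //     parallel!(longest_block(), quick_block_a(), quick_block_b());
        //
        // where all three calls are hypothetical thunks of work.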

        pub use rayon_core::WorkerLocal;

        pub use rayon::iter::ParallelIterator;
        use rayon::iter::IntoParallelIterator;

        pub fn par_iter<T: IntoParallelIterator>(t: T) -> T::Iter {
            t.into_par_iter()
        }

        pub fn par_for_each_in<T: IntoParallelIterator>(
            t: T,
            for_each: impl Fn(T::Item) + Sync + Send,
        ) {
            t.into_par_iter().for_each(for_each)
        }

        pub type MetadataRef = OwningRef<Box<dyn Erased + Send + Sync>, [u8]>;

        /// This makes locks panic if they are already held.
        /// It is only useful when you are running in a single thread.
        const ERROR_CHECKING: bool = false;

        #[macro_export]
        macro_rules! rustc_erase_owner {
            ($v:expr) => {{
                let v = $v;
                ::rustc_data_structures::sync::assert_send_val(&v);
                v.erase_send_sync_owner()
            }}
        }
    }
}

pub fn assert_sync<T: ?Sized + Sync>() {}
pub fn assert_send<T: ?Sized + Send>() {}
pub fn assert_send_val<T: ?Sized + Send>(_t: &T) {}
pub fn assert_send_sync_val<T: ?Sized + Sync + Send>(_t: &T) {}

pub trait HashMapExt<K, V> {
    /// Same as `HashMap::insert`, but it may panic if there's already an
    /// entry for `key` with a value not equal to `value`.
    fn insert_same(&mut self, key: K, value: V);
}

impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S> {
    fn insert_same(&mut self, key: K, value: V) {
        self.entry(key).and_modify(|old| assert!(*old == value)).or_insert(value);
    }
}
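
// Illustrative sketch (not from the original source): inserting an equal value
// twice is fine; inserting a different value for the same key would panic.
//
//     let mut map = HashMap::new();
//     map.insert_same("key", 1);
//     map.insert_same("key", 1); // OK: same value
//     // map.insert_same("key", 2); // would panic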

/// A type whose inner value can be written once and then will stay read-only.
// This contains a PhantomData<T> since this type conceptually owns a T outside the Mutex once
// initialized. This ensures that Once<T> is Sync only if T is. If we did not have PhantomData<T>
// we could send a &Once<Cell<bool>> to multiple threads and call `get` on it to get access
// to &Cell<bool> on those threads.
pub struct Once<T>(Lock<Option<T>>, PhantomData<T>);

impl<T> Once<T> {
    /// Creates a `Once` value which is uninitialized.
    pub fn new() -> Self {
        Once(Lock::new(None), PhantomData)
    }

    /// Consumes the value and returns `Some(T)` if it was initialized.
    pub fn into_inner(self) -> Option<T> {
        self.0.into_inner()
    }

    /// Tries to initialize the inner value to `value`.
    /// Returns `None` if the inner value was uninitialized and `value` was consumed setting it;
    /// otherwise, if the inner value was already set, it returns `value` back to the caller.
    pub fn try_set(&self, value: T) -> Option<T> {
        let mut lock = self.0.lock();
        if lock.is_some() {
            return Some(value);
        }
        *lock = Some(value);
        None
    }

    /// Tries to initialize the inner value to `value`.
    /// Returns `None` if the inner value was uninitialized and `value` was consumed setting it;
    /// otherwise, if the inner value was already set, it asserts that `value` is equal to the
    /// inner value and then returns `value` back to the caller.
    pub fn try_set_same(&self, value: T) -> Option<T>
        where T: Eq
    {
        let mut lock = self.0.lock();
        if let Some(ref inner) = *lock {
            assert!(*inner == value);
            return Some(value);
        }
        *lock = Some(value);
        None
    }

    /// Tries to initialize the inner value to `value` and panics if it was already initialized.
    pub fn set(&self, value: T) {
        assert!(self.try_set(value).is_none());
    }

    /// Initializes the inner value if it wasn't already done by calling the provided closure. It
    /// ensures that no-one else can access the value in the meantime by holding a lock for the
    /// duration of the closure.
    /// A reference to the inner value is returned.
    pub fn init_locking<F: FnOnce() -> T>(&self, f: F) -> &T {
        {
            let mut lock = self.0.lock();
            if lock.is_none() {
                *lock = Some(f());
            }
        }

        self.borrow()
    }

    /// Tries to initialize the inner value by calling the closure without ensuring that no-one
    /// else can access it. This means that when this is called from multiple threads, multiple
    /// closures may concurrently be computing a value which the inner value should take.
    /// Only one of these closures is used to actually initialize the value.
    /// If some other closure already set the value,
    /// we return the value our closure computed wrapped in an `Option`.
    /// If our closure set the value, `None` is returned.
    /// If the value is already initialized, the closure is not called and `None` is returned.
    pub fn init_nonlocking<F: FnOnce() -> T>(&self, f: F) -> Option<T> {
        if self.0.lock().is_some() { None } else { self.try_set(f()) }
    }

    /// Tries to initialize the inner value by calling the closure without ensuring that no-one
    /// else can access it. This means that when this is called from multiple threads, multiple
    /// closures may concurrently be computing a value which the inner value should take.
    /// Only one of these closures is used to actually initialize the value.
    /// If some other closure already set the value, we assert that our closure computed
    /// a value equal to the value already set, and then
    /// we return the value our closure computed wrapped in an `Option`.
    /// If our closure set the value, `None` is returned.
    /// If the value is already initialized, the closure is not called and `None` is returned.
    pub fn init_nonlocking_same<F: FnOnce() -> T>(&self, f: F) -> Option<T>
        where T: Eq
    {
        if self.0.lock().is_some() { None } else { self.try_set_same(f()) }
    }

    /// Tries to get a reference to the inner value; returns `None` if it is not yet initialized.
    pub fn try_get(&self) -> Option<&T> {
        let lock = &*self.0.lock();
        if let Some(ref inner) = *lock {
            // This is safe since we won't mutate the inner value.
            unsafe { Some(&*(inner as *const T)) }
        } else {
            None
        }
    }

    /// Gets a reference to the inner value; panics if it is not yet initialized.
    pub fn get(&self) -> &T {
        self.try_get().expect("value was not set")
    }

    /// Gets a reference to the inner value; panics if it is not yet initialized.
    pub fn borrow(&self) -> &T {
        self.get()
    }
}
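
// Illustrative sketch (not from the original source): a `Once` accepts the
// first value it is given and hands later values back to the caller.
//
//     let once: Once<u32> = Once::new();
//     assert!(once.try_set(7).is_none()); // first write wins
//     assert_eq!(once.try_set(8), Some(8)); // later writes bounce back
//     assert_eq!(*once.get(), 7);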

#[derive(Debug)]
pub struct Lock<T>(InnerLock<T>);

impl<T> Lock<T> {
    pub fn new(inner: T) -> Self {
        Lock(InnerLock::new(inner))
    }

    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    #[cfg(parallel_compiler)]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
        self.0.try_lock()
    }

    #[cfg(not(parallel_compiler))]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
        self.0.try_borrow_mut().ok()
    }

    #[cfg(parallel_compiler)]
    pub fn lock(&self) -> LockGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_lock().expect("lock was already held")
        } else {
            self.0.lock()
        }
    }

    #[cfg(not(parallel_compiler))]
    pub fn lock(&self) -> LockGuard<'_, T> {
        self.0.borrow_mut()
    }

    pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
        f(&mut *self.lock())
    }

    pub fn borrow(&self) -> LockGuard<'_, T> {
        self.lock()
    }

    pub fn borrow_mut(&self) -> LockGuard<'_, T> {
        self.lock()
    }
}

impl<T: Default> Default for Lock<T> {
    fn default() -> Self {
        Lock::new(T::default())
    }
}

// FIXME: Probably a bad idea
impl<T: Clone> Clone for Lock<T> {
    fn clone(&self) -> Self {
        Lock::new(self.borrow().clone())
    }
}
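
// Illustrative sketch (not from the original source): `with_lock` scopes the
// guard to a closure, which avoids accidentally holding it across other calls.
//
//     let lock = Lock::new(0);
//     lock.with_lock(|v| *v += 1);
//     assert_eq!(*lock.borrow(), 1);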

#[derive(Debug)]
pub struct RwLock<T>(InnerRwLock<T>);

impl<T> RwLock<T> {
    pub fn new(inner: T) -> Self {
        RwLock(InnerRwLock::new(inner))
    }

    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    #[cfg(not(parallel_compiler))]
    pub fn read(&self) -> ReadGuard<'_, T> {
        self.0.borrow()
    }

    #[cfg(parallel_compiler)]
    pub fn read(&self) -> ReadGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_read().expect("lock was already held")
        } else {
            self.0.read()
        }
    }

    pub fn with_read_lock<F: FnOnce(&T) -> R, R>(&self, f: F) -> R {
        f(&*self.read())
    }

    #[cfg(not(parallel_compiler))]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_borrow_mut().map_err(|_| ())
    }

    #[cfg(parallel_compiler)]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_write().ok_or(())
    }

    #[cfg(not(parallel_compiler))]
    pub fn write(&self) -> WriteGuard<'_, T> {
        self.0.borrow_mut()
    }

    #[cfg(parallel_compiler)]
    pub fn write(&self) -> WriteGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_write().expect("lock was already held")
        } else {
            self.0.write()
        }
    }

    pub fn with_write_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
        f(&mut *self.write())
    }

    pub fn borrow(&self) -> ReadGuard<'_, T> {
        self.read()
    }

    pub fn borrow_mut(&self) -> WriteGuard<'_, T> {
        self.write()
    }
}

// FIXME: Probably a bad idea
impl<T: Clone> Clone for RwLock<T> {
    fn clone(&self) -> Self {
        RwLock::new(self.borrow().clone())
    }
}
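
// Illustrative sketch (not from the original source): many concurrent readers
// are fine in the parallel build; in the serial build `read` maps to
// `RefCell::borrow`, so a live write guard makes it panic instead of block.
//
//     let rw = RwLock::new(1);
//     assert_eq!(rw.with_read_lock(|v| *v), 1);
//     rw.with_write_lock(|v| *v = 2);
//     assert_eq!(*rw.read(), 2);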

/// A type which only allows its inner value to be used in one thread.
/// It will panic if it is used on multiple threads.
#[derive(Debug)]
pub struct OneThread<T> {
    #[cfg(parallel_compiler)]
    thread: thread::ThreadId,
    inner: T,
}

#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Sync for OneThread<T> {}
#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Send for OneThread<T> {}

impl<T> OneThread<T> {
    fn check(&self) {
        #[cfg(parallel_compiler)]
        assert_eq!(thread::current().id(), self.thread);
    }

    pub fn new(inner: T) -> Self {
        OneThread {
            #[cfg(parallel_compiler)]
            thread: thread::current().id(),
            inner,
        }
    }

    pub fn into_inner(value: Self) -> T {
        value.check();
        value.inner
    }
}

impl<T> Deref for OneThread<T> {
    type Target = T;

    fn deref(&self) -> &T {
        self.check();
        &self.inner
    }
}

impl<T> DerefMut for OneThread<T> {
    fn deref_mut(&mut self) -> &mut T {
        self.check();
        &mut self.inner
    }
}
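
#[cfg(test)]
mod sync_examples {
    //! A few usage sketches added for illustration (not part of the original
    //! module); they exercise the public API in whichever configuration the
    //! crate was built with.
    use super::{Lock, Once, OneThread};

    #[test]
    fn lock_round_trip() {
        let lock = Lock::new(1_u32);
        lock.with_lock(|v| *v += 1);
        assert_eq!(lock.into_inner(), 2);
    }

    #[test]
    fn once_first_write_wins() {
        let once: Once<u32> = Once::new();
        assert!(once.try_set(7).is_none()); // first write wins
        assert_eq!(once.try_set(8), Some(8)); // later writes bounce back
        assert_eq!(*once.get(), 7);
    }

    #[test]
    fn one_thread_same_thread_access() {
        // `OneThread` only asserts the thread id in the parallel build; on the
        // creating thread this always succeeds.
        let v = OneThread::new(vec![1, 2, 3]);
        assert_eq!(v.len(), 3);
        assert_eq!(OneThread::into_inner(v), vec![1, 2, 3]);
    }
}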