//! This module defines types which are thread-safe if cfg!(parallel_compiler) is true.
//!
//! `Lrc` is an alias of either `Rc` or `Arc`.
//!
//! `Lock` is a mutex.
//! It internally uses `parking_lot::Mutex` if cfg!(parallel_compiler) is true,
//! `RefCell` otherwise.
//!
//! `RwLock` is a read-write lock.
//! It internally uses `parking_lot::RwLock` if cfg!(parallel_compiler) is true,
//! `RefCell` otherwise.
//!
//! `MTLock` is a mutex which disappears if cfg!(parallel_compiler) is false.
//!
//! `MTRef` is an immutable reference if cfg!(parallel_compiler) is true,
//! and a mutable reference otherwise.
//!
//! `rustc_erase_owner!` erases an `OwningRef` owner into `Erased` or
//! `Erased + Send + Sync` depending on the value of cfg!(parallel_compiler).
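//!
//! A minimal usage sketch (hypothetical driver code; the same source works under
//! either cfg, with `Lrc` and `Lock` resolving to the serial or parallel
//! implementations below):
//!
//! ```ignore
//! use rustc_data_structures::sync::{Lrc, Lock};
//!
//! let counter = Lrc::new(Lock::new(0_usize));
//! *counter.lock() += 1;
//! assert_eq!(*counter.lock(), 1);
//! ```
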
use std::collections::HashMap;
use std::hash::{Hash, BuildHasher};
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
use crate::owning_ref::{Erased, OwningRef};

pub fn serial_join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
    where A: FnOnce() -> RA, B: FnOnce() -> RB
{
    (oper_a(), oper_b())
}

pub struct SerialScope;

impl SerialScope {
    pub fn spawn<F>(&self, f: F) where F: FnOnce(&SerialScope) {
        f(self)
    }
}

pub fn serial_scope<F, R>(f: F) -> R where F: FnOnce(&SerialScope) -> R {
    f(&SerialScope)
}
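
// A minimal sketch of how the serial fallbacks behave (hypothetical caller code;
// under cfg!(parallel_compiler) the `join`/`scope` names below resolve to the
// rayon equivalents instead):
//
//     let (a, b) = serial_join(|| 1 + 1, || 2 + 2);
//     assert_eq!((a, b), (2, 4));
//     serial_scope(|scope| scope.spawn(|_| println!("runs immediately")));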

pub use std::sync::atomic::Ordering::SeqCst;
pub use std::sync::atomic::Ordering;

cfg_if! {
    if #[cfg(not(parallel_compiler))] {
        pub auto trait Send {}
        pub auto trait Sync {}

        impl<T: ?Sized> Send for T {}
        impl<T: ?Sized> Sync for T {}

        #[macro_export]
        macro_rules! rustc_erase_owner {
            ($v:expr) => { $v.erase_owner() }
        }

        use std::ops::Add;
        use std::cell::Cell;
        use std::panic::{resume_unwind, catch_unwind, AssertUnwindSafe};

        /// This is a single-threaded variant of the `AtomicCell` type provided by crossbeam.
        /// Unlike `Atomic`, this is intended for all `Copy` types,
        /// but it lacks the explicit ordering arguments.
        pub struct AtomicCell<T: Copy>(Cell<T>);

        impl<T: Copy> AtomicCell<T> {
            pub fn new(v: T) -> Self {
                AtomicCell(Cell::new(v))
            }

            pub fn get_mut(&mut self) -> &mut T {
                self.0.get_mut()
            }
        }

        impl<T: Copy> AtomicCell<T> {
            pub fn into_inner(self) -> T { self.0.into_inner() }
            pub fn load(&self) -> T { self.0.get() }
            pub fn store(&self, val: T) { self.0.set(val) }
            pub fn swap(&self, val: T) -> T { self.0.replace(val) }
        }
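
        // A minimal usage sketch (hypothetical values; this mirrors the subset of
        // crossbeam's `AtomicCell` API that is emulated here):
        //
        //     let cell = AtomicCell::new(1u32);
        //     assert_eq!(cell.swap(2), 1);
        //     assert_eq!(cell.load(), 2);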

        /// This is a single-threaded variant of `AtomicU64`, `AtomicUsize`, etc.
        /// It differs from `AtomicCell` in that it has explicit ordering arguments
        /// and is only intended for use with the native atomic types.
        /// You should use this type through the `AtomicU64`, `AtomicUsize`, etc.,
        /// type aliases, as it's not intended to be used separately.
        pub struct Atomic<T: Copy>(Cell<T>);

        impl<T: Copy> Atomic<T> {
            pub fn new(v: T) -> Self {
                Atomic(Cell::new(v))
            }
        }

        impl<T: Copy> Atomic<T> {
            pub fn into_inner(self) -> T { self.0.into_inner() }
            pub fn load(&self, _: Ordering) -> T { self.0.get() }
            pub fn store(&self, val: T, _: Ordering) { self.0.set(val) }
            pub fn swap(&self, val: T, _: Ordering) -> T { self.0.replace(val) }
        }

        impl<T: Copy + PartialEq> Atomic<T> {
            pub fn compare_exchange(&self, current: T, new: T, _: Ordering, _: Ordering)
                -> Result<T, T>
            {
                let read = self.0.get();
                if read == current {
                    self.0.set(new);
                    Ok(read)
                } else {
                    Err(read)
                }
            }
        }

        impl<T: Add<Output = T> + Copy> Atomic<T> {
            pub fn fetch_add(&self, val: T, _: Ordering) -> T {
                let old = self.0.get();
                self.0.set(old + val);
                old
            }
        }

        pub type AtomicUsize = Atomic<usize>;
        pub type AtomicBool = Atomic<bool>;
        pub type AtomicU32 = Atomic<u32>;
        pub type AtomicU64 = Atomic<u64>;
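
        // A minimal usage sketch (hypothetical counter; the `Ordering` argument is
        // accepted for parity with the real atomics but ignored in this build):
        //
        //     let n = AtomicUsize::new(0);
        //     n.fetch_add(1, SeqCst);
        //     assert_eq!(n.load(SeqCst), 1);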

        pub use self::serial_join as join;
        pub use self::serial_scope as scope;

        #[macro_export]
        macro_rules! parallel {
            ($($blocks:tt),*) => {
                // We catch panics here to ensure that all the blocks execute.
                // This makes behavior consistent with the parallel compiler.
                let mut panic = None;
                $(
                    if let Err(p) = ::std::panic::catch_unwind(
                        ::std::panic::AssertUnwindSafe(|| $blocks)
                    ) {
                        if panic.is_none() { panic = Some(p); }
                    }
                )*
                if let Some(panic) = panic {
                    ::std::panic::resume_unwind(panic);
                }
            }
        }

        pub use std::iter::Iterator as ParallelIterator;

        pub fn par_iter<T: IntoIterator>(t: T) -> T::IntoIter {
            t.into_iter()
        }

        pub fn par_for_each_in<T: IntoIterator>(
            t: T,
            for_each: impl Fn(<<T as IntoIterator>::IntoIter as Iterator>::Item) + Sync + Send,
        ) {
            // We catch panics here to ensure that all the loop iterations execute.
            // This makes behavior consistent with the parallel compiler.
            let mut panic = None;
            t.into_iter().for_each(|i| {
                if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
                    if panic.is_none() { panic = Some(p); }
                }
            });
            if let Some(panic) = panic {
                resume_unwind(panic);
            }
        }
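
        // A minimal usage sketch (hypothetical data; in this serial build the "parallel"
        // loop is an ordinary sequential one that merely defers panics to the end):
        //
        //     par_for_each_in(vec![1, 2, 3], |i| println!("{}", i));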

        pub type MetadataRef = OwningRef<Box<dyn Erased>, [u8]>;

        pub use std::rc::Rc as Lrc;
        pub use std::rc::Weak as Weak;
        pub use std::cell::Ref as ReadGuard;
        pub use std::cell::Ref as MappedReadGuard;
        pub use std::cell::RefMut as WriteGuard;
        pub use std::cell::RefMut as MappedWriteGuard;
        pub use std::cell::RefMut as LockGuard;
        pub use std::cell::RefMut as MappedLockGuard;

        use std::cell::RefCell as InnerRwLock;
        use std::cell::RefCell as InnerLock;

        pub struct WorkerLocal<T>(OneThread<T>);

        impl<T> WorkerLocal<T> {
            /// Creates a new worker local where the given closure computes the
            /// value this worker local should take for each thread in the thread pool.
            pub fn new<F: FnMut(usize) -> T>(mut f: F) -> WorkerLocal<T> {
                WorkerLocal(OneThread::new(f(0)))
            }

            /// Returns the worker-local value for each thread.
            pub fn into_inner(self) -> Vec<T> {
                vec![OneThread::into_inner(self.0)]
            }
        }

        impl<T> Deref for WorkerLocal<T> {
            type Target = T;

            fn deref(&self) -> &T {
                &*self.0
            }
        }

        pub type MTRef<'a, T> = &'a mut T;

        #[derive(Debug, Default)]
        pub struct MTLock<T>(T);

        impl<T> MTLock<T> {
            pub fn new(inner: T) -> Self { MTLock(inner) }
            pub fn into_inner(self) -> T { self.0 }
            pub fn get_mut(&mut self) -> &mut T { &mut self.0 }
            pub fn lock(&self) -> &T { &self.0 }
            pub fn lock_mut(&mut self) -> &mut T { &mut self.0 }
        }

        // FIXME: Probably a bad idea (in the threaded case)
        impl<T: Clone> Clone for MTLock<T> {
            fn clone(&self) -> Self {
                MTLock(self.0.clone())
            }
        }
    } else {
        pub use std::marker::Send as Send;
        pub use std::marker::Sync as Sync;

        pub use parking_lot::RwLockReadGuard as ReadGuard;
        pub use parking_lot::MappedRwLockReadGuard as MappedReadGuard;
        pub use parking_lot::RwLockWriteGuard as WriteGuard;
        pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;

        pub use parking_lot::MutexGuard as LockGuard;
        pub use parking_lot::MappedMutexGuard as MappedLockGuard;

        pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};

        pub use crossbeam_utils::atomic::AtomicCell;

        pub use std::sync::Arc as Lrc;
        pub use std::sync::Weak as Weak;

        pub type MTRef<'a, T> = &'a T;

        #[derive(Debug, Default)]
        pub struct MTLock<T>(Lock<T>);

        impl<T> MTLock<T> {
            pub fn new(inner: T) -> Self { MTLock(Lock::new(inner)) }
            pub fn into_inner(self) -> T { self.0.into_inner() }
            pub fn get_mut(&mut self) -> &mut T { self.0.get_mut() }
            pub fn lock(&self) -> LockGuard<'_, T> { self.0.lock() }
            pub fn lock_mut(&self) -> LockGuard<'_, T> { self.lock() }
        }
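
        // A minimal usage sketch (hypothetical value; note that in the serial build
        // `lock` returns a plain `&T` rather than a guard):
        //
        //     let stats = MTLock::new(0_usize);
        //     assert_eq!(*stats.lock(), 0);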

        use parking_lot::Mutex as InnerLock;
        use parking_lot::RwLock as InnerRwLock;

        use std::thread;
        pub use rayon::{join, scope};

        /// Runs a list of blocks in parallel. The first block is executed immediately on
        /// the current thread. Use that for the longest-running block.
        #[macro_export]
        macro_rules! parallel {
            (impl $fblock:tt [$($c:tt,)*] [$block:tt $(, $rest:tt)*]) => {
                parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
            };
            (impl $fblock:tt [$($blocks:tt,)*] []) => {
                ::rustc_data_structures::sync::scope(|s| {
                    $(
                        s.spawn(|_| $blocks);
                    )*
                    $fblock;
                })
            };
            ($fblock:tt, $($blocks:tt),*) => {
                // Reverse the order of the later blocks, since Rayon executes them in
                // reverse order when using a single thread. This ensures the execution
                // order matches that of a single-threaded rustc.
                parallel!(impl $fblock [] [$($blocks),*]);
            };
        }
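
        // A minimal usage sketch (`do_a` etc. are hypothetical; each block is a `tt`,
        // so compound expressions need braces):
        //
        //     parallel!({ do_a() }, { do_b() }, { do_c() });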

        pub use rayon_core::WorkerLocal;
        pub use rayon::iter::ParallelIterator;
        use rayon::iter::IntoParallelIterator;

        pub fn par_iter<T: IntoParallelIterator>(t: T) -> T::Iter {
            t.into_par_iter()
        }

        pub fn par_for_each_in<T: IntoParallelIterator>(
            t: T,
            for_each: impl Fn(<<T as IntoParallelIterator>::Iter as ParallelIterator>::Item)
                + Sync + Send,
        ) {
            t.into_par_iter().for_each(for_each)
        }

        pub type MetadataRef = OwningRef<Box<dyn Erased + Send + Sync>, [u8]>;

        /// This makes locks panic if they are already held.
        /// It is only useful when running in a single thread.
        const ERROR_CHECKING: bool = false;

        #[macro_export]
        macro_rules! rustc_erase_owner {
            ($v:expr) => {{
                let v = $v;
                ::rustc_data_structures::sync::assert_send_val(&v);
                v.erase_send_sync_owner()
            }}
        }
    }
}

pub fn assert_sync<T: ?Sized + Sync>() {}
pub fn assert_send<T: ?Sized + Send>() {}
pub fn assert_send_val<T: ?Sized + Send>(_t: &T) {}
pub fn assert_send_sync_val<T: ?Sized + Sync + Send>(_t: &T) {}
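
// A minimal usage sketch (hypothetical checks; these helpers turn a thread-safety
// requirement into a compile-time error instead of a runtime one):
//
//     assert_sync::<Lock<usize>>();
//     assert_send_val(&vec![1, 2, 3]);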

pub trait HashMapExt<K, V> {
    /// Same as `HashMap::insert`, but it may panic if there's already an
    /// entry for `key` with a value not equal to `value`.
    fn insert_same(&mut self, key: K, value: V);
}

impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S> {
    fn insert_same(&mut self, key: K, value: V) {
        self.entry(key).and_modify(|old| assert!(*old == value)).or_insert(value);
    }
}
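
// A minimal usage sketch (hypothetical map; re-inserting an equal value is fine,
// while inserting a different value for "a" would panic):
//
//     let mut map = HashMap::new();
//     map.insert_same("a", 1);
//     map.insert_same("a", 1); // OK: same value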

/// A type whose inner value can be written once and then will stay read-only.
// This contains a PhantomData<T> since this type conceptually owns a T outside the Mutex once
// initialized. This ensures that Once<T> is Sync only if T is. If we did not have PhantomData<T>
// we could send a &Once<Cell<bool>> to multiple threads and call `get` on it to get access
// to &Cell<bool> on those threads.
pub struct Once<T>(Lock<Option<T>>, PhantomData<T>);

impl<T> Once<T> {
    /// Creates an uninitialized `Once` value.
    pub fn new() -> Self {
        Once(Lock::new(None), PhantomData)
    }

    /// Consumes the value and returns `Some(T)` if it was initialized.
    pub fn into_inner(self) -> Option<T> {
        self.0.into_inner()
    }

    /// Tries to initialize the inner value to `value`.
    /// Returns `None` if the inner value was uninitialized and `value` was consumed to set it;
    /// otherwise, if the inner value was already set, it returns `value` back to the caller.
    pub fn try_set(&self, value: T) -> Option<T> {
        let mut lock = self.0.lock();
        if lock.is_some() { return Some(value); }
        *lock = Some(value);
        None
    }

    /// Tries to initialize the inner value to `value`.
    /// Returns `None` if the inner value was uninitialized and `value` was consumed to set it;
    /// otherwise, if the inner value was already set, it asserts that `value` is equal to the
    /// inner value and then returns `value` back to the caller.
    pub fn try_set_same(&self, value: T) -> Option<T> where T: Eq {
        let mut lock = self.0.lock();
        if let Some(ref inner) = *lock {
            assert!(*inner == value);
            return Some(value);
        }
        *lock = Some(value);
        None
    }

    /// Initializes the inner value to `value` and panics if it was already initialized.
    pub fn set(&self, value: T) {
        assert!(self.try_set(value).is_none());
    }

    /// Tries to initialize the inner value by calling the closure while ensuring that no one
    /// else can access the value in the meantime, by holding a lock for the duration of the
    /// closure. If the value was already initialized, the closure is not called and `false` is
    /// returned; otherwise, if the value from the closure initializes the inner value, `true`
    /// is returned.
    pub fn init_locking<F: FnOnce() -> T>(&self, f: F) -> bool {
        let mut lock = self.0.lock();
        if lock.is_some() { return false; }
        *lock = Some(f());
        true
    }

    /// Tries to initialize the inner value by calling the closure without ensuring that no one
    /// else can access it. This means that when this is called from multiple threads, multiple
    /// closures may concurrently be computing a value which the inner value should take.
    /// Only one of these closures is used to actually initialize the value.
    /// If some other closure already set the value,
    /// we return the value our closure computed wrapped in an `Option`.
    /// If our closure set the value, `None` is returned.
    /// If the value is already initialized, the closure is not called and `None` is returned.
    pub fn init_nonlocking<F: FnOnce() -> T>(&self, f: F) -> Option<T> {
        if self.0.lock().is_some() {
            None
        } else {
            self.try_set(f())
        }
    }

    /// Tries to initialize the inner value by calling the closure without ensuring that no one
    /// else can access it. This means that when this is called from multiple threads, multiple
    /// closures may concurrently be computing a value which the inner value should take.
    /// Only one of these closures is used to actually initialize the value.
    /// If some other closure already set the value, we assert that our closure computed
    /// a value equal to the value already set, and then
    /// we return the value our closure computed wrapped in an `Option`.
    /// If our closure set the value, `None` is returned.
    /// If the value is already initialized, the closure is not called and `None` is returned.
    pub fn init_nonlocking_same<F: FnOnce() -> T>(&self, f: F) -> Option<T> where T: Eq {
        if self.0.lock().is_some() {
            None
        } else {
            self.try_set_same(f())
        }
    }

    /// Tries to get a reference to the inner value; returns `None` if it is not yet initialized.
    pub fn try_get(&self) -> Option<&T> {
        let lock = &*self.0.lock();
        if let Some(ref inner) = *lock {
            // This is safe since we won't mutate the inner value.
            unsafe { Some(&*(inner as *const T)) }
        } else {
            None
        }
    }

    /// Gets a reference to the inner value, panics if it is not yet initialized.
    pub fn get(&self) -> &T {
        self.try_get().expect("value was not set")
    }

    /// Gets a reference to the inner value, panics if it is not yet initialized.
    pub fn borrow(&self) -> &T {
        self.get()
    }
}
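
// A minimal usage sketch (hypothetical value; after initialization, further writes
// must go through `try_set`/`try_set_same`, as another `set` would panic):
//
//     let once: Once<u32> = Once::new();
//     once.set(42);
//     assert_eq!(*once.get(), 42);
//     assert_eq!(once.try_set(7), Some(7)); // already set; 7 is handed back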

pub struct Lock<T>(InnerLock<T>);

impl<T> Lock<T> {
    pub fn new(inner: T) -> Self { Lock(InnerLock::new(inner)) }
    pub fn into_inner(self) -> T { self.0.into_inner() }
    pub fn get_mut(&mut self) -> &mut T { self.0.get_mut() }

    #[cfg(parallel_compiler)]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> { self.0.try_lock() }

    #[cfg(not(parallel_compiler))]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> { self.0.try_borrow_mut().ok() }

    #[cfg(parallel_compiler)]
    pub fn lock(&self) -> LockGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_lock().expect("lock was already held")
        } else {
            self.0.lock()
        }
    }

    #[cfg(not(parallel_compiler))]
    pub fn lock(&self) -> LockGuard<'_, T> { self.0.borrow_mut() }

    pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
        f(&mut *self.lock())
    }

    pub fn borrow(&self) -> LockGuard<'_, T> { self.lock() }
    pub fn borrow_mut(&self) -> LockGuard<'_, T> { self.lock() }
}

impl<T: Default> Default for Lock<T> {
    fn default() -> Self {
        Lock::new(T::default())
    }
}

// FIXME: Probably a bad idea
impl<T: Clone> Clone for Lock<T> {
    fn clone(&self) -> Self {
        Lock::new(self.borrow().clone())
    }
}
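
// A minimal usage sketch (hypothetical value; `with_lock` scopes the exclusive
// borrow so the guard cannot accidentally be held across another `lock` call):
//
//     let lock = Lock::new(String::new());
//     lock.with_lock(|s| s.push_str("hi"));
//     assert_eq!(&*lock.borrow(), "hi");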

pub struct RwLock<T>(InnerRwLock<T>);

impl<T> RwLock<T> {
    pub fn new(inner: T) -> Self { RwLock(InnerRwLock::new(inner)) }
    pub fn into_inner(self) -> T { self.0.into_inner() }
    pub fn get_mut(&mut self) -> &mut T { self.0.get_mut() }

    #[cfg(not(parallel_compiler))]
    pub fn read(&self) -> ReadGuard<'_, T> { self.0.borrow() }

    #[cfg(parallel_compiler)]
    pub fn read(&self) -> ReadGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_read().expect("lock was already held")
        } else {
            self.0.read()
        }
    }

    pub fn with_read_lock<F: FnOnce(&T) -> R, R>(&self, f: F) -> R {
        f(&*self.read())
    }

    #[cfg(not(parallel_compiler))]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_borrow_mut().map_err(|_| ())
    }

    #[cfg(parallel_compiler)]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_write().ok_or(())
    }

    #[cfg(not(parallel_compiler))]
    pub fn write(&self) -> WriteGuard<'_, T> { self.0.borrow_mut() }

    #[cfg(parallel_compiler)]
    pub fn write(&self) -> WriteGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_write().expect("lock was already held")
        } else {
            self.0.write()
        }
    }

    pub fn with_write_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
        f(&mut *self.write())
    }

    pub fn borrow(&self) -> ReadGuard<'_, T> { self.read() }
    pub fn borrow_mut(&self) -> WriteGuard<'_, T> { self.write() }
}

// FIXME: Probably a bad idea
impl<T: Clone> Clone for RwLock<T> {
    fn clone(&self) -> Self {
        RwLock::new(self.borrow().clone())
    }
}
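
// A minimal usage sketch (hypothetical value; many readers or one writer, with the
// serial build enforcing this at runtime through `RefCell` semantics):
//
//     let rw = RwLock::new(vec![1, 2]);
//     assert_eq!(rw.with_read_lock(|v| v.len()), 2);
//     rw.with_write_lock(|v| v.push(3));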

/// A type which only allows its inner value to be used in one thread.
/// It will panic if it is used on multiple threads.
#[derive(Copy, Clone, Hash, Debug, Eq, PartialEq)]
pub struct OneThread<T> {
    #[cfg(parallel_compiler)]
    thread: thread::ThreadId,
    inner: T,
}

#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Sync for OneThread<T> {}
#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Send for OneThread<T> {}

impl<T> OneThread<T> {
    fn check(&self) {
        #[cfg(parallel_compiler)]
        assert_eq!(thread::current().id(), self.thread);
    }

    pub fn new(inner: T) -> Self {
        OneThread {
            #[cfg(parallel_compiler)]
            thread: thread::current().id(),
            inner,
        }
    }

    pub fn into_inner(value: Self) -> T {
        value.check();
        value.inner
    }
}

impl<T> Deref for OneThread<T> {
    type Target = T;
    fn deref(&self) -> &T {
        self.check();
        &self.inner
    }
}

impl<T> DerefMut for OneThread<T> {
    fn deref_mut(&mut self) -> &mut T {
        self.check();
        &mut self.inner
    }
}
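
// A minimal usage sketch (hypothetical value; under cfg!(parallel_compiler) the
// stored `ThreadId` makes any cross-thread access panic in `check`):
//
//     let local = OneThread::new(5_i32);
//     assert_eq!(*local, 5); // same thread, so the check passes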