1 //! This module defines types which are thread safe if cfg!(parallel_compiler) is true.
3 //! `Lrc` is an alias of either Rc or Arc.
6 //! It internally uses `parking_lot::Mutex` if cfg!(parallel_compiler) is true,
7 //! `RefCell` otherwise.
9 //! `RwLock` is a read-write lock.
10 //! It internally uses `parking_lot::RwLock` if cfg!(parallel_compiler) is true,
11 //! `RefCell` otherwise.
13 //! `MTLock` is a mutex which disappears if cfg!(parallel_compiler) is false.
15 //! `MTRef` is an immutable reference if cfg!(parallel_compiler), and a mutable reference otherwise.
17 //! `rustc_erase_owner!` erases an OwningRef owner into Erased or Erased + Send + Sync
18 //! depending on the value of cfg!(parallel_compiler).
20 use std::collections::HashMap;
21 use std::hash::{Hash, BuildHasher};
22 use std::marker::PhantomData;
23 use std::ops::{Deref, DerefMut};
24 use crate::owning_ref::{Erased, OwningRef};
26 pub fn serial_join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
27 where A: FnOnce() -> RA,
33 pub struct SerialScope;
36 pub fn spawn<F>(&self, f: F)
37 where F: FnOnce(&SerialScope)
43 pub fn serial_scope<F, R>(f: F) -> R
44 where F: FnOnce(&SerialScope) -> R
49 pub use std::sync::atomic::Ordering::SeqCst;
50 pub use std::sync::atomic::Ordering;
53 if #[cfg(not(parallel_compiler))] {
54 pub auto trait Send {}
55 pub auto trait Sync {}
57 impl<T: ?Sized> Send for T {}
58 impl<T: ?Sized> Sync for T {}
61 macro_rules! rustc_erase_owner {
68 use std::panic::{resume_unwind, catch_unwind, AssertUnwindSafe};
71 pub struct Atomic<T: Copy>(Cell<T>);
73 impl<T: Copy> Atomic<T> {
75 pub fn new(v: T) -> Self {
80 impl<T: Copy + PartialEq> Atomic<T> {
81 pub fn into_inner(self) -> T {
86 pub fn load(&self, _: Ordering) -> T {
91 pub fn store(&self, val: T, _: Ordering) {
95 pub fn swap(&self, val: T, _: Ordering) -> T {
99 pub fn compare_exchange(&self,
105 let read = self.0.get();
115 impl<T: Add<Output=T> + Copy> Atomic<T> {
116 pub fn fetch_add(&self, val: T, _: Ordering) -> T {
117 let old = self.0.get();
118 self.0.set(old + val);
123 pub type AtomicUsize = Atomic<usize>;
124 pub type AtomicBool = Atomic<bool>;
125 pub type AtomicU32 = Atomic<u32>;
126 pub type AtomicU64 = Atomic<u64>;
128 pub use self::serial_join as join;
129 pub use self::serial_scope as scope;
132 macro_rules! parallel {
133 ($($blocks:tt),*) => {
134 let mut panic = None;
136 if let Err(p) = ::std::panic::catch_unwind(
137 ::std::panic::AssertUnwindSafe(|| $blocks)
144 if let Some(panic) = panic {
145 ::std::panic::resume_unwind(panic);
150 pub use std::iter::Iterator as ParallelIterator;
152 pub fn par_iter<T: IntoIterator>(t: T) -> T::IntoIter {
156 pub fn par_for_each_in<T: IntoIterator>(
159 impl Fn(<<T as IntoIterator>::IntoIter as Iterator>::Item) + Sync + Send
161 let mut panic = None;
162 t.into_iter().for_each(|i| {
163 if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
169 if let Some(panic) = panic {
170 resume_unwind(panic);
174 pub type MetadataRef = OwningRef<Box<dyn Erased>, [u8]>;
176 pub use std::rc::Rc as Lrc;
177 pub use std::rc::Weak as Weak;
178 pub use std::cell::Ref as ReadGuard;
179 pub use std::cell::Ref as MappedReadGuard;
180 pub use std::cell::RefMut as WriteGuard;
181 pub use std::cell::RefMut as MappedWriteGuard;
182 pub use std::cell::RefMut as LockGuard;
183 pub use std::cell::RefMut as MappedLockGuard;
185 use std::cell::RefCell as InnerRwLock;
186 use std::cell::RefCell as InnerLock;
191 pub struct WorkerLocal<T>(OneThread<T>);
193 impl<T> WorkerLocal<T> {
194 /// Creates a new worker local where the `initial` closure computes the
195 /// value this worker local should take for each thread in the thread pool.
197 pub fn new<F: FnMut(usize) -> T>(mut f: F) -> WorkerLocal<T> {
198 WorkerLocal(OneThread::new(f(0)))
201 /// Returns the worker-local value for each thread
203 pub fn into_inner(self) -> Vec<T> {
204 vec![OneThread::into_inner(self.0)]
208 impl<T> Deref for WorkerLocal<T> {
212 fn deref(&self) -> &T {
217 pub type MTRef<'a, T> = &'a mut T;
219 #[derive(Debug, Default)]
220 pub struct MTLock<T>(T);
224 pub fn new(inner: T) -> Self {
229 pub fn into_inner(self) -> T {
234 pub fn get_mut(&mut self) -> &mut T {
239 pub fn lock(&self) -> &T {
244 pub fn lock_mut(&mut self) -> &mut T {
249 // FIXME: Probably a bad idea (in the threaded case)
250 impl<T: Clone> Clone for MTLock<T> {
252 fn clone(&self) -> Self {
253 MTLock(self.0.clone())
257 pub use std::marker::Send as Send;
258 pub use std::marker::Sync as Sync;
260 pub use parking_lot::RwLockReadGuard as ReadGuard;
261 pub use parking_lot::MappedRwLockReadGuard as MappedReadGuard;
262 pub use parking_lot::RwLockWriteGuard as WriteGuard;
263 pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;
265 pub use parking_lot::MutexGuard as LockGuard;
266 pub use parking_lot::MappedMutexGuard as MappedLockGuard;
268 pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};
270 pub use std::sync::Arc as Lrc;
271 pub use std::sync::Weak as Weak;
273 pub type MTRef<'a, T> = &'a T;
275 #[derive(Debug, Default)]
276 pub struct MTLock<T>(Lock<T>);
280 pub fn new(inner: T) -> Self {
281 MTLock(Lock::new(inner))
285 pub fn into_inner(self) -> T {
290 pub fn get_mut(&mut self) -> &mut T {
295 pub fn lock(&self) -> LockGuard<'_, T> {
300 pub fn lock_mut(&self) -> LockGuard<'_, T> {
305 use parking_lot::Mutex as InnerLock;
306 use parking_lot::RwLock as InnerRwLock;
310 pub use rayon::{join, scope};
313 macro_rules! parallel {
314 (impl $fblock:tt [$($c:tt,)*] [$block:tt $(, $rest:tt)*]) => {
315 parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
317 (impl $fblock:tt [$($blocks:tt,)*] []) => {
318 ::rustc_data_structures::sync::scope(|s| {
320 s.spawn(|_| $blocks);
325 ($fblock:tt, $($blocks:tt),*) => {
326 // Reverse the order of the blocks since Rayon executes them in reverse order
327 // when using a single thread. This ensures the execution order matches that
328 // of a single threaded rustc
329 parallel!(impl $fblock [] [$($blocks),*]);
333 pub use rayon_core::WorkerLocal;
335 pub use rayon::iter::ParallelIterator;
336 use rayon::iter::IntoParallelIterator;
338 pub fn par_iter<T: IntoParallelIterator>(t: T) -> T::Iter {
342 pub fn par_for_each_in<T: IntoParallelIterator>(
345 <<T as IntoParallelIterator>::Iter as ParallelIterator>::Item
348 t.into_par_iter().for_each(for_each)
351 pub type MetadataRef = OwningRef<Box<dyn Erased + Send + Sync>, [u8]>;
353 /// This makes locks panic if they are already held.
354 /// It is only useful when you are running in a single thread
355 const ERROR_CHECKING: bool = false;
358 macro_rules! rustc_erase_owner {
361 ::rustc_data_structures::sync::assert_send_val(&v);
362 v.erase_send_sync_owner()
368 pub fn assert_sync<T: ?Sized + Sync>() {}
369 pub fn assert_send<T: ?Sized + Send>() {}
370 pub fn assert_send_val<T: ?Sized + Send>(_t: &T) {}
371 pub fn assert_send_sync_val<T: ?Sized + Sync + Send>(_t: &T) {}
373 pub trait HashMapExt<K, V> {
374 /// Same as HashMap::insert, but it may panic if there's already an
375 /// entry for `key` with a value not equal to `value`
376 fn insert_same(&mut self, key: K, value: V);
379 impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S> {
380 fn insert_same(&mut self, key: K, value: V) {
381 self.entry(key).and_modify(|old| assert!(*old == value)).or_insert(value);
385 /// A type whose inner value can be written once and then will stay read-only
386 // This contains a PhantomData<T> since this type conceptually owns a T outside the Mutex once
387 // initialized. This ensures that Once<T> is Sync only if T is. If we did not have PhantomData<T>
388 // we could send a &Once<Cell<bool>> to multiple threads and call `get` on it to get access
389 // to &Cell<bool> on those threads.
390 pub struct Once<T>(Lock<Option<T>>, PhantomData<T>);
393 /// Creates an Once value which is uninitialized
395 pub fn new() -> Self {
396 Once(Lock::new(None), PhantomData)
399 /// Consumes the value and returns Some(T) if it was initialized
401 pub fn into_inner(self) -> Option<T> {
405 /// Tries to initialize the inner value to `value`.
406 /// Returns `None` if the inner value was uninitialized and `value` was consumed setting it
407 /// otherwise if the inner value was already set it returns `value` back to the caller
409 pub fn try_set(&self, value: T) -> Option<T> {
410 let mut lock = self.0.lock();
418 /// Tries to initialize the inner value to `value`.
419 /// Returns `None` if the inner value was uninitialized and `value` was consumed setting it
420 /// otherwise if the inner value was already set it asserts that `value` is equal to the inner
421 /// value and then returns `value` back to the caller
423 pub fn try_set_same(&self, value: T) -> Option<T> where T: Eq {
424 let mut lock = self.0.lock();
425 if let Some(ref inner) = *lock {
426 assert!(*inner == value);
433 /// Tries to initialize the inner value to `value` and panics if it was already initialized
435 pub fn set(&self, value: T) {
436 assert!(self.try_set(value).is_none());
439 /// Tries to initialize the inner value by calling the closure while ensuring that no-one else
440 /// can access the value in the mean time by holding a lock for the duration of the closure.
441 /// If the value was already initialized the closure is not called and `false` is returned,
442 /// otherwise if the value from the closure initializes the inner value, `true` is returned
444 pub fn init_locking<F: FnOnce() -> T>(&self, f: F) -> bool {
445 let mut lock = self.0.lock();
453 /// Tries to initialize the inner value by calling the closure without ensuring that no-one
454 /// else can access it. This mean when this is called from multiple threads, multiple
455 /// closures may concurrently be computing a value which the inner value should take.
456 /// Only one of these closures are used to actually initialize the value.
457 /// If some other closure already set the value,
458 /// we return the value our closure computed wrapped in a `Option`.
459 /// If our closure set the value, `None` is returned.
460 /// If the value is already initialized, the closure is not called and `None` is returned.
462 pub fn init_nonlocking<F: FnOnce() -> T>(&self, f: F) -> Option<T> {
463 if self.0.lock().is_some() {
470 /// Tries to initialize the inner value by calling the closure without ensuring that no-one
471 /// else can access it. This mean when this is called from multiple threads, multiple
472 /// closures may concurrently be computing a value which the inner value should take.
473 /// Only one of these closures are used to actually initialize the value.
474 /// If some other closure already set the value, we assert that it our closure computed
475 /// a value equal to the value already set and then
476 /// we return the value our closure computed wrapped in a `Option`.
477 /// If our closure set the value, `None` is returned.
478 /// If the value is already initialized, the closure is not called and `None` is returned.
480 pub fn init_nonlocking_same<F: FnOnce() -> T>(&self, f: F) -> Option<T> where T: Eq {
481 if self.0.lock().is_some() {
484 self.try_set_same(f())
488 /// Tries to get a reference to the inner value, returns `None` if it is not yet initialized
490 pub fn try_get(&self) -> Option<&T> {
491 let lock = &*self.0.lock();
492 if let Some(ref inner) = *lock {
493 // This is safe since we won't mutate the inner value
494 unsafe { Some(&*(inner as *const T)) }
500 /// Gets reference to the inner value, panics if it is not yet initialized
502 pub fn get(&self) -> &T {
503 self.try_get().expect("value was not set")
506 /// Gets reference to the inner value, panics if it is not yet initialized
508 pub fn borrow(&self) -> &T {
514 pub struct Lock<T>(InnerLock<T>);
518 pub fn new(inner: T) -> Self {
519 Lock(InnerLock::new(inner))
523 pub fn into_inner(self) -> T {
528 pub fn get_mut(&mut self) -> &mut T {
532 #[cfg(parallel_compiler)]
534 pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
538 #[cfg(not(parallel_compiler))]
540 pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
541 self.0.try_borrow_mut().ok()
544 #[cfg(parallel_compiler)]
546 pub fn lock(&self) -> LockGuard<'_, T> {
548 self.0.try_lock().expect("lock was already held")
554 #[cfg(not(parallel_compiler))]
556 pub fn lock(&self) -> LockGuard<'_, T> {
561 pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
566 pub fn borrow(&self) -> LockGuard<'_, T> {
571 pub fn borrow_mut(&self) -> LockGuard<'_, T> {
576 impl<T: Default> Default for Lock<T> {
578 fn default() -> Self {
579 Lock::new(T::default())
583 // FIXME: Probably a bad idea
584 impl<T: Clone> Clone for Lock<T> {
586 fn clone(&self) -> Self {
587 Lock::new(self.borrow().clone())
592 pub struct RwLock<T>(InnerRwLock<T>);
596 pub fn new(inner: T) -> Self {
597 RwLock(InnerRwLock::new(inner))
601 pub fn into_inner(self) -> T {
606 pub fn get_mut(&mut self) -> &mut T {
610 #[cfg(not(parallel_compiler))]
612 pub fn read(&self) -> ReadGuard<'_, T> {
616 #[cfg(parallel_compiler)]
618 pub fn read(&self) -> ReadGuard<'_, T> {
620 self.0.try_read().expect("lock was already held")
627 pub fn with_read_lock<F: FnOnce(&T) -> R, R>(&self, f: F) -> R {
631 #[cfg(not(parallel_compiler))]
633 pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
634 self.0.try_borrow_mut().map_err(|_| ())
637 #[cfg(parallel_compiler)]
639 pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
640 self.0.try_write().ok_or(())
643 #[cfg(not(parallel_compiler))]
645 pub fn write(&self) -> WriteGuard<'_, T> {
649 #[cfg(parallel_compiler)]
651 pub fn write(&self) -> WriteGuard<'_, T> {
653 self.0.try_write().expect("lock was already held")
660 pub fn with_write_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
661 f(&mut *self.write())
665 pub fn borrow(&self) -> ReadGuard<'_, T> {
670 pub fn borrow_mut(&self) -> WriteGuard<'_, T> {
675 // FIXME: Probably a bad idea
676 impl<T: Clone> Clone for RwLock<T> {
678 fn clone(&self) -> Self {
679 RwLock::new(self.borrow().clone())
683 /// A type which only allows its inner value to be used in one thread.
684 /// It will panic if it is used on multiple threads.
685 #[derive(Copy, Clone, Hash, Debug, Eq, PartialEq)]
686 pub struct OneThread<T> {
687 #[cfg(parallel_compiler)]
688 thread: thread::ThreadId,
692 #[cfg(parallel_compiler)]
693 unsafe impl<T> std::marker::Sync for OneThread<T> {}
694 #[cfg(parallel_compiler)]
695 unsafe impl<T> std::marker::Send for OneThread<T> {}
697 impl<T> OneThread<T> {
700 #[cfg(parallel_compiler)]
701 assert_eq!(thread::current().id(), self.thread);
705 pub fn new(inner: T) -> Self {
707 #[cfg(parallel_compiler)]
708 thread: thread::current().id(),
714 pub fn into_inner(value: Self) -> T {
720 impl<T> Deref for OneThread<T> {
723 fn deref(&self) -> &T {
729 impl<T> DerefMut for OneThread<T> {
730 fn deref_mut(&mut self) -> &mut T {