//! This module defines types which are thread safe if cfg!(parallel_compiler) is true.
//!
//! `Lrc` is an alias of `Arc` if cfg!(parallel_compiler) is true, `Rc` otherwise.
//!
//! `Lock` is a mutex.
//! It internally uses `parking_lot::Mutex` if cfg!(parallel_compiler) is true,
//! `RefCell` otherwise.
//!
//! `RwLock` is a read-write lock.
//! It internally uses `parking_lot::RwLock` if cfg!(parallel_compiler) is true,
//! `RefCell` otherwise.
//!
//! `MTLock` is a mutex which disappears if cfg!(parallel_compiler) is false.
//!
//! `MTRef` is an immutable reference if cfg!(parallel_compiler), and a mutable reference otherwise.
//!
//! `rustc_erase_owner!` erases an `OwningRef` owner into `Erased` or `Erased + Send + Sync`
//! depending on the value of cfg!(parallel_compiler).
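//!
//! A rough usage sketch (illustrative only; these types are intended as
//! drop-in replacements for their `std` counterparts):
//!
//! ```ignore (illustrative)
//! use rustc_data_structures::sync::{Lrc, Lock};
//!
//! let shared = Lrc::new(Lock::new(Vec::new()));
//! shared.lock().push(1); // `RefMut` single-threaded, `MutexGuard` in parallel builds
//! assert_eq!(shared.lock().len(), 1);
//! ```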

use crate::owning_ref::{Erased, OwningRef};
use std::collections::HashMap;
use std::hash::{BuildHasher, Hash};
use std::ops::{Deref, DerefMut};
use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
use std::thread;

pub use std::sync::atomic::Ordering;
pub use std::sync::atomic::Ordering::SeqCst;

cfg_if! {
    if #[cfg(not(parallel_compiler))] {
        pub auto trait Send {}
        pub auto trait Sync {}

        impl<T: ?Sized> Send for T {}
        impl<T: ?Sized> Sync for T {}

        #[macro_export]
        macro_rules! rustc_erase_owner {
            ($v:expr) => {
                $v.erase_owner()
            }
        }

        use std::cell::Cell;
        use std::ops::Add;

        /// This is a single threaded variant of `AtomicU64`, `AtomicUsize`, etc.
        /// It has explicit ordering arguments and is only intended for use with
        /// the native atomic types.
        /// You should use this type through the `AtomicU64`, `AtomicUsize`, etc, type aliases
        /// as it's not intended to be used separately.
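        ///
        /// A sketch of intended usage (illustrative; the orderings are accepted
        /// but ignored, since there is only one thread):
        ///
        /// ```ignore (illustrative)
        /// let n = AtomicUsize::new(0);
        /// n.fetch_add(1, SeqCst);
        /// assert_eq!(n.load(SeqCst), 1);
        /// ```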
        #[derive(Debug, Default)]
        pub struct Atomic<T: Copy>(Cell<T>);

        impl<T: Copy> Atomic<T> {
            pub fn new(v: T) -> Self { Atomic(Cell::new(v)) }
            pub fn into_inner(self) -> T { self.0.into_inner() }
            pub fn load(&self, _: Ordering) -> T { self.0.get() }
            pub fn store(&self, val: T, _: Ordering) { self.0.set(val) }
            pub fn swap(&self, val: T, _: Ordering) -> T { self.0.replace(val) }
        }

        impl<T: Copy + PartialEq> Atomic<T> {
            pub fn compare_exchange(&self, current: T, new: T, _: Ordering, _: Ordering) -> Result<T, T> {
                let read = self.0.get();
                if read == current {
                    self.0.set(new);
                    Ok(read)
                } else {
                    Err(read)
                }
            }
        }

        impl<T: Add<Output = T> + Copy> Atomic<T> {
            pub fn fetch_add(&self, val: T, _: Ordering) -> T {
                let old = self.0.get();
                self.0.set(old + val);
                old
            }
        }

        pub type AtomicUsize = Atomic<usize>;
        pub type AtomicBool = Atomic<bool>;
        pub type AtomicU32 = Atomic<u32>;
        pub type AtomicU64 = Atomic<u64>;
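
        /// In this single-threaded build, `join` simply runs both closures in
        /// order and returns both results; the parallel build re-exports
        /// `rayon::join` instead. A sketch of intended use (illustrative):
        ///
        /// ```ignore (illustrative)
        /// let (a, b) = join(|| compute_a(), || compute_b());
        /// ```
        ///
        /// (`compute_a` and `compute_b` are hypothetical closures.)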
        pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
        where
            A: FnOnce() -> RA,
            B: FnOnce() -> RB,
        {
            (oper_a(), oper_b())
        }

        #[macro_export]
        macro_rules! parallel {
            ($($blocks:tt),*) => {
                // We catch panics here to ensure that all of the blocks execute.
                // This makes behavior consistent with the parallel compiler.
                let mut panic = None;
                $(
                    if let Err(p) = ::std::panic::catch_unwind(
                        ::std::panic::AssertUnwindSafe(|| $blocks)
                    ) {
                        if panic.is_none() {
                            panic = Some(p);
                        }
                    }
                )*
                if let Some(panic) = panic {
                    ::std::panic::resume_unwind(panic);
                }
            }
        }
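
        // Illustrative expansion: `parallel!({ a() }, { b() })` runs `a()` and
        // then `b()` sequentially, but a panic in `a()` no longer prevents
        // `b()` from running; the first panic is re-raised at the end
        // (`a` and `b` are hypothetical).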

        pub use std::iter::Iterator as ParallelIterator;

        pub fn par_iter<T: IntoIterator>(t: T) -> T::IntoIter {
            t.into_iter()
        }

        pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item) + Sync + Send) {
            // We catch panics here to ensure that all of the loop iterations execute.
            // This makes behavior consistent with the parallel compiler.
            let mut panic = None;
            t.into_iter().for_each(|i| {
                if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
                    if panic.is_none() {
                        panic = Some(p);
                    }
                }
            });
            if let Some(panic) = panic {
                resume_unwind(panic);
            }
        }
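
        // Illustrative: `par_for_each_in(0..4, |i| process(i))` visits every
        // element even if `process` panics part-way through, then re-raises the
        // first panic (`process` is a hypothetical callback).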

        pub type MetadataRef = OwningRef<Box<dyn Erased>, [u8]>;

        pub use std::rc::Rc as Lrc;
        pub use std::rc::Weak as Weak;
        pub use std::cell::Ref as ReadGuard;
        pub use std::cell::Ref as MappedReadGuard;
        pub use std::cell::RefMut as WriteGuard;
        pub use std::cell::RefMut as MappedWriteGuard;
        pub use std::cell::RefMut as LockGuard;
        pub use std::cell::RefMut as MappedLockGuard;

        pub use std::cell::OnceCell;

        use std::cell::RefCell as InnerRwLock;
        use std::cell::RefCell as InnerLock;

        pub struct WorkerLocal<T>(OneThread<T>);

        impl<T> WorkerLocal<T> {
            /// Creates a new worker local where the `f` closure computes the
            /// value this worker local should take for each thread in the thread pool.
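            ///
            /// A sketch of intended usage (illustrative):
            ///
            /// ```ignore (illustrative)
            /// let local: WorkerLocal<Vec<u32>> = WorkerLocal::new(|_| Vec::new());
            /// assert!(local.is_empty()); // derefs to the current thread's value
            /// ```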
            pub fn new<F: FnMut(usize) -> T>(mut f: F) -> WorkerLocal<T> {
                WorkerLocal(OneThread::new(f(0)))
            }

            /// Returns the worker-local value for each thread.
            pub fn into_inner(self) -> Vec<T> {
                vec![OneThread::into_inner(self.0)]
            }
        }

        impl<T> Deref for WorkerLocal<T> {
            type Target = T;

            fn deref(&self) -> &T {
                &*self.0
            }
        }

        pub type MTRef<'a, T> = &'a mut T;

        #[derive(Debug, Default)]
        pub struct MTLock<T>(T);

        impl<T> MTLock<T> {
            pub fn new(inner: T) -> Self { MTLock(inner) }
            pub fn into_inner(self) -> T { self.0 }
            pub fn get_mut(&mut self) -> &mut T { &mut self.0 }
            pub fn lock(&self) -> &T { &self.0 }
            pub fn lock_mut(&mut self) -> &mut T { &mut self.0 }
        }

        // FIXME: Probably a bad idea (in the threaded case)
        impl<T: Clone> Clone for MTLock<T> {
            fn clone(&self) -> Self {
                MTLock(self.0.clone())
            }
        }
    } else {
        pub use std::marker::Send as Send;
        pub use std::marker::Sync as Sync;

        pub use parking_lot::RwLockReadGuard as ReadGuard;
        pub use parking_lot::MappedRwLockReadGuard as MappedReadGuard;
        pub use parking_lot::RwLockWriteGuard as WriteGuard;
        pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;

        pub use parking_lot::MutexGuard as LockGuard;
        pub use parking_lot::MappedMutexGuard as MappedLockGuard;

        pub use std::sync::OnceLock as OnceCell;

        pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};

        pub use std::sync::Arc as Lrc;
        pub use std::sync::Weak as Weak;

        pub type MTRef<'a, T> = &'a T;

        #[derive(Debug, Default)]
        pub struct MTLock<T>(Lock<T>);

        impl<T> MTLock<T> {
            pub fn new(inner: T) -> Self { MTLock(Lock::new(inner)) }
            pub fn into_inner(self) -> T { self.0.into_inner() }
            pub fn get_mut(&mut self) -> &mut T { self.0.get_mut() }
            pub fn lock(&self) -> LockGuard<'_, T> { self.0.lock() }
            pub fn lock_mut(&self) -> LockGuard<'_, T> { self.lock() }
        }

        use parking_lot::Mutex as InnerLock;
        use parking_lot::RwLock as InnerRwLock;

        pub use rayon::{join, scope};

        /// Runs a list of blocks in parallel. The first block is executed immediately on
        /// the current thread; use it for the longest-running block.
        #[macro_export]
        macro_rules! parallel {
            (impl $fblock:tt [$($c:tt,)*] [$block:tt $(, $rest:tt)*]) => {
                parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
            };
            (impl $fblock:tt [$($blocks:tt,)*] []) => {
                ::rustc_data_structures::sync::scope(|s| {
                    $(
                        s.spawn(|_| $blocks);
                    )*
                    $fblock;
                })
            };
            ($fblock:tt, $($blocks:tt),*) => {
                // Reverse the order of the later blocks since Rayon executes them in reverse order
                // when using a single thread. This ensures the execution order matches that
                // of a single-threaded rustc.
                parallel!(impl $fblock [] [$($blocks),*]);
            };
        }
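
        // Illustrative: `parallel!({ load_crate() }, { load_deps() })` runs the
        // first block on the current thread and spawns the remaining blocks onto
        // the Rayon scope (`load_crate` and `load_deps` are hypothetical).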

        pub use rayon_core::WorkerLocal;

        pub use rayon::iter::ParallelIterator;
        use rayon::iter::IntoParallelIterator;

        pub fn par_iter<T: IntoParallelIterator>(t: T) -> T::Iter {
            t.into_par_iter()
        }

        pub fn par_for_each_in<T: IntoParallelIterator>(
            t: T,
            for_each: impl Fn(T::Item) + Sync + Send,
        ) {
            let ps: Vec<_> = t.into_par_iter().map(|i| catch_unwind(AssertUnwindSafe(|| for_each(i)))).collect();
            ps.into_iter().for_each(|p| if let Err(panic) = p {
                resume_unwind(panic)
            });
        }

        pub type MetadataRef = OwningRef<Box<dyn Erased + Send + Sync>, [u8]>;

        /// This makes locks panic if they are already held.
        /// It is only useful when you are running in a single thread.
        const ERROR_CHECKING: bool = false;

        #[macro_export]
        macro_rules! rustc_erase_owner {
            ($v:expr) => {{
                let v = $v;
                ::rustc_data_structures::sync::assert_send_val(&v);
                v.erase_send_sync_owner()
            }}
        }
    }
}

pub fn assert_sync<T: ?Sized + Sync>() {}
pub fn assert_send<T: ?Sized + Send>() {}
pub fn assert_send_val<T: ?Sized + Send>(_t: &T) {}
pub fn assert_send_sync_val<T: ?Sized + Sync + Send>(_t: &T) {}
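
// These are compile-time assertions; illustrative use:
// `assert_sync::<Lock<u32>>();` fails to compile if `Lock<u32>` is not `Sync`.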

pub trait HashMapExt<K, V> {
    /// Same as `HashMap::insert`, but it may panic if there's already an
    /// entry for `key` with a value not equal to `value`.
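    ///
    /// A sketch of intended usage (illustrative):
    ///
    /// ```ignore (illustrative)
    /// let mut map = HashMap::new();
    /// map.insert_same("key", 1);
    /// map.insert_same("key", 1); // fine: the values are equal
    /// // map.insert_same("key", 2); // would panic
    /// ```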
    fn insert_same(&mut self, key: K, value: V);
}

impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S> {
    fn insert_same(&mut self, key: K, value: V) {
        self.entry(key).and_modify(|old| assert!(*old == value)).or_insert(value);
    }
}

#[derive(Debug)]
pub struct Lock<T>(InnerLock<T>);

impl<T> Lock<T> {
    #[inline(always)]
    pub fn new(inner: T) -> Self {
        Lock(InnerLock::new(inner))
    }

    #[inline(always)]
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
        self.0.try_lock()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
        self.0.try_borrow_mut().ok()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn lock(&self) -> LockGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_lock().expect("lock was already held")
        } else {
            self.0.lock()
        }
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn lock(&self) -> LockGuard<'_, T> {
        self.0.borrow_mut()
    }

    #[inline(always)]
    pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
        f(&mut *self.lock())
    }

    #[inline(always)]
    pub fn borrow(&self) -> LockGuard<'_, T> {
        self.lock()
    }

    #[inline(always)]
    pub fn borrow_mut(&self) -> LockGuard<'_, T> {
        self.lock()
    }
}
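
// Illustrative usage of `Lock` (a sketch; `RefCell`-backed single-threaded,
// `parking_lot::Mutex`-backed in parallel builds):
//
//     let cache: Lock<Vec<u32>> = Lock::new(Vec::new());
//     cache.with_lock(|v| v.push(42));
//     assert_eq!(cache.lock().len(), 1);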

impl<T: Default> Default for Lock<T> {
    #[inline]
    fn default() -> Self {
        Lock::new(T::default())
    }
}

// FIXME: Probably a bad idea
impl<T: Clone> Clone for Lock<T> {
    #[inline]
    fn clone(&self) -> Self {
        Lock::new(self.borrow().clone())
    }
}

#[derive(Debug, Default)]
pub struct RwLock<T>(InnerRwLock<T>);

impl<T> RwLock<T> {
    #[inline(always)]
    pub fn new(inner: T) -> Self {
        RwLock(InnerRwLock::new(inner))
    }

    #[inline(always)]
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn read(&self) -> ReadGuard<'_, T> {
        self.0.borrow()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn read(&self) -> ReadGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_read().expect("lock was already held")
        } else {
            self.0.read()
        }
    }

    #[inline(always)]
    pub fn with_read_lock<F: FnOnce(&T) -> R, R>(&self, f: F) -> R {
        f(&*self.read())
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_borrow_mut().map_err(|_| ())
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_write().ok_or(())
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn write(&self) -> WriteGuard<'_, T> {
        self.0.borrow_mut()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn write(&self) -> WriteGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_write().expect("lock was already held")
        } else {
            self.0.write()
        }
    }

    #[inline(always)]
    pub fn with_write_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
        f(&mut *self.write())
    }

    #[inline(always)]
    pub fn borrow(&self) -> ReadGuard<'_, T> {
        self.read()
    }

    #[inline(always)]
    pub fn borrow_mut(&self) -> WriteGuard<'_, T> {
        self.write()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn clone_guard<'a>(rg: &ReadGuard<'a, T>) -> ReadGuard<'a, T> {
        ReadGuard::clone(rg)
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn clone_guard<'a>(rg: &ReadGuard<'a, T>) -> ReadGuard<'a, T> {
        ReadGuard::rwlock(&rg).read()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn leak(&self) -> &T {
        ReadGuard::leak(self.read())
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn leak(&self) -> &T {
        let guard = self.read();
        let ret = unsafe { &*(&*guard as *const T) };
        std::mem::forget(guard);
        ret
    }
}
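
// Illustrative usage of `RwLock` (a sketch):
//
//     let stats: RwLock<Vec<u32>> = RwLock::new(Vec::new());
//     stats.with_write_lock(|v| v.push(1));
//     stats.with_read_lock(|v| assert_eq!(v.len(), 1));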

// FIXME: Probably a bad idea
impl<T: Clone> Clone for RwLock<T> {
    #[inline]
    fn clone(&self) -> Self {
        RwLock::new(self.borrow().clone())
    }
}

/// A type which only allows its inner value to be used in one thread.
/// It will panic if it is used on multiple threads.
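///
/// A sketch of intended usage (illustrative):
///
/// ```ignore (illustrative)
/// let value = OneThread::new(vec![1, 2, 3]);
/// assert_eq!(value.len(), 3); // fine: same thread that created it
/// // using `value` from another thread would panic in `check`
/// ```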
pub struct OneThread<T> {
    #[cfg(parallel_compiler)]
    thread: thread::ThreadId,
    inner: T,
}

#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Sync for OneThread<T> {}
#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Send for OneThread<T> {}

impl<T> OneThread<T> {
    #[inline(always)]
    fn check(&self) {
        #[cfg(parallel_compiler)]
        assert_eq!(thread::current().id(), self.thread);
    }

    #[inline(always)]
    pub fn new(inner: T) -> Self {
        OneThread {
            #[cfg(parallel_compiler)]
            thread: thread::current().id(),
            inner,
        }
    }

    #[inline(always)]
    pub fn into_inner(value: Self) -> T {
        value.check();
        value.inner
    }
}

impl<T> Deref for OneThread<T> {
    type Target = T;

    fn deref(&self) -> &T {
        self.check();
        &self.inner
    }
}

impl<T> DerefMut for OneThread<T> {
    fn deref_mut(&mut self) -> &mut T {
        self.check();
        &mut self.inner
    }
}