//! This module defines types which are thread-safe if cfg!(parallel_compiler) is true.
//!
//! `Lrc` is an alias of either `Rc` or `Arc`.
//!
//! `Lock` is a mutex.
//! It internally uses `parking_lot::Mutex` if cfg!(parallel_compiler) is true,
//! `RefCell` otherwise.
//!
//! `RwLock` is a read-write lock.
//! It internally uses `parking_lot::RwLock` if cfg!(parallel_compiler) is true,
//! `RefCell` otherwise.
//!
//! `MTLock` is a mutex which disappears if cfg!(parallel_compiler) is false.
//!
//! `MTRef` is an immutable reference if cfg!(parallel_compiler) is true, and a mutable
//! reference otherwise.
//!
//! `rustc_erase_owner!` erases an `OwningRef` owner into `Erased` or `Erased + Send + Sync`
//! depending on the value of cfg!(parallel_compiler).
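//!
//! A minimal usage sketch of the `Lock` and `RwLock` wrappers defined below (illustrative
//! only; the comments assume the serial, `RefCell`-backed build):
//!
//! ```ignore (illustrative)
//! use rustc_data_structures::sync::{Lock, RwLock};
//!
//! let counter = Lock::new(0);
//! *counter.lock() += 1;               // panics if the lock is already held
//! assert_eq!(*counter.lock(), 1);
//!
//! let shared = RwLock::new(String::from("hi"));
//! assert_eq!(shared.read().len(), 2); // any number of readers
//! shared.write().push('!');           // one writer at a time
//! ```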
use std::collections::HashMap;
use std::hash::{Hash, BuildHasher};
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
use crate::owning_ref::{Erased, OwningRef};
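
// In the serial configuration below, `join` and `scope` are aliases for `serial_join` and
// `serial_scope`, which just run their closures directly on the current thread; there is
// no actual parallelism in that configuration.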
pub fn serial_join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
    where A: FnOnce() -> RA,

pub struct SerialScope;

    pub fn spawn<F>(&self, f: F)
        where F: FnOnce(&SerialScope)

pub fn serial_scope<F, R>(f: F) -> R
    where F: FnOnce(&SerialScope) -> R

pub use std::sync::atomic::Ordering::SeqCst;
pub use std::sync::atomic::Ordering;

cfg_if! {
    if #[cfg(not(parallel_compiler))] {
        pub auto trait Send {}
        pub auto trait Sync {}

        impl<T: ?Sized> Send for T {}
        impl<T: ?Sized> Sync for T {}

        macro_rules! rustc_erase_owner {

        pub struct Atomic<T: Copy>(Cell<T>);

        impl<T: Copy> Atomic<T> {
            pub fn new(v: T) -> Self {

        impl<T: Copy + PartialEq> Atomic<T> {
            pub fn into_inner(self) -> T {

            pub fn load(&self, _: Ordering) -> T {

            pub fn store(&self, val: T, _: Ordering) {

            pub fn swap(&self, val: T, _: Ordering) -> T {

            pub fn compare_exchange(&self,
                let read = self.0.get();

        impl<T: Add<Output=T> + Copy> Atomic<T> {
            pub fn fetch_add(&self, val: T, _: Ordering) -> T {
                let old = self.0.get();
                self.0.set(old + val);
                old
            }
        }

        pub type AtomicUsize = Atomic<usize>;
        pub type AtomicBool = Atomic<bool>;
        pub type AtomicU32 = Atomic<u32>;
        pub type AtomicU64 = Atomic<u64>;
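
        // Note that in this serial shim the `Ordering` argument is ignored entirely:
        // e.g. `AtomicUsize::new(0).fetch_add(1, SeqCst)` just reads and writes the inner
        // `Cell` on the current thread and returns the previous value, with no
        // synchronization involved.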

        pub use self::serial_join as join;
        pub use self::serial_scope as scope;
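
        // The serial `parallel!` macro below simply expands to its blocks, executed in
        // order on the current thread.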

        macro_rules! parallel {
            ($($blocks:tt),*) => {

        pub use std::iter::Iterator as ParallelIterator;

        pub fn par_iter<T: IntoIterator>(t: T) -> T::IntoIter {

        pub type MetadataRef = OwningRef<Box<dyn Erased>, [u8]>;

        pub use std::rc::Rc as Lrc;
        pub use std::rc::Weak as Weak;
        pub use std::cell::Ref as ReadGuard;
        pub use std::cell::Ref as MappedReadGuard;
        pub use std::cell::RefMut as WriteGuard;
        pub use std::cell::RefMut as MappedWriteGuard;
        pub use std::cell::RefMut as LockGuard;
        pub use std::cell::RefMut as MappedLockGuard;

        use std::cell::RefCell as InnerRwLock;
        use std::cell::RefCell as InnerLock;

        pub struct WorkerLocal<T>(OneThread<T>);

        impl<T> WorkerLocal<T> {
            /// Creates a new worker local where the given closure computes the
            /// value this worker local should take for each thread in the thread pool.
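            ///
            /// A minimal illustrative sketch (in this serial build there is exactly one
            /// worker, index 0):
            ///
            /// ```ignore (illustrative)
            /// let local = WorkerLocal::new(|worker_index| worker_index + 10);
            /// assert_eq!(*local, 10); // the value computed for this worker
            /// ```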
            pub fn new<F: FnMut(usize) -> T>(mut f: F) -> WorkerLocal<T> {
                WorkerLocal(OneThread::new(f(0)))

            /// Returns the worker-local value for each thread
            pub fn into_inner(self) -> Vec<T> {
                vec![OneThread::into_inner(self.0)]

        impl<T> Deref for WorkerLocal<T> {
            fn deref(&self) -> &T {

        pub type MTRef<'a, T> = &'a mut T;

        #[derive(Debug, Default)]
        pub struct MTLock<T>(T);

            pub fn new(inner: T) -> Self {

            pub fn into_inner(self) -> T {

            pub fn get_mut(&mut self) -> &mut T {

            pub fn lock(&self) -> &T {

            pub fn lock_mut(&mut self) -> &mut T {

        // FIXME: Probably a bad idea (in the threaded case)
        impl<T: Clone> Clone for MTLock<T> {
            fn clone(&self) -> Self {
                MTLock(self.0.clone())
    } else {
        pub use std::marker::Send as Send;
        pub use std::marker::Sync as Sync;

        pub use parking_lot::RwLockReadGuard as ReadGuard;
        pub use parking_lot::MappedRwLockReadGuard as MappedReadGuard;
        pub use parking_lot::RwLockWriteGuard as WriteGuard;
        pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;

        pub use parking_lot::MutexGuard as LockGuard;
        pub use parking_lot::MappedMutexGuard as MappedLockGuard;

        pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};

        pub use std::sync::Arc as Lrc;
        pub use std::sync::Weak as Weak;

        pub type MTRef<'a, T> = &'a T;

        #[derive(Debug, Default)]
        pub struct MTLock<T>(Lock<T>);

            pub fn new(inner: T) -> Self {
                MTLock(Lock::new(inner))

            pub fn into_inner(self) -> T {

            pub fn get_mut(&mut self) -> &mut T {

            pub fn lock(&self) -> LockGuard<'_, T> {

            pub fn lock_mut(&self) -> LockGuard<'_, T> {

        use parking_lot::Mutex as InnerLock;
        use parking_lot::RwLock as InnerRwLock;

        pub use rayon::{join, scope};
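
        // For example, `parallel!(a(), b())` goes through the reversing arms of the macro
        // below and ends up as roughly `scope(|s| { s.spawn(|_| b()); s.spawn(|_| a()); })`,
        // so even with a single Rayon thread the blocks run in the original a-then-b order.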

        macro_rules! parallel {
            (impl [$($c:tt,)*] [$block:tt $(, $rest:tt)*]) => {
                parallel!(impl [$block, $($c,)*] [$($rest),*])

            (impl [$($blocks:tt,)*] []) => {
                ::rustc_data_structures::sync::scope(|s| {
                    s.spawn(|_| $blocks);

            ($($blocks:tt),*) => {
                // Reverse the order of the blocks since Rayon executes them in reverse order
                // when using a single thread. This ensures the execution order matches that
                // of a single threaded rustc.
                parallel!(impl [] [$($blocks),*]);

        pub use rayon_core::WorkerLocal;

        pub use rayon::iter::ParallelIterator;
        use rayon::iter::IntoParallelIterator;

        pub fn par_iter<T: IntoParallelIterator>(t: T) -> T::Iter {

        pub type MetadataRef = OwningRef<Box<dyn Erased + Send + Sync>, [u8]>;

        /// This makes locks panic if they are already held.
        /// It is only useful when you are running in a single thread.
        const ERROR_CHECKING: bool = false;
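
        // When `ERROR_CHECKING` is turned on, the parallel `Lock::lock`, `RwLock::read` and
        // `RwLock::write` paths below use `try_*().expect("lock was already held")`, so
        // re-acquiring a lock that is already held panics immediately instead of deadlocking.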

        macro_rules! rustc_erase_owner {
                ::rustc_data_structures::sync::assert_send_val(&v);
                v.erase_send_sync_owner()

pub fn assert_sync<T: ?Sized + Sync>() {}
pub fn assert_send<T: ?Sized + Send>() {}
pub fn assert_send_val<T: ?Sized + Send>(_t: &T) {}
pub fn assert_send_sync_val<T: ?Sized + Sync + Send>(_t: &T) {}

pub trait HashMapExt<K, V> {
    /// Same as `HashMap::insert`, but it may panic if there's already an
    /// entry for `key` with a value not equal to `value`.
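    ///
    /// A minimal illustrative sketch (hypothetical usage):
    ///
    /// ```ignore (illustrative)
    /// use std::collections::HashMap;
    ///
    /// let mut map: HashMap<&str, u32> = HashMap::new();
    /// map.insert_same("key", 1);
    /// map.insert_same("key", 1); // fine: the existing value is equal
    /// // map.insert_same("key", 2); // would panic: conflicting value for "key"
    /// ```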
    fn insert_same(&mut self, key: K, value: V);

impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S> {
    fn insert_same(&mut self, key: K, value: V) {
        self.entry(key).and_modify(|old| assert!(*old == value)).or_insert(value);

/// A type whose inner value can be written once and then will stay read-only.
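///
/// A minimal usage sketch (illustrative only):
///
/// ```ignore (illustrative)
/// let once: Once<u32> = Once::new();
/// assert!(once.try_get().is_none());
/// once.set(42);
/// assert_eq!(*once.get(), 42);
/// // A second `try_set` hands the rejected value back instead of overwriting.
/// assert_eq!(once.try_set(7), Some(7));
/// ```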
// This contains a PhantomData<T> since this type conceptually owns a T outside the Mutex once
// initialized. This ensures that Once<T> is Sync only if T is. If we did not have PhantomData<T>
// we could send a &Once<Cell<bool>> to multiple threads and call `get` on it to get access
// to &Cell<bool> on those threads.
pub struct Once<T>(Lock<Option<T>>, PhantomData<T>);

    /// Creates an `Once` value which is uninitialized.
    pub fn new() -> Self {
        Once(Lock::new(None), PhantomData)

    /// Consumes the value and returns `Some(T)` if it was initialized.
    pub fn into_inner(self) -> Option<T> {

    /// Tries to initialize the inner value to `value`.
    /// Returns `None` if the inner value was uninitialized and `value` was consumed to set it;
    /// otherwise, if the inner value was already set, `value` is returned back to the caller.
    pub fn try_set(&self, value: T) -> Option<T> {
        let mut lock = self.0.lock();

    /// Tries to initialize the inner value to `value`.
    /// Returns `None` if the inner value was uninitialized and `value` was consumed to set it;
    /// otherwise, if the inner value was already set, this asserts that `value` is equal to
    /// the inner value and then returns `value` back to the caller.
    pub fn try_set_same(&self, value: T) -> Option<T> where T: Eq {
        let mut lock = self.0.lock();
        if let Some(ref inner) = *lock {
            assert!(*inner == value);

    /// Tries to initialize the inner value to `value` and panics if it was already initialized.
    pub fn set(&self, value: T) {
        assert!(self.try_set(value).is_none());

    /// Tries to initialize the inner value by calling the closure while ensuring that no one
    /// else can access the value in the meantime by holding a lock for the duration of the
    /// closure.
    /// If the value was already initialized the closure is not called and `false` is returned;
    /// otherwise the value from the closure initializes the inner value and `true` is returned.
    pub fn init_locking<F: FnOnce() -> T>(&self, f: F) -> bool {
        let mut lock = self.0.lock();

    /// Tries to initialize the inner value by calling the closure without ensuring that no one
    /// else can access it. This means that when this is called from multiple threads, multiple
    /// closures may concurrently be computing a value which the inner value should take.
    /// Only one of these closures is used to actually initialize the value.
    /// If some other closure already set the value,
    /// we return the value our closure computed wrapped in an `Option`.
    /// If our closure set the value, `None` is returned.
    /// If the value is already initialized, the closure is not called and `None` is returned.
    pub fn init_nonlocking<F: FnOnce() -> T>(&self, f: F) -> Option<T> {
        if self.0.lock().is_some() {

    /// Tries to initialize the inner value by calling the closure without ensuring that no one
    /// else can access it. This means that when this is called from multiple threads, multiple
    /// closures may concurrently be computing a value which the inner value should take.
    /// Only one of these closures is used to actually initialize the value.
    /// If some other closure already set the value, we assert that our closure computed
    /// a value equal to the value already set and then
    /// we return the value our closure computed wrapped in an `Option`.
    /// If our closure set the value, `None` is returned.
    /// If the value is already initialized, the closure is not called and `None` is returned.
    pub fn init_nonlocking_same<F: FnOnce() -> T>(&self, f: F) -> Option<T> where T: Eq {
        if self.0.lock().is_some() {
            None
        } else {
            self.try_set_same(f())

    /// Tries to get a reference to the inner value; returns `None` if it is not yet initialized.
    pub fn try_get(&self) -> Option<&T> {
        let lock = &*self.0.lock();
        if let Some(ref inner) = *lock {
            // This is safe since we won't mutate the inner value
            unsafe { Some(&*(inner as *const T)) }

    /// Gets a reference to the inner value; panics if it is not yet initialized.
    pub fn get(&self) -> &T {
        self.try_get().expect("value was not set")

    /// Gets a reference to the inner value; panics if it is not yet initialized.
    pub fn borrow(&self) -> &T {

pub struct Lock<T>(InnerLock<T>);

    pub fn new(inner: T) -> Self {
        Lock(InnerLock::new(inner))

    pub fn into_inner(self) -> T {

    pub fn get_mut(&mut self) -> &mut T {

    #[cfg(parallel_compiler)]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {

    #[cfg(not(parallel_compiler))]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
        self.0.try_borrow_mut().ok()

    #[cfg(parallel_compiler)]
    pub fn lock(&self) -> LockGuard<'_, T> {
            self.0.try_lock().expect("lock was already held")

    #[cfg(not(parallel_compiler))]
    pub fn lock(&self) -> LockGuard<'_, T> {

    pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {

    pub fn borrow(&self) -> LockGuard<'_, T> {

    pub fn borrow_mut(&self) -> LockGuard<'_, T> {

impl<T: Default> Default for Lock<T> {
    fn default() -> Self {
        Lock::new(T::default())

// FIXME: Probably a bad idea
impl<T: Clone> Clone for Lock<T> {
    fn clone(&self) -> Self {
        Lock::new(self.borrow().clone())

pub struct RwLock<T>(InnerRwLock<T>);

    pub fn new(inner: T) -> Self {
        RwLock(InnerRwLock::new(inner))

    pub fn into_inner(self) -> T {

    pub fn get_mut(&mut self) -> &mut T {

    #[cfg(not(parallel_compiler))]
    pub fn read(&self) -> ReadGuard<'_, T> {

    #[cfg(parallel_compiler)]
    pub fn read(&self) -> ReadGuard<'_, T> {
            self.0.try_read().expect("lock was already held")

    pub fn with_read_lock<F: FnOnce(&T) -> R, R>(&self, f: F) -> R {

    #[cfg(not(parallel_compiler))]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_borrow_mut().map_err(|_| ())

    #[cfg(parallel_compiler)]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_write().ok_or(())

    #[cfg(not(parallel_compiler))]
    pub fn write(&self) -> WriteGuard<'_, T> {

    #[cfg(parallel_compiler)]
    pub fn write(&self) -> WriteGuard<'_, T> {
            self.0.try_write().expect("lock was already held")

    pub fn with_write_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
        f(&mut *self.write())

    pub fn borrow(&self) -> ReadGuard<'_, T> {

    pub fn borrow_mut(&self) -> WriteGuard<'_, T> {

// FIXME: Probably a bad idea
impl<T: Clone> Clone for RwLock<T> {
    fn clone(&self) -> Self {
        RwLock::new(self.borrow().clone())

/// A type which only allows its inner value to be used in one thread.
/// It will panic if it is used on multiple threads.
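///
/// A sketch of the intended contract (illustrative only):
///
/// ```ignore (illustrative)
/// let local = OneThread::new(String::from("hi"));
/// assert_eq!(*local, "hi"); // fine: accessed from the thread that created it
/// // In a parallel build, dereferencing `local` from another thread would panic.
/// ```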
#[derive(Copy, Clone, Hash, Debug, Eq, PartialEq)]
pub struct OneThread<T> {
    #[cfg(parallel_compiler)]
    thread: thread::ThreadId,

#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Sync for OneThread<T> {}
#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Send for OneThread<T> {}

impl<T> OneThread<T> {
        #[cfg(parallel_compiler)]
        assert_eq!(thread::current().id(), self.thread);

    pub fn new(inner: T) -> Self {
            #[cfg(parallel_compiler)]
            thread: thread::current().id(),

    pub fn into_inner(value: Self) -> T {

impl<T> Deref for OneThread<T> {
    fn deref(&self) -> &T {

impl<T> DerefMut for OneThread<T> {
    fn deref_mut(&mut self) -> &mut T {