//! This module defines types which are thread safe if cfg!(parallel_compiler) is true.
//!
//! `Lrc` is an alias of either `Rc` or `Arc`.
//!
//! `Lock` is a mutex.
//! It internally uses `parking_lot::Mutex` if cfg!(parallel_compiler) is true,
//! `RefCell` otherwise.
//!
//! `RwLock` is a read-write lock.
//! It internally uses `parking_lot::RwLock` if cfg!(parallel_compiler) is true,
//! `RefCell` otherwise.
//!
//! `MTLock` is a mutex which disappears if cfg!(parallel_compiler) is false.
//!
//! `MTRef` is an immutable reference if cfg!(parallel_compiler) is true, and a mutable reference otherwise.
//!
//! `rustc_erase_owner!` erases an `OwningRef` owner into `Erased` or `Erased + Send + Sync`
//! depending on the value of cfg!(parallel_compiler).
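//!
//! A minimal usage sketch of `Lock` and `RwLock` (illustrative only; it assumes this
//! crate is available as `rustc_data_structures` and behaves the same way under either
//! cfg!(parallel_compiler) setting):
//!
//! ```ignore (illustrative sketch)
//! use rustc_data_structures::sync::{Lock, RwLock};
//!
//! let counter = Lock::new(0);
//! *counter.lock() += 1;                 // the guard releases at the end of the statement
//! assert_eq!(*counter.lock(), 1);
//!
//! let name = RwLock::new(String::from("init"));
//! name.write().push_str("ialized");
//! assert_eq!(name.read().as_str(), "initialized");
//! ```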
use std::collections::HashMap;
use std::hash::{Hash, BuildHasher};
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
use owning_ref::{Erased, OwningRef};
pub fn serial_join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
    where A: FnOnce() -> RA,
pub struct SerialScope;
pub fn spawn<F>(&self, f: F)
    where F: FnOnce(&SerialScope)
pub fn serial_scope<F, R>(f: F) -> R
    where F: FnOnce(&SerialScope) -> R
pub use std::sync::atomic::Ordering::SeqCst;
pub use std::sync::atomic::Ordering;
if #[cfg(not(parallel_compiler))] {
pub auto trait Send {}
pub auto trait Sync {}
impl<T: ?Sized> Send for T {}
impl<T: ?Sized> Sync for T {}
macro_rules! rustc_erase_owner {
pub struct Atomic<T: Copy>(Cell<T>);
impl<T: Copy> Atomic<T> {
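/// Creates a new `Atomic` wrapping `v`.
///
/// A minimal sketch of how this single-threaded stand-in mirrors the
/// `std::sync::atomic` API (illustrative; no real atomicity is involved since
/// this type only exists when cfg!(parallel_compiler) is false):
///
/// ```ignore (illustrative sketch)
/// let flag = AtomicBool::new(false);
/// flag.store(true, SeqCst);                   // the `Ordering` argument is ignored
/// assert_eq!(flag.swap(false, SeqCst), true);
/// assert_eq!(flag.load(SeqCst), false);
/// ```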
pub fn new(v: T) -> Self {
impl<T: Copy + PartialEq> Atomic<T> {
pub fn into_inner(self) -> T {
pub fn load(&self, _: Ordering) -> T {
pub fn store(&self, val: T, _: Ordering) {
pub fn swap(&self, val: T, _: Ordering) -> T {
pub fn compare_exchange(&self,
let read = self.0.get();
impl<T: Add<Output=T> + Copy> Atomic<T> {
pub fn fetch_add(&self, val: T, _: Ordering) -> T {
let old = self.0.get();
self.0.set(old + val);
pub type AtomicUsize = Atomic<usize>;
pub type AtomicBool = Atomic<bool>;
pub type AtomicU32 = Atomic<u32>;
pub type AtomicU64 = Atomic<u64>;
pub use self::serial_join as join;
pub use self::serial_scope as scope;
pub use std::iter::Iterator as ParallelIterator;
pub fn par_iter<T: IntoIterator>(t: T) -> T::IntoIter {
pub type MetadataRef = OwningRef<Box<dyn Erased>, [u8]>;
pub use std::rc::Rc as Lrc;
pub use std::rc::Weak as Weak;
pub use std::cell::Ref as ReadGuard;
pub use std::cell::Ref as MappedReadGuard;
pub use std::cell::RefMut as WriteGuard;
pub use std::cell::RefMut as MappedWriteGuard;
pub use std::cell::RefMut as LockGuard;
pub use std::cell::RefMut as MappedLockGuard;
use std::cell::RefCell as InnerRwLock;
use std::cell::RefCell as InnerLock;
pub struct WorkerLocal<T>(OneThread<T>);
impl<T> WorkerLocal<T> {
/// Creates a new worker local where the closure `f` computes the
/// value this worker local should take for each thread in the thread pool.
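///
/// A minimal sketch of the intended use (illustrative; in a build without
/// cfg!(parallel_compiler) the closure runs exactly once, with index 0):
///
/// ```ignore (illustrative sketch)
/// let ids = WorkerLocal::new(|worker_index| worker_index + 1);
/// assert_eq!(*ids, 1);                   // derefs to the current worker's value
/// assert_eq!(ids.into_inner(), vec![1]);
/// ```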
pub fn new<F: FnMut(usize) -> T>(mut f: F) -> WorkerLocal<T> {
WorkerLocal(OneThread::new(f(0)))
/// Returns the worker-local value for each thread.
pub fn into_inner(self) -> Vec<T> {
vec![OneThread::into_inner(self.0)]
impl<T> Deref for WorkerLocal<T> {
fn deref(&self) -> &T {
pub type MTRef<'a, T> = &'a mut T;
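/// A wrapper that is a real mutex only in parallel builds; when
/// cfg!(parallel_compiler) is false it is a plain newtype around `T`.
///
/// A minimal sketch (illustrative; assumes a non-parallel build, where `lock`
/// hands out a plain shared reference):
///
/// ```ignore (illustrative sketch)
/// let mut stats = MTLock::new(0usize);
/// *stats.lock_mut() += 1;
/// assert_eq!(*stats.lock(), 1);
/// assert_eq!(stats.into_inner(), 1);
/// ```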
#[derive(Debug, Default)]
pub struct MTLock<T>(T);
pub fn new(inner: T) -> Self {
pub fn into_inner(self) -> T {
pub fn get_mut(&mut self) -> &mut T {
pub fn lock(&self) -> &T {
pub fn lock_mut(&mut self) -> &mut T {
// FIXME: Probably a bad idea (in the threaded case)
impl<T: Clone> Clone for MTLock<T> {
fn clone(&self) -> Self {
MTLock(self.0.clone())
pub use std::marker::Send as Send;
pub use std::marker::Sync as Sync;
pub use parking_lot::RwLockReadGuard as ReadGuard;
pub use parking_lot::MappedRwLockReadGuard as MappedReadGuard;
pub use parking_lot::RwLockWriteGuard as WriteGuard;
pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;
pub use parking_lot::MutexGuard as LockGuard;
pub use parking_lot::MappedMutexGuard as MappedLockGuard;
pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};
pub use std::sync::Arc as Lrc;
pub use std::sync::Weak as Weak;
pub type MTRef<'a, T> = &'a T;
#[derive(Debug, Default)]
pub struct MTLock<T>(Lock<T>);
pub fn new(inner: T) -> Self {
MTLock(Lock::new(inner))
pub fn into_inner(self) -> T {
pub fn get_mut(&mut self) -> &mut T {
pub fn lock(&self) -> LockGuard<T> {
pub fn lock_mut(&self) -> LockGuard<T> {
use parking_lot::Mutex as InnerLock;
use parking_lot::RwLock as InnerRwLock;
pub use rayon::{join, scope};
pub use rayon_core::WorkerLocal;
pub use rayon::iter::ParallelIterator;
use rayon::iter::IntoParallelIterator;
pub fn par_iter<T: IntoParallelIterator>(t: T) -> T::Iter {
pub type MetadataRef = OwningRef<Box<dyn Erased + Send + Sync>, [u8]>;
/// This makes locks panic if they are already held.
/// It is only useful when you are running in a single thread.
const ERROR_CHECKING: bool = false;
macro_rules! rustc_erase_owner {
::rustc_data_structures::sync::assert_send_val(&v);
v.erase_send_sync_owner()
pub fn assert_sync<T: ?Sized + Sync>() {}
pub fn assert_send<T: ?Sized + Send>() {}
pub fn assert_send_val<T: ?Sized + Send>(_t: &T) {}
pub fn assert_send_sync_val<T: ?Sized + Sync + Send>(_t: &T) {}
pub trait HashMapExt<K, V> {
/// Same as `HashMap::insert`, but it may panic if there's already an
/// entry for `key` with a value not equal to `value`.
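///
/// A minimal sketch (illustrative):
///
/// ```ignore (illustrative sketch)
/// let mut map = HashMap::new();
/// map.insert_same("key", 1);
/// map.insert_same("key", 1);    // fine: the existing value is equal
/// // map.insert_same("key", 2); // would panic: the values differ
/// ```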
fn insert_same(&mut self, key: K, value: V);
impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S> {
fn insert_same(&mut self, key: K, value: V) {
self.entry(key).and_modify(|old| assert!(*old == value)).or_insert(value);
/// A type whose inner value can be written once and then will stay read-only.
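///
/// A minimal usage sketch (illustrative):
///
/// ```ignore (illustrative sketch)
/// let once: Once<u32> = Once::new();
/// assert!(once.try_get().is_none());
/// once.set(7);                  // would panic if the value had already been set
/// assert_eq!(*once.get(), 7);
/// ```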
// This contains a PhantomData<T> since this type conceptually owns a T outside the Mutex once
// initialized. This ensures that Once<T> is Sync only if T is. If we did not have PhantomData<T>
// we could send a &Once<Cell<bool>> to multiple threads and call `get` on it to get access
// to &Cell<bool> on those threads.
pub struct Once<T>(Lock<Option<T>>, PhantomData<T>);
/// Creates a `Once` value which is uninitialized.
pub fn new() -> Self {
Once(Lock::new(None), PhantomData)
/// Consumes the value and returns `Some(T)` if it was initialized.
pub fn into_inner(self) -> Option<T> {
/// Tries to initialize the inner value to `value`.
/// Returns `None` if the inner value was uninitialized and `value` was consumed to set it;
/// otherwise, if the inner value was already set, it returns `value` back to the caller.
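///
/// A minimal sketch (illustrative):
///
/// ```ignore (illustrative sketch)
/// let once = Once::new();
/// assert_eq!(once.try_set(1), None);     // first write wins
/// assert_eq!(once.try_set(2), Some(2));  // already set; the new value is handed back
/// assert_eq!(*once.get(), 1);
/// ```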
pub fn try_set(&self, value: T) -> Option<T> {
let mut lock = self.0.lock();
/// Tries to initialize the inner value to `value`.
/// Returns `None` if the inner value was uninitialized and `value` was consumed to set it;
/// otherwise, if the inner value was already set, it asserts that `value` is equal to the inner
/// value and then returns `value` back to the caller.
pub fn try_set_same(&self, value: T) -> Option<T> where T: Eq {
let mut lock = self.0.lock();
if let Some(ref inner) = *lock {
assert!(*inner == value);
/// Initializes the inner value to `value`; panics if it was already initialized.
pub fn set(&self, value: T) {
assert!(self.try_set(value).is_none());
/// Tries to initialize the inner value by calling the closure while ensuring that no one else
/// can access the value in the meantime by holding a lock for the duration of the closure.
/// If the value was already initialized the closure is not called and `false` is returned;
/// otherwise, if the value from the closure initializes the inner value, `true` is returned.
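///
/// A minimal sketch (illustrative):
///
/// ```ignore (illustrative sketch)
/// let once = Once::new();
/// assert!(once.init_locking(|| 3));    // our closure ran and set the value
/// assert!(!once.init_locking(|| 4));   // already set; this closure is not called
/// assert_eq!(*once.get(), 3);
/// ```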
pub fn init_locking<F: FnOnce() -> T>(&self, f: F) -> bool {
let mut lock = self.0.lock();
/// Tries to initialize the inner value by calling the closure without ensuring that no one
/// else can access it. This means that when this is called from multiple threads, multiple
/// closures may concurrently be computing a value which the inner value should take.
/// Only one of these closures is used to actually initialize the value.
/// If some other closure already set the value,
/// we return the value our closure computed wrapped in an `Option`.
/// If our closure set the value, `None` is returned.
/// If the value is already initialized, the closure is not called and `None` is returned.
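///
/// A minimal single-threaded sketch (illustrative; the `Some(..)` case only shows up
/// when another thread races in between the initial check and our write):
///
/// ```ignore (illustrative sketch)
/// let once = Once::new();
/// assert_eq!(once.init_nonlocking(|| 3), None);  // our closure set the value
/// assert_eq!(once.init_nonlocking(|| 4), None);  // already set; this closure is not called
/// assert_eq!(*once.get(), 3);
/// ```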
pub fn init_nonlocking<F: FnOnce() -> T>(&self, f: F) -> Option<T> {
if self.0.lock().is_some() {
/// Tries to initialize the inner value by calling the closure without ensuring that no one
/// else can access it. This means that when this is called from multiple threads, multiple
/// closures may concurrently be computing a value which the inner value should take.
/// Only one of these closures is used to actually initialize the value.
/// If some other closure already set the value, we assert that the value our closure computed
/// is equal to the value already set, and then
/// we return the value our closure computed wrapped in an `Option`.
/// If our closure set the value, `None` is returned.
/// If the value is already initialized, the closure is not called and `None` is returned.
pub fn init_nonlocking_same<F: FnOnce() -> T>(&self, f: F) -> Option<T> where T: Eq {
if self.0.lock().is_some() {
self.try_set_same(f())
/// Tries to get a reference to the inner value; returns `None` if it is not yet initialized.
pub fn try_get(&self) -> Option<&T> {
let lock = &*self.0.lock();
if let Some(ref inner) = *lock {
// This is safe since we won't mutate the inner value
unsafe { Some(&*(inner as *const T)) }
/// Gets a reference to the inner value; panics if it is not yet initialized.
pub fn get(&self) -> &T {
self.try_get().expect("value was not set")
/// Gets a reference to the inner value; panics if it is not yet initialized.
pub fn borrow(&self) -> &T {
pub struct Lock<T>(InnerLock<T>);
pub fn new(inner: T) -> Self {
Lock(InnerLock::new(inner))
pub fn into_inner(self) -> T {
pub fn get_mut(&mut self) -> &mut T {
#[cfg(parallel_compiler)]
pub fn try_lock(&self) -> Option<LockGuard<T>> {
#[cfg(not(parallel_compiler))]
pub fn try_lock(&self) -> Option<LockGuard<T>> {
self.0.try_borrow_mut().ok()
#[cfg(parallel_compiler)]
pub fn lock(&self) -> LockGuard<T> {
self.0.try_lock().expect("lock was already held")
#[cfg(not(parallel_compiler))]
pub fn lock(&self) -> LockGuard<T> {
pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
pub fn borrow(&self) -> LockGuard<T> {
pub fn borrow_mut(&self) -> LockGuard<T> {
impl<T: Default> Default for Lock<T> {
fn default() -> Self {
Lock::new(T::default())
// FIXME: Probably a bad idea
impl<T: Clone> Clone for Lock<T> {
fn clone(&self) -> Self {
Lock::new(self.borrow().clone())
pub struct RwLock<T>(InnerRwLock<T>);
pub fn new(inner: T) -> Self {
RwLock(InnerRwLock::new(inner))
pub fn into_inner(self) -> T {
pub fn get_mut(&mut self) -> &mut T {
#[cfg(not(parallel_compiler))]
pub fn read(&self) -> ReadGuard<T> {
#[cfg(parallel_compiler)]
pub fn read(&self) -> ReadGuard<T> {
self.0.try_read().expect("lock was already held")
pub fn with_read_lock<F: FnOnce(&T) -> R, R>(&self, f: F) -> R {
#[cfg(not(parallel_compiler))]
pub fn try_write(&self) -> Result<WriteGuard<T>, ()> {
self.0.try_borrow_mut().map_err(|_| ())
#[cfg(parallel_compiler)]
pub fn try_write(&self) -> Result<WriteGuard<T>, ()> {
self.0.try_write().ok_or(())
#[cfg(not(parallel_compiler))]
pub fn write(&self) -> WriteGuard<T> {
#[cfg(parallel_compiler)]
pub fn write(&self) -> WriteGuard<T> {
self.0.try_write().expect("lock was already held")
pub fn with_write_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
f(&mut *self.write())
pub fn borrow(&self) -> ReadGuard<T> {
pub fn borrow_mut(&self) -> WriteGuard<T> {
// FIXME: Probably a bad idea
impl<T: Clone> Clone for RwLock<T> {
fn clone(&self) -> Self {
RwLock::new(self.borrow().clone())
/// A type which only allows its inner value to be used in one thread.
/// It will panic if it is used on multiple threads.
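///
/// A minimal sketch (illustrative; everything stays on the creating thread):
///
/// ```ignore (illustrative sketch)
/// let value = OneThread::new(vec![1, 2, 3]);
/// assert_eq!(value.len(), 3);   // deref asserts we are on the owning thread in parallel builds
/// assert_eq!(OneThread::into_inner(value), vec![1, 2, 3]);
/// // In parallel builds, touching `value` from another thread would panic.
/// ```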
#[derive(Copy, Clone, Hash, Debug, Eq, PartialEq)]
pub struct OneThread<T> {
#[cfg(parallel_compiler)]
thread: thread::ThreadId,
#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Sync for OneThread<T> {}
#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Send for OneThread<T> {}
impl<T> OneThread<T> {
#[cfg(parallel_compiler)]
assert_eq!(thread::current().id(), self.thread);
pub fn new(inner: T) -> Self {
#[cfg(parallel_compiler)]
thread: thread::current().id(),
pub fn into_inner(value: Self) -> T {
impl<T> Deref for OneThread<T> {
fn deref(&self) -> &T {
impl<T> DerefMut for OneThread<T> {
fn deref_mut(&mut self) -> &mut T {