use super::abi;
use crate::{
    cell::UnsafeCell,
    mem::MaybeUninit,
    sync::atomic::{AtomicBool, AtomicUsize, Ordering},
};
/// A mutex implemented by `dis_dsp` (for intra-core synchronization) and a
/// spinlock (for inter-core synchronization).
pub struct SpinMutex<T = ()> {
    locked: AtomicBool,
    data: UnsafeCell<T>,
}
impl<T> SpinMutex<T> {
    pub const fn new(x: T) -> Self {
        Self { locked: AtomicBool::new(false), data: UnsafeCell::new(x) }
    }
    /// Acquires the lock, runs `f` on the protected data, and releases the
    /// lock (re-enabling dispatching) when `f` returns.
    pub fn with_locked<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        struct SpinMutexGuard<'a>(&'a AtomicBool);

        impl Drop for SpinMutexGuard<'_> {
            fn drop(&mut self) {
                // Release the spinlock, then re-enable dispatching.
                self.0.store(false, Ordering::Release);
                unsafe { abi::ena_dsp() };
            }
        }
        let _guard;
        if unsafe { abi::sns_dsp() } == 0 {
            // Dispatching is currently enabled. Disable it to prevent
            // preemption while the lock is held (intra-core synchronization).
            let er = unsafe { abi::dis_dsp() };
            debug_assert!(er >= 0);

            // Spin until this processor acquires the lock
            // (inter-core synchronization).
            while self.locked.swap(true, Ordering::Acquire) {}

            _guard = SpinMutexGuard(&self.locked);
        }
        f(unsafe { &mut *self.data.get() })
    }
}

// Safety: `with_locked` ensures mutually exclusive access to `data`.
unsafe impl<T: Send> Send for SpinMutex<T> {}
unsafe impl<T: Send> Sync for SpinMutex<T> {}
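// Usage sketch (hypothetical; `COUNT` and `bump` are illustrative names, not
// part of this module). A shared counter protected by `SpinMutex`:
//
//     static COUNT: SpinMutex<usize> = SpinMutex::new(0);
//
//     fn bump() -> usize {
//         COUNT.with_locked(|count| {
//             *count += 1;
//             *count
//         })
//     }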
/// `OnceCell<(abi::ID, T)>` implemented by `dis_dsp` (for intra-core
/// synchronization) and a spinlock (for inter-core synchronization).
///
/// It's assumed that `0` is not a valid ID, and all kernel
/// object IDs fall into the range `1..=usize::MAX`.
pub struct SpinIdOnceCell<T = ()> {
    id: AtomicUsize,
    extra: UnsafeCell<MaybeUninit<T>>,
    spin: SpinMutex<()>,
}
const ID_UNINIT: usize = 0;

impl<T> SpinIdOnceCell<T> {
    pub const fn new() -> Self {
        Self {
            id: AtomicUsize::new(ID_UNINIT),
            extra: UnsafeCell::new(MaybeUninit::uninit()),
            spin: SpinMutex::new(()),
        }
    }
    pub fn get(&self) -> Option<(abi::ID, &T)> {
        match self.id.load(Ordering::Acquire) {
            ID_UNINIT => None,
            id => Some((id as abi::ID, unsafe { (&*self.extra.get()).assume_init_ref() })),
        }
    }
    pub fn get_mut(&mut self) -> Option<(abi::ID, &mut T)> {
        match *self.id.get_mut() {
            ID_UNINIT => None,
            id => Some((id as abi::ID, unsafe { (&mut *self.extra.get()).assume_init_mut() })),
        }
    }
    /// Gets the contents of the cell without checking whether it's
    /// initialized.
    ///
    /// # Safety
    ///
    /// The cell must be initialized.
    pub unsafe fn get_unchecked(&self) -> (abi::ID, &T) {
        (self.id.load(Ordering::Acquire) as abi::ID, unsafe {
            (&*self.extra.get()).assume_init_ref()
        })
    }
    /// Assigns the content without checking if it's already initialized or
    /// being initialized.
    ///
    /// # Safety
    ///
    /// The cell must be neither initialized nor being initialized by
    /// another thread.
    pub unsafe fn set_unchecked(&self, (id, extra): (abi::ID, T)) {
        debug_assert!(self.get().is_none());

        // Assumption: A positive `abi::ID` fits in `usize`.
        debug_assert!(id >= 0);
        debug_assert!(usize::try_from(id).is_ok());
        let id = id as usize;

        // Store `extra` first, then publish `id` with release ordering so
        // that a reader observing the ID also observes the initialized value.
        unsafe { *self.extra.get() = MaybeUninit::new(extra) };
        self.id.store(id, Ordering::Release);
    }
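    // Usage sketch (hypothetical; `create_object` stands in for a kernel
    // object creation call and is not part of this module). `set_unchecked`
    // is sound here because the cell hasn't been shared yet:
    //
    //     let cell: SpinIdOnceCell<()> = SpinIdOnceCell::new();
    //     let id: abi::ID = create_object();
    //     // Safety: no other thread can observe or initialize `cell` yet.
    //     unsafe { cell.set_unchecked((id, ())) };
    //     assert!(cell.get().is_some());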
    /// Gets the contents of the cell, initializing it with `f` if
    /// the cell was empty. If the cell was empty and `f` failed, an
    /// error is returned.
    ///
    /// Warning: `f` must not perform a blocking operation, which
    /// includes panicking.
    pub fn get_or_try_init<F, E>(&self, f: F) -> Result<(abi::ID, &T), E>
    where
        F: FnOnce() -> Result<(abi::ID, T), E>,
    {
        // Fast path: the cell is already initialized.
        if let Some(x) = self.get() {
            return Ok(x);
        }

        // Slow path: initialize the cell under the spinlock.
        self.initialize(f)?;

        debug_assert!(self.get().is_some());

        // Safety: The inner value has been initialized.
        Ok(unsafe { self.get_unchecked() })
    }
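    // Usage sketch (hypothetical; `create_object` and `MyError` are
    // placeholders for a fallible kernel-object creation routine and its
    // error type, neither of which is part of this module):
    //
    //     static CELL: SpinIdOnceCell<()> = SpinIdOnceCell::new();
    //
    //     fn object_id() -> Result<abi::ID, MyError> {
    //         // Creates the object on the first call; afterwards, returns
    //         // the cached ID without taking the spinlock.
    //         let (id, _) = CELL.get_or_try_init(|| Ok((create_object()?, ())))?;
    //         Ok(id)
    //     }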
    fn initialize<F, E>(&self, f: F) -> Result<(), E>
    where
        F: FnOnce() -> Result<(abi::ID, T), E>,
    {
        self.spin.with_locked(|_| {
            // Re-check under the lock: another thread may have initialized
            // the cell while this one was spinning.
            if self.id.load(Ordering::Relaxed) == ID_UNINIT {
                let (initialized_id, initialized_extra) = f()?;

                // Assumption: A positive `abi::ID` fits in `usize`.
                debug_assert!(initialized_id >= 0);
                debug_assert!(usize::try_from(initialized_id).is_ok());
                let initialized_id = initialized_id as usize;

                // Store the initialized contents. Use the release ordering to
                // make sure the write is visible to the callers of `get`.
                unsafe { *self.extra.get() = MaybeUninit::new(initialized_extra) };
                self.id.store(initialized_id, Ordering::Release);
            }
            Ok(())
        })
    }
}
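// Design note: `get_or_try_init` and `initialize` form a double-checked
// locking scheme. The fast path is a lock-free acquire-load of `id`; only on
// a miss is `spin` taken, and `id` is re-checked there. A relaxed load
// suffices for the re-check because the spinlock's acquire/release
// operations already order it after a previous initializer's release store.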
impl<T> Drop for SpinIdOnceCell<T> {
    fn drop(&mut self) {
        // Drop the inner value only if the cell was initialized.
        if self.get_mut().is_some() {
            unsafe { (&mut *self.extra.get()).assume_init_drop() };
        }
    }
}

// Safety: `get_or_try_init`/`initialize` synchronize the initialization of
// the inner value, and `get` only hands out shared references to it.
unsafe impl<T: Send + Sync> Sync for SpinIdOnceCell<T> {}