1 use crate::cell::UnsafeCell;
2 use crate::collections::VecDeque;
3 use crate::ffi::c_void;
5 use crate::ops::{Deref, DerefMut, Drop};
7 use crate::sync::atomic::{AtomicUsize, Ordering};
8 use crate::sys::hermit::abi;
10 /// This type provides a lock based on busy waiting to realize mutual exclusion
14 /// This structure behaves a lot like a common mutex. There are some differences:
16 /// - By using busy waiting, it can be used outside the runtime.
17 /// - It is a so-called ticket lock and is completely fair.
// Over-align the lock — 128 bytes on x86_64, 64 elsewhere.
// NOTE(review): presumably cache-line alignment to avoid false sharing
// between adjacent lock instances; confirm the intended line sizes.
18 #[cfg_attr(target_arch = "x86_64", repr(align(128)))]
19 #[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
20 struct Spinlock<T: ?Sized> {
// SAFETY: the ticket discipline serializes all access to the inner `T`,
// so the lock may be shared or sent across threads whenever `T: Send`.
26 unsafe impl<T: ?Sized + Send> Sync for Spinlock<T> {}
27 unsafe impl<T: ?Sized + Send> Send for Spinlock<T> {}
29 /// A guard through which the protected data can be accessed
31 /// When the guard falls out of scope it will release the lock.
32 struct SpinlockGuard<'a, T: ?Sized + 'a> {
// Borrow of the owning lock's `dequeue` ("now serving") counter; it is
// incremented when the guard drops, admitting the next ticket holder.
33 dequeue: &'a AtomicUsize,
// Create a new, unlocked spinlock wrapping `user_data`.
38 pub const fn new(user_data: T) -> Spinlock<T> {
// `queue` is the next ticket to hand out; `dequeue` is the ticket
// currently being served. The first ticket taken is 0 + 1 == 1, which
// matches the initial `dequeue` value, so the first locker enters at once.
40 queue: AtomicUsize::new(0),
41 dequeue: AtomicUsize::new(1),
42 data: UnsafeCell::new(user_data),
// Take a ticket and busy-wait until it is the one being served
// (the classic fair ticket-lock acquire).
47 fn obtain_lock(&self) {
48 let ticket = self.queue.fetch_add(1, Ordering::SeqCst) + 1;
49 while self.dequeue.load(Ordering::SeqCst) != ticket {
// Acquire the lock, returning a guard that releases it on drop.
//
// NOTE(review): the guard hands out `&mut T` obtained from the
// `UnsafeCell`, which is sound only while the ticket discipline
// guarantees a single guard exists at a time — hence `unsafe`.
55 pub unsafe fn lock(&self) -> SpinlockGuard<'_, T> {
57 SpinlockGuard { dequeue: &self.dequeue, data: &mut *self.data.get() }
// Construct a spinlock around `T::default()`.
61 impl<T: ?Sized + Default> Default for Spinlock<T> {
62 fn default() -> Spinlock<T> {
63 Spinlock::new(Default::default())
// Shared access to the protected data for as long as the guard lives.
67 impl<'a, T: ?Sized> Deref for SpinlockGuard<'a, T> {
69 fn deref(&self) -> &T {
// Exclusive access to the protected data for as long as the guard lives.
74 impl<'a, T: ?Sized> DerefMut for SpinlockGuard<'a, T> {
75 fn deref_mut(&mut self) -> &mut T {
80 impl<'a, T: ?Sized> Drop for SpinlockGuard<'a, T> {
81 /// The dropping of the SpinlockGuard will release the lock it was created from.
// Advance the "now serving" counter so the next ticket holder may enter.
83 self.dequeue.fetch_add(1, Ordering::SeqCst);
87 /// A priority queue of task ids: one FIFO queue per priority level.
88 struct PriorityQueue {
// Indexed by priority; each per-priority queue is created lazily, so an
// unused priority level stays `None`.
89 queues: [Option<VecDeque<abi::Tid>>; abi::NO_PRIORITIES],
// Create an empty queue with no per-priority queues allocated yet.
94 pub const fn new() -> PriorityQueue {
// The initializers are spelled out element by element because the
// `[None; N]` shorthand would require `Option<VecDeque<_>>: Copy`,
// which it is not.
97 None, None, None, None, None, None, None, None, None, None, None, None, None, None,
98 None, None, None, None, None, None, None, None, None, None, None, None, None, None,
105 /// Add a task id by its priority to the queue
106 pub fn push(&mut self, prio: abi::Priority, id: abi::Tid) {
// Double `.into()`: Priority -> intermediate integer -> usize index.
// NOTE(review): assumes the conversion yields a value < NO_PRIORITIES;
// otherwise the indexing below panics — confirm against `abi::Priority`.
107 let i: usize = prio.into().into();
// Mark this priority level as non-empty in the bitmap.
108 self.prio_bitmap |= (1 << i) as u64;
109 if let Some(queue) = &mut self.queues[i] {
// First task at this priority: allocate its queue lazily.
112 let mut queue = VecDeque::new();
114 self.queues[i] = Some(queue);
// Pop the front task id from the queue at `queue_index`, clearing the
// corresponding bitmap bit once that queue becomes empty.
118 fn pop_from_queue(&mut self, queue_index: usize) -> Option<abi::Tid> {
119 if let Some(queue) = &mut self.queues[queue_index] {
120 let id = queue.pop_front();
122 if queue.is_empty() {
// `as` binds tighter than `<<`, so this is `1 << (queue_index as u64)`,
// i.e. the cast only affects the shift amount, which is what is wanted.
123 self.prio_bitmap &= !(1 << queue_index as u64);
132 /// Pop the task handle with the highest priority from the queue
133 pub fn pop(&mut self) -> Option<abi::Tid> {
// Scan the bitmap in index order; the first set bit wins.
// NOTE(review): this treats index 0 as the highest priority — confirm
// against `abi::Priority`'s encoding.
134 for i in 0..abi::NO_PRIORITIES {
135 if self.prio_bitmap & (1 << i) != 0 {
136 return self.pop_from_queue(i);
// Task ids currently blocked waiting for this mutex, ordered by priority.
146 blocked_task: PriorityQueue,
// A mutex starts out unlocked with no blocked tasks.
150 pub const fn new() -> MutexInner {
151 MutexInner { locked: false, blocked_task: PriorityQueue::new() }
// All mutex state (locked flag + waiter queue) lives behind the
// busy-wait spinlock defined above.
156 inner: Spinlock<MutexInner>,
// NOTE(review): boxed, presumably so the mutex has a stable heap address
// for as long as tasks may be blocked on it — confirm against callers.
159 pub type MovableMutex = Box<Mutex>;
// SAFETY: the inner `Spinlock` serializes every access to the mutex state.
161 unsafe impl Send for Mutex {}
162 unsafe impl Sync for Mutex {}
// Create a new, unlocked mutex.
165 pub const fn new() -> Mutex {
166 Mutex { inner: Spinlock::new(MutexInner::new()) }
// Reset the mutex to its initial (unlocked, no waiters) state.
170 pub unsafe fn init(&mut self) {
171 self.inner = Spinlock::new(MutexInner::new());
// Acquire the mutex, blocking the current task if it is already held.
175 pub unsafe fn lock(&self) {
177 let mut guard = self.inner.lock();
// Fast path: the mutex is free and can be taken immediately.
178 if guard.locked == false {
// Slow path: record this task as blocked at its current priority,
// then ask the kernel to deschedule it.
182 let prio = abi::get_priority();
183 let id = abi::getpid();
185 guard.blocked_task.push(prio, id);
// NOTE(review): `block_current_task` is invoked while the inner
// spinlock guard is still held — verify the guard is dropped before
// the task is actually descheduled, or other lockers will spin on
// the inner lock until this task runs again.
186 abi::block_current_task();
// Release the mutex and wake the highest-priority blocked task, if any.
194 pub unsafe fn unlock(&self) {
195 let mut guard = self.inner.lock();
196 guard.locked = false;
197 if let Some(tid) = guard.blocked_task.pop() {
// `locked` is already false here, so the woken task must re-acquire
// the mutex itself and may race with other incoming lockers —
// NOTE(review): confirm this non-handoff wakeup is intended.
198 abi::wakeup_task(tid);
// Try to acquire the mutex without blocking; returns whether it was taken.
203 pub unsafe fn try_lock(&self) -> bool {
204 let mut guard = self.inner.lock();
205 if guard.locked == false {
// Nothing to tear down: all state is inline and dropped normally.
212 pub unsafe fn destroy(&self) {}
// A recursive (re-entrant) mutex backed by an opaque kernel handle.
215 pub struct ReentrantMutex {
// Handle returned by `recmutex_init`; null until `init` has run.
216 inner: *const c_void,
219 impl ReentrantMutex {
// Create an uninitialized handle; `init` must be called before any use.
220 pub const unsafe fn uninitialized() -> ReentrantMutex {
221 ReentrantMutex { inner: ptr::null() }
// Ask the kernel to allocate the recursive mutex, storing its handle.
// NOTE(review): this writes through `&self.inner` (a shared reference)
// via a `*const -> *mut` cast without an `UnsafeCell` — flag for
// soundness review.
// NOTE(review): the ABI result is discarded (`let _ =`), so an init
// failure silently leaves a null handle.
225 pub unsafe fn init(&self) {
226 let _ = abi::recmutex_init(&self.inner as *const *const c_void as *mut _);
// Acquire the kernel mutex; errors from the ABI call are ignored.
230 pub unsafe fn lock(&self) {
231 let _ = abi::recmutex_lock(self.inner);
// Body not visible in this view; presumably delegates to the kernel —
// TODO confirm.
235 pub unsafe fn try_lock(&self) -> bool {
// Release one level of the recursive lock; errors are ignored.
240 pub unsafe fn unlock(&self) {
241 let _ = abi::recmutex_unlock(self.inner);
// Destroy the kernel mutex; errors are ignored.
245 pub unsafe fn destroy(&self) {
246 let _ = abi::recmutex_destroy(self.inner);