1 use crate::cell::UnsafeCell;
2 use crate::collections::VecDeque;
4 use crate::ops::{Deref, DerefMut, Drop};
6 use crate::sync::atomic::{AtomicUsize, Ordering};
7 use crate::sys::hermit::abi;
9 /// This type provides a lock based on busy waiting to realize mutual exclusion
13 /// This structure behaves a lot like a common mutex. There are some differences:
15 /// - By using busy waiting, it can be used outside the runtime.
16 /// - It is a so called ticket lock and is completely fair.
// Over-aligned presumably to keep the lock on its own cache line and avoid
// false sharing with neighboring data -- TODO confirm intended line sizes.
17 #[cfg_attr(target_arch = "x86_64", repr(align(128)))]
18 #[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
// Fields are elided in this view; per `new()` below they are the ticket
// counters `queue`/`dequeue` (AtomicUsize) and the protected value `data`
// (UnsafeCell<T>).
19 struct Spinlock<T: ?Sized> {
// SAFETY: the spinlock serializes all access to the protected data, and
// `lock()` hands out `&mut T` across threads -- that requires `T: Send`,
// which both impls demand; `T: Sync` is not needed.
25 unsafe impl<T: ?Sized + Send> Sync for Spinlock<T> {}
26 unsafe impl<T: ?Sized + Send> Send for Spinlock<T> {}
28 /// A guard to which the protected data can be accessed
30 /// When the guard falls out of scope it will release the lock.
31 struct SpinlockGuard<'a, T: ?Sized + 'a> {
// Borrow of the lock's `dequeue` ticket counter; `Drop` increments it to
// hand the lock to the holder of the next ticket.
32 dequeue: &'a AtomicUsize,
/// Creates an unlocked spinlock wrapping `user_data`.
///
/// `queue` starts at 0 and `dequeue` at 1: `obtain_lock` draws a ticket via
/// `fetch_add(1) + 1`, so the very first ticket is 1 and matches `dequeue`
/// immediately, i.e. the lock starts out free.
37 pub const fn new(user_data: T) -> Spinlock<T> {
39 queue: AtomicUsize::new(0),
40 dequeue: AtomicUsize::new(1),
41 data: UnsafeCell::new(user_data),
/// Busy-waits until this caller's ticket is the one being served.
46 fn obtain_lock(&self) {
// Draw the next ticket; +1 because `fetch_add` returns the previous value.
47 let ticket = self.queue.fetch_add(1, Ordering::SeqCst) + 1;
// NOTE(review): `counter` is only used in the elided loop body -- presumably
// a spin/backoff or yield heuristic; confirm against the full source.
48 let mut counter: u16 = 0;
// Spin until `dequeue` reaches our ticket (FIFO fairness of a ticket lock).
49 while self.dequeue.load(Ordering::SeqCst) != ticket {
/// Acquires the lock and returns a guard granting access to the data.
///
/// # Safety
/// The caller must not create aliasing references to the protected data;
/// the guard holds a `&mut T` derived from the `UnsafeCell`.
63 pub unsafe fn lock(&self) -> SpinlockGuard<'_, T> {
// NOTE(review): the acquisition call (presumably `self.obtain_lock()`) sits
// on an elided line just above; the guard must only be constructed after the
// ticket has been served.
65 SpinlockGuard { dequeue: &self.dequeue, data: &mut *self.data.get() }
69 impl<T: ?Sized + Default> Default for Spinlock<T> {
/// Returns an unlocked spinlock around `T::default()`.
70 fn default() -> Spinlock<T> {
71 Spinlock::new(Default::default())
// Read access to the protected data while the guard (and thus the lock) is
// held. Body elided in this view -- presumably returns the guard's `data`.
75 impl<'a, T: ?Sized> Deref for SpinlockGuard<'a, T> {
77 fn deref(&self) -> &T {
// Mutable access to the protected data while the guard is held. Body elided
// in this view -- presumably returns the guard's `data`.
82 impl<'a, T: ?Sized> DerefMut for SpinlockGuard<'a, T> {
83 fn deref_mut(&mut self) -> &mut T {
88 impl<'a, T: ?Sized> Drop for SpinlockGuard<'a, T> {
89 /// The dropping of the SpinlockGuard will release the lock it was created from.
// Advancing `dequeue` serves the next ticket, letting the next spinner in
// `obtain_lock` proceed.
91 self.dequeue.fetch_add(1, Ordering::SeqCst);
95 /// Realize a priority queue for tasks
96 struct PriorityQueue {
// One FIFO of task ids per priority level; `None` means no queue has been
// allocated for that level yet (allocation is lazy, see `push`). A companion
// `prio_bitmap` field (elided here, used by push/pop) marks non-empty levels.
97 queues: [Option<VecDeque<abi::Tid>>; abi::NO_PRIORITIES],
/// Creates an empty priority queue: every per-priority slot starts as
/// `None`; the actual `VecDeque`s are allocated lazily in `push`.
102 pub const fn new() -> PriorityQueue {
105 None, None, None, None, None, None, None, None, None, None, None, None, None, None,
106 None, None, None, None, None, None, None, None, None, None, None, None, None, None,
113 /// Add a task id by its priority to the queue
114 pub fn push(&mut self, prio: abi::Priority, id: abi::Tid) {
// Priority -> array index; the double `.into()` goes through an intermediate
// integer type (presumably u8 -> usize) -- TODO confirm abi::Priority's From
// impls.
115 let i: usize = prio.into().into();
// Mark this priority level as (potentially) non-empty in the bitmap.
116 self.prio_bitmap |= (1 << i) as u64;
// Append to the existing queue, or allocate one lazily on first use (the
// `push_back(id)` calls sit on elided lines in the full source).
117 if let Some(queue) = &mut self.queues[i] {
120 let mut queue = VecDeque::new();
122 self.queues[i] = Some(queue);
/// Pops the front task id of the queue at `queue_index`, clearing that
/// level's bit in `prio_bitmap` once the queue drains.
126 fn pop_from_queue(&mut self, queue_index: usize) -> Option<abi::Tid> {
127 if let Some(queue) = &mut self.queues[queue_index] {
128 let id = queue.pop_front();
130 if queue.is_empty() {
// Note: the `as u64` cast binds to the shift *amount* (`queue_index`); the
// shifted `1` is inferred as u64 from `prio_bitmap`, so the mask is computed
// in 64 bits as intended.
131 self.prio_bitmap &= !(1 << queue_index as u64);
140 /// Pop the task handle with the highest priority from the queue
141 pub fn pop(&mut self) -> Option<abi::Tid> {
// Bits are scanned in ascending index order; per the doc above, a lower
// index therefore corresponds to a higher priority.
142 for i in 0..abi::NO_PRIORITIES {
143 if self.prio_bitmap & (1 << i) != 0 {
// First set bit wins -- delegate the actual dequeue and bitmap maintenance.
144 return self.pop_from_queue(i);
// Tasks currently blocked on this mutex, ordered by scheduling priority.
154 blocked_task: PriorityQueue,
/// A fresh, unlocked mutex state with no blocked tasks.
158 pub const fn new() -> MutexInner {
159 MutexInner { locked: false, blocked_task: PriorityQueue::new() }
// All mutex state (`locked` flag + blocked-task queue) lives behind the
// busy-wait spinlock; actual blocking/waking goes through the hermit abi.
164 inner: Spinlock<MutexInner>,
// The mutex needs no stable address, so the movable alias is the type itself.
167 pub type MovableMutex = Mutex;
// SAFETY: all shared state is guarded by the inner spinlock.
169 unsafe impl Send for Mutex {}
170 unsafe impl Sync for Mutex {}
/// Creates a new mutex in the unlocked state.
173 pub const fn new() -> Mutex {
174 Mutex { inner: Spinlock::new(MutexInner::new()) }
/// Acquires the mutex, blocking the current task via the hermit abi when it
/// is already held.
///
/// # Safety
/// Part of the unsafe sys mutex contract; callers uphold the usual lock /
/// unlock pairing invariants.
178 pub unsafe fn lock(&self) {
// NOTE(review): elided lines suggest this body sits inside a retry loop in
// the full source -- confirm; the guard below only protects `MutexInner`.
180 let mut guard = self.inner.lock();
// Fast path: mutex free -> take it (the `guard.locked = true; return` lines
// are elided here).
181 if guard.locked == false {
// Slow path: enqueue ourselves by priority, then suspend.
185 let prio = abi::get_priority();
// Despite the `getpid` name, the value is used as an `abi::Tid` task id.
186 let id = abi::getpid();
188 guard.blocked_task.push(prio, id);
// NOTE(review): `block_current_task` is called while the spinlock guard is
// still alive; the drop/yield ordering lives on elided lines -- verify there
// is no lost-wakeup window against the full source.
189 abi::block_current_task();
/// Releases the mutex and wakes the highest-priority blocked task, if any.
///
/// # Safety
/// Must only be called by the task that currently holds the mutex.
197 pub unsafe fn unlock(&self) {
198 let mut guard = self.inner.lock();
// The mutex is marked free before the waiter is woken; the woken task
// presumably re-acquires it via `lock`'s (elided) retry loop, competing with
// any concurrently arriving lockers.
199 guard.locked = false;
200 if let Some(tid) = guard.blocked_task.pop() {
201 abi::wakeup_task(tid);
206 pub unsafe fn try_lock(&self) -> bool {
207 let mut guard = self.inner.lock();
208 if guard.locked == false {