1 // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
13 use sync::atomic::{AtomicU32, Ordering};
14 use sys::cloudabi::abi;
15 use sys::rwlock::{self, RWLock};
// NOTE(review): fragment — presumably part of an `extern`/thread-local block
// binding the CloudABI per-thread id; the enclosing braces/attributes are not
// visible here. The blocks below read `__pthread_thread_id.0` to tag lock
// ownership. Confirm the full declaration against the complete file.
19 static __pthread_thread_id: abi::tid;
22 // Implement Mutex using an RWLock. This doesn't introduce any
23 // performance overhead in this environment, as the operations would be
24 // implemented identically.
// Newtype wrapper: the mutex's only state is the wrapped RWLock. The method
// bodies are not visible in this fragment, but per the comment above they are
// expected to delegate to the RWLock's write-lock operations.
25 pub struct Mutex(RWLock);
// Exposes the mutex's underlying lock word as a raw `*mut AtomicU32`
// (the `use sys::rwlock::{self, ...}` import suggests delegation to an
// rwlock helper — TODO confirm; the body is not visible in this fragment).
27 pub unsafe fn raw(m: &Mutex) -> *mut AtomicU32 {
// Const constructor for a fresh, unlocked Mutex. Body not visible in this
// fragment; presumably wraps a const RWLock constructor — TODO confirm.
32 pub const fn new() -> Mutex {
// Post-move (re)initialization hook required by the platform-independent
// mutex interface.
36 pub unsafe fn init(&mut self) {
37 // This function should normally reinitialize the mutex after
38 // moving it to a different memory address. This implementation
39 // does not require adjustments after moving.
// (Deliberately empty per the comment above; the closing brace is not
// visible in this fragment.)
// Non-blocking acquire; returns true when the lock was taken. Body not
// visible in this fragment (presumably forwards to the RWLock's try-write
// operation — TODO confirm).
42 pub unsafe fn try_lock(&self) -> bool {
// Blocking acquire. Body not visible in this fragment (presumably forwards
// to the RWLock's write operation — TODO confirm).
46 pub unsafe fn lock(&self) {
// Releases the lock; caller must currently hold it. Body not visible in this
// fragment (presumably forwards to the RWLock's write-unlock — TODO confirm).
50 pub unsafe fn unlock(&self) {
// Tears down the mutex; must only be called when unlocked. Body not visible
// in this fragment.
54 pub unsafe fn destroy(&self) {
// Recursive (reentrant) mutex built directly on a CloudABI lock word.
// `lock` holds the owner's tid OR'd with LOCK_WRLOCKED while held (see the
// comparisons in try_lock/unlock below); `recursion` counts nested
// acquisitions by the owning thread. Both live in UnsafeCell because the
// lock/unlock methods take `&self`.
// NOTE(review): fragment — the struct's closing brace is not visible here.
59 pub struct ReentrantMutex {
60 lock: UnsafeCell<AtomicU32>,
61 recursion: UnsafeCell<u32>,
// Produces a ReentrantMutex whose fields carry no meaningful values yet;
// `init` below must run before first use. Body not visible in this fragment.
65 pub unsafe fn uninitialized() -> ReentrantMutex {
// (Re)initializes the mutex to the unlocked state: lock word set to
// LOCK_UNLOCKED, recursion counter cleared.
69 pub unsafe fn init(&mut self) {
70 self.lock = UnsafeCell::new(AtomicU32::new(abi::LOCK_UNLOCKED.0));
71 self.recursion = UnsafeCell::new(0);
// Non-blocking reentrant acquire: CAS the lock word to
// (this thread's tid | LOCK_WRLOCKED); on CAS failure, detect whether this
// thread already owns the lock and recurse instead of failing.
74 pub unsafe fn try_lock(&self) -> bool {
75 // Attempt to acquire the lock.
76 let lock = self.lock.get();
77 let recursion = self.recursion.get();
78 if let Err(old) = (*lock).compare_exchange(
// NOTE(review): fragment — the CAS `current` argument, memory orderings,
// and the success/recursion-increment branches (original lines 79, 81-83,
// 87-93) are not visible here.
80 __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
84 // If we fail to acquire the lock, it may be the case
85 // that we've already acquired it and may need to recurse.
// Ownership test masks out LOCK_KERNEL_MANAGED: that bit presumably may be
// set by the kernel while this thread still holds the lock — TODO confirm.
86 if old & !abi::LOCK_KERNEL_MANAGED.0 == __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0 {
// Sanity check on the recursion counter (its exact position in the control
// flow is not fully visible in this fragment).
94 assert_eq!(*recursion, 0, "Mutex has invalid recursion count");
// Blocking reentrant acquire: fast path via try_lock; slow path blocks in
// the kernel by poll()ing a LOCK_WRLOCK subscription on this lock word.
99 pub unsafe fn lock(&self) {
100 if !self.try_lock() {
101 // Call into the kernel to acquire a write lock.
102 let lock = self.lock.get();
103 let subscription = abi::subscription {
104 type_: abi::eventtype::LOCK_WRLOCK,
105 union: abi::subscription_union {
106 lock: abi::subscription_lock {
107 lock: lock as *mut abi::lock,
108 lock_scope: abi::scope::PRIVATE,
// NOTE(review): fragment — the remaining subscription fields and struct
// closers (original lines 109-112) are not visible here.
// NOTE(review): mem::uninitialized() is deprecated and undefined behavior
// for most types; a proper fix (needs the missing surrounding lines) would
// use std::mem::MaybeUninit for `event` and `nevents`.
113 let mut event: abi::event = mem::uninitialized();
114 let mut nevents: usize = mem::uninitialized();
// Block until the kernel grants the write lock, then check both the
// syscall result and the per-event error.
115 let ret = abi::poll(&subscription, &mut event, 1, &mut nevents);
116 assert_eq!(ret, abi::errno::SUCCESS, "Failed to acquire mutex");
117 assert_eq!(event.error, abi::errno::SUCCESS, "Failed to acquire mutex");
// Releases one level of the reentrant lock held by the calling thread; when
// the kernel manages waiters, the final release goes through lock_unlock().
121 pub unsafe fn unlock(&self) {
122 let lock = self.lock.get();
123 let recursion = self.recursion.get();
// Ownership check: the lock word (ignoring the kernel-managed bit) must
// name the calling thread as the write-lock holder.
// NOTE(review): fragment — the assert_eq! opener (original line 124) is not
// visible here.
125 (*lock).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
126 __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
127 "This mutex is locked by a different thread"
// NOTE(review): fragment — the recursion-decrement branch and the userspace
// release opener (original lines 128-133) are not visible; the two operands
// below swap the word from owned (tid | WRLOCKED) back to UNLOCKED.
134 __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
135 abi::LOCK_UNLOCKED.0,
// Fallback path when the userspace release does not apply:
141 // Lock is managed by kernelspace. Call into the kernel
142 // to unblock waiting threads.
143 let ret = abi::lock_unlock(lock as *mut abi::lock, abi::scope::PRIVATE);
144 assert_eq!(ret, abi::errno::SUCCESS, "Failed to unlock a mutex");
// Destruction-time sanity checks: the mutex must be fully unlocked and the
// recursion counter zero. (Whether any further teardown happens is not
// visible in this fragment.)
148 pub unsafe fn destroy(&self) {
149 let lock = self.lock.get();
150 let recursion = self.recursion.get();
// NOTE(review): fragment — the assert_eq! opener (original line 151) is not
// visible here; the three lines below are its arguments.
152 (*lock).load(Ordering::Relaxed),
153 abi::LOCK_UNLOCKED.0,
154 "Attempted to destroy locked mutex"
156 assert_eq!(*recursion, 0, "Recursion counter invalid");