1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
13 //! The Windows implementation of mutexes is a little odd and it may not be
14 //! immediately obvious what's going on. The primary oddness is that SRWLock is
15 //! used instead of CriticalSection, and this is done because:
17 //! 1. SRWLock is several times faster than CriticalSection according to
18 //! benchmarks performed on both Windows 8 and Windows 7.
20 //! 2. CriticalSection allows recursive locking while SRWLock deadlocks. The
21 //! Unix implementation deadlocks so consistency is preferred. See #19962 for more details.
24 //! 3. While CriticalSection is fair and SRWLock is not, the current Rust policy
25 //! is that there are no guarantees of fairness.
27 //! The downside of this approach, however, is that SRWLock is not available on
28 //! Windows XP, so we continue to have a fallback implementation where
29 //! CriticalSection is used and we keep track of who's holding the mutex to
30 //! detect recursive locks.
34 use sync::atomic::{AtomicUsize, Ordering};
// Flag recording whether this mutex is currently held, used by the
// CriticalSection fallback to detect (and panic on) recursive locking —
// see `flag_locked` and `lock` below.
// NOTE(review): only a fragment of the struct is visible here; the
// `lock: AtomicUsize` field referenced by `raw`/`remutex` is elided.
40 held: UnsafeCell<bool>,
// SAFETY: all mutation of the interior state happens while holding the
// underlying OS lock (SRWLock or CriticalSection), so the type can be
// shared and sent across threads — NOTE(review): confirm against the full
// struct definition, which is not entirely visible in this view.
43 unsafe impl Send for Mutex {}
44 unsafe impl Sync for Mutex {}
46 #[derive(Clone, Copy)]
// Returns a raw pointer to the SRWLOCK stored inline in `m.lock`, for
// passing to the platform SRWLock APIs.
53 pub unsafe fn raw(m: &Mutex) -> c::PSRWLOCK {
// The SRWLOCK lives inside the atomic word; assert (in debug builds) that
// the word is large enough to hold it.
54 debug_assert!(mem::size_of::<c::SRWLOCK>() <= mem::size_of_val(&m.lock));
// Cast the shared reference to the mutable pointer type the C APIs expect.
55 &m.lock as *const _ as *mut _
// Creates a new, unlocked mutex; `const` so it can initialize a static.
59 pub const fn new() -> Mutex {
// 0 doubles as "no ReentrantMutex allocated yet" for the CriticalSection
// fallback (see `remutex`) — presumably it also matches SRWLOCK_INIT for
// the SRWLock path; TODO(review) confirm against the `c` bindings.
61 lock: AtomicUsize::new(0),
62 held: UnsafeCell::new(false),
// No-op: both backends need no eager setup — the atomic word is
// zero-initialized in `new`, and the CriticalSection fallback is created
// lazily on first use in `remutex`.
66 pub unsafe fn init(&mut self) {}
// Acquires the mutex, blocking until it is available.
67 pub unsafe fn lock(&self) {
// (match header on the detected lock kind is elided in this view)
69 Kind::SRWLock => c::AcquireSRWLockExclusive(raw(self)),
70 Kind::CriticalSection => {
// Lazily-created reentrant CriticalSection wrapper.
71 let re = self.remutex();
// CriticalSection permits recursive locking, but this API forbids it
// (matching SRWLock/Unix behavior per the module docs) — detect the
// recursion via the `held` flag and panic instead.
73 if !self.flag_locked() {
75 panic!("cannot recursively lock a mutex");
// Attempts to acquire the mutex without blocking; returns whether the
// lock was taken.
80 pub unsafe fn try_lock(&self) -> bool {
// Nonzero return from the OS API signals success.
82 Kind::SRWLock => c::TryAcquireSRWLockExclusive(raw(self)) != 0,
83 Kind::CriticalSection => {
84 let re = self.remutex();
// First try the underlying reentrant lock...
85 if !(*re).try_lock() {
// ...then flag it held; a false result from `flag_locked` would mean a
// recursive attempt (handling of that branch is elided in this view).
87 } else if self.flag_locked() {
// Releases the mutex.
96 pub unsafe fn unlock(&self) {
// Clear the recursion-detection flag before releasing the OS lock, while
// we still hold exclusive access to it.
97 *self.held.get() = false;
99 Kind::SRWLock => c::ReleaseSRWLockExclusive(raw(self)),
100 Kind::CriticalSection => (*self.remutex()).unlock(),
// Destroys the mutex, freeing any lazily-allocated fallback state.
// (SRWLock arm elided in this view; SRWLocks need no explicit teardown —
// NOTE(review): confirm against the full source.)
103 pub unsafe fn destroy(&self) {
106 Kind::CriticalSection => {
107 match self.lock.load(Ordering::SeqCst) {
// Nonzero means `remutex` boxed a ReentrantMutex and stashed its raw
// pointer in the atomic word; reconstitute the Box so the
// CRITICAL_SECTION is destroyed and the allocation freed.
109 n => { Box::from_raw(n as *mut ReentrantMutex).destroy(); }
// Returns the lazily-allocated ReentrantMutex backing the
// CriticalSection fallback, creating and publishing it on first use.
115 unsafe fn remutex(&self) -> *mut ReentrantMutex {
116 match self.lock.load(Ordering::SeqCst) {
// Nonzero: already allocated; the atomic word stores the raw pointer.
118 n => return n as *mut _,
120 let mut re = Box::new(ReentrantMutex::uninitialized());
122 let re = Box::into_raw(re);
// Race to publish our pointer; exactly one thread's CAS succeeds.
123 match self.lock.compare_and_swap(0, re as usize, Ordering::SeqCst) {
// Lost the race: destroy and free our copy, use the winner's pointer.
125 n => { Box::from_raw(re).destroy(); n as *mut _ }
// Records that the mutex is now held. Returns false when the flag was
// already set — i.e. a recursive locking attempt by the owning thread —
// and presumably true otherwise (the return of the success path is
// elided in this view). Safe to touch the UnsafeCell here because the
// caller already holds the underlying OS lock.
129 unsafe fn flag_locked(&self) -> bool {
130 if *self.held.get() {
133 *self.held.get() = true;
// (function header elided in this view) Runtime detection of which lock
// implementation to use, cached in a process-global atomic.
141 static KIND: AtomicUsize = AtomicUsize::new(0);
143 let val = KIND.load(Ordering::SeqCst);
// Fast path: a previous call already stored the detected kind.
144 if val == Kind::SRWLock as usize {
146 } else if val == Kind::CriticalSection as usize {
147 return Kind::CriticalSection
// Slow path: probe kernel32 for SRWLock support (absent on Windows XP,
// per the module docs); fall back to CriticalSection when missing.
150 let ret = match compat::lookup("kernel32", "AcquireSRWLockExclusive") {
151 None => Kind::CriticalSection,
152 Some(..) => Kind::SRWLock,
// Benign race: several threads may probe concurrently, but they all
// compute and store the same value.
154 KIND.store(ret as usize, Ordering::SeqCst);
// Reentrant mutex backed by a Windows CRITICAL_SECTION, which (per the
// module docs) allows recursive locking by the owning thread. Used as the
// fallback implementation when SRWLock is unavailable.
158 pub struct ReentrantMutex { inner: UnsafeCell<c::CRITICAL_SECTION> }
// SAFETY: the CRITICAL_SECTION is an OS synchronization object designed
// for cross-thread use; the UnsafeCell is only handed to the OS APIs.
160 unsafe impl Send for ReentrantMutex {}
161 unsafe impl Sync for ReentrantMutex {}
163 impl ReentrantMutex {
// Creates a ReentrantMutex whose CRITICAL_SECTION has not yet been
// initialized; `init` must be called before any lock operation.
// (constructor body elided in this view)
164 pub unsafe fn uninitialized() -> ReentrantMutex {
// Initializes the CRITICAL_SECTION; must happen exactly once, before use.
168 pub unsafe fn init(&mut self) {
169 c::InitializeCriticalSection(self.inner.get());
// Blocking acquire; recursion by the owning thread is permitted.
172 pub unsafe fn lock(&self) {
173 c::EnterCriticalSection(self.inner.get());
// Non-blocking acquire; nonzero return from the OS API means success.
177 pub unsafe fn try_lock(&self) -> bool {
178 c::TryEnterCriticalSection(self.inner.get()) != 0
// Releases one level of ownership.
181 pub unsafe fn unlock(&self) {
182 c::LeaveCriticalSection(self.inner.get());
// Releases the OS resources; the lock must not be used afterwards.
185 pub unsafe fn destroy(&self) {
186 c::DeleteCriticalSection(self.inner.get());