Small cleanups in Windows Mutex.
author Mara Bos <m-ou.se@m-ou.se>
Sat, 12 Sep 2020 18:50:17 +0000 (20:50 +0200)
committer Mara Bos <m-ou.se@m-ou.se>
Sat, 12 Sep 2020 18:50:17 +0000 (20:50 +0200)
- Move `held` into the boxed part, since the SRW lock implementation
  does not use it. This makes the Mutex 50% smaller: on 64-bit targets
  it shrinks from 16 bytes (an AtomicUsize plus a padded bool) to just
  the 8-byte AtomicUsize.
- Use `Cell` instead of `UnsafeCell` for `held`, so that `.replace()`
  can be used; see the sketch after this list.
- Add some comments.
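
As a rough illustration (not part of this commit; the names are just
for the example), `Cell::replace` stores a new value and returns the
old one in a single safe call, which is exactly the test-and-set shape
the `held` flag needs:

    use std::cell::Cell;

    fn main() {
        let held = Cell::new(false);
        // replace() writes `true` and returns the previous value, so
        // "was it already held?" and "mark it held" happen in one safe
        // call, with no raw pointer access through UnsafeCell::get().
        if held.replace(true) {
            panic!("cannot recursively lock a mutex");
        }
        assert!(held.get());
        // Unlocking just clears the flag again.
        held.set(false);
    }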

library/std/src/sys/windows/mutex.rs

index 63dfc640908e94da6e82418dd0368fd0df43b9e7..f2c57025a55ba6f2e86e819c9958f7dc47c2de3a 100644 (file)
 //! CriticalSection is used and we keep track of whether the mutex is
 //! already held to detect recursive locks.
 
-use crate::cell::UnsafeCell;
+use crate::cell::{Cell, UnsafeCell};
 use crate::mem::{self, MaybeUninit};
 use crate::sync::atomic::{AtomicUsize, Ordering};
 use crate::sys::c;
 use crate::sys::compat;
 
 pub struct Mutex {
+    // If SRWLocks are supported, this is used directly as an SRWLOCK; otherwise it holds a Box<Inner>.
     lock: AtomicUsize,
-    held: UnsafeCell<bool>,
 }
 
 unsafe impl Send for Mutex {}
 unsafe impl Sync for Mutex {}
 
+struct Inner {
+    remutex: ReentrantMutex,
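+    // Whether this mutex is currently held; used to detect and reject recursive locking.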
+    held: Cell<bool>,
+}
+
 #[derive(Clone, Copy)]
 enum Kind {
     SRWLock = 1,
@@ -51,7 +56,6 @@ pub const fn new() -> Mutex {
             // This works because SRWLOCK_INIT is 0 (wrapped in a struct), so we are also properly
             // initializing an SRWLOCK here.
             lock: AtomicUsize::new(0),
-            held: UnsafeCell::new(false),
         }
     }
     #[inline]
@@ -60,10 +64,11 @@ pub unsafe fn lock(&self) {
         match kind() {
             Kind::SRWLock => c::AcquireSRWLockExclusive(raw(self)),
             Kind::CriticalSection => {
-                let re = self.remutex();
-                (*re).lock();
-                if !self.flag_locked() {
-                    (*re).unlock();
+                let inner = &*self.inner();
+                inner.remutex.lock();
+                if inner.held.replace(true) {
+                    // It was already locked, so this is a recursive lock, which we do not allow.
+                    inner.remutex.unlock();
                     panic!("cannot recursively lock a mutex");
                 }
             }
@@ -73,23 +78,27 @@ pub unsafe fn try_lock(&self) -> bool {
         match kind() {
             Kind::SRWLock => c::TryAcquireSRWLockExclusive(raw(self)) != 0,
             Kind::CriticalSection => {
-                let re = self.remutex();
-                if !(*re).try_lock() {
+                let inner = &*self.inner();
+                if !inner.remutex.try_lock() {
                     false
-                } else if self.flag_locked() {
-                    true
-                } else {
-                    (*re).unlock();
+                } else if inner.held.replace(true) {
+                    // It was already locked, so this is a recursive lock, which we do not allow.
+                    inner.remutex.unlock();
                     false
+                } else {
+                    true
                 }
             }
         }
     }
     pub unsafe fn unlock(&self) {
-        *self.held.get() = false;
         match kind() {
             Kind::SRWLock => c::ReleaseSRWLockExclusive(raw(self)),
-            Kind::CriticalSection => (*self.remutex()).unlock(),
+            Kind::CriticalSection => {
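+                // unlock() is only called while the mutex is held, so inner() has
+                // already run; the pointer can be loaded directly.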
+                let inner = &*(self.lock.load(Ordering::SeqCst) as *const Inner);
+                inner.held.set(false);
+                inner.remutex.unlock();
+            }
         }
     }
     pub unsafe fn destroy(&self) {
@@ -98,37 +107,28 @@ pub unsafe fn destroy(&self) {
             Kind::CriticalSection => match self.lock.load(Ordering::SeqCst) {
                 0 => {}
                 n => {
-                    Box::from_raw(n as *mut ReentrantMutex).destroy();
+                    Box::from_raw(n as *mut Inner).remutex.destroy();
                 }
             },
         }
     }
 
-    unsafe fn remutex(&self) -> *mut ReentrantMutex {
+    unsafe fn inner(&self) -> *mut Inner {
         match self.lock.load(Ordering::SeqCst) {
             0 => {}
             n => return n as *mut _,
         }
-        let re = box ReentrantMutex::uninitialized();
-        re.init();
-        let re = Box::into_raw(re);
-        match self.lock.compare_and_swap(0, re as usize, Ordering::SeqCst) {
-            0 => re,
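+        // Not initialized yet: allocate an Inner and race to publish the pointer.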
+        let inner = box Inner { remutex: ReentrantMutex::uninitialized(), held: Cell::new(false) };
+        inner.remutex.init();
+        let inner = Box::into_raw(inner);
+        match self.lock.compare_and_swap(0, inner as usize, Ordering::SeqCst) {
+            0 => inner,
             n => {
-                Box::from_raw(re).destroy();
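+                // Another thread won the race and installed its Inner first;
+                // deallocate ours and use theirs.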
+                Box::from_raw(inner).remutex.destroy();
                 n as *mut _
             }
         }
     }
-
-    unsafe fn flag_locked(&self) -> bool {
-        if *self.held.get() {
-            false
-        } else {
-            *self.held.get() = true;
-            true
-        }
-    }
 }
 
 fn kind() -> Kind {