// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A native mutex and condition variable type.
//!
//! This module contains bindings to the platform's native mutex/condition
//! variable primitives. It provides two types: `StaticNativeMutex`, which can
//! be statically initialized via the `NATIVE_MUTEX_INIT` value, and a simple
//! wrapper `NativeMutex` that has a destructor to clean up after itself. These
//! objects serve as both mutexes and condition variables simultaneously.
//!
//! The static lock is lazily initialized, but it can only be unsafely
//! destroyed. A statically initialized lock doesn't necessarily have a time at
//! which it can get deallocated. For this reason, there is no `Drop`
//! implementation of the static mutex; instead, the `destroy()` method must
//! be invoked manually if destruction of the mutex is desired.
//!
//! The non-static `NativeMutex` type does have a destructor, but cannot be
//! statically initialized.
//!
//! These types are not recommended for idiomatic Rust code. They are
//! appropriate where no other options are available, but other Rust
//! concurrency primitives should be preferred: the `sync` crate defines
//! `StaticMutex` and `Mutex` types.
//! use rustrt::mutex::{NativeMutex, StaticNativeMutex, NATIVE_MUTEX_INIT};
//!
//! // Use a statically initialized mutex
//! static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
//!
//! unsafe {
//!     let _guard = LOCK.lock();
//! } // automatically unlocked here
//!
//! // Use a normally initialized mutex
//! unsafe {
//!     let mut lock = NativeMutex::new();
//!
//!     {
//!         let _guard = lock.lock();
//!     } // unlocked here
//!
//!     // sometimes the RAII guard isn't appropriate
//!     lock.lock_noguard();
//!     lock.unlock_noguard();
//! } // `lock` is deallocated here
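//!
//! A `StaticNativeMutex` constructed at runtime has no destructor, so it must
//! be torn down explicitly; a minimal sketch:
//!
//! ```rust
//! use rustrt::mutex::StaticNativeMutex;
//!
//! unsafe {
//!     let lock = StaticNativeMutex::new();
//!     {
//!         let _guard = lock.lock();
//!     }
//!     lock.destroy(); // without this call the OS resources would leak
//! }
//! ```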
#![allow(non_camel_case_types)]

/// A native mutex suitable for storing in statics (that is, it has
/// the `destroy` method rather than a destructor).
///
/// Prefer the `NativeMutex` type where possible, since that does not
/// require manual deallocation.
pub struct StaticNativeMutex {
    inner: imp::Mutex,
}

/// A native mutex with a destructor for clean-up.
///
/// See `StaticNativeMutex` for a version that is suitable for storing in
/// statics.
pub struct NativeMutex {
    inner: StaticNativeMutex
}
/// Automatically unlocks the mutex that it was created from on
/// destruction.
///
/// Using this makes lock-based code resilient to unwinding/task
/// panic, because the lock will be automatically unlocked even
/// in those cases.
pub struct LockGuard<'a> {
    lock: &'a StaticNativeMutex
}

pub const NATIVE_MUTEX_INIT: StaticNativeMutex = StaticNativeMutex {
    inner: imp::MUTEX_INIT,
};
impl StaticNativeMutex {
    /// Creates a new mutex.
    ///
    /// Note that a mutex created in this way needs to be explicitly freed
    /// with a call to `destroy` or it will leak.
    /// Also, it is important to avoid locking the mutex until it has stopped
    /// moving; see `lock` for details.
    pub unsafe fn new() -> StaticNativeMutex {
        StaticNativeMutex { inner: imp::Mutex::new() }
    }
    /// Acquires this lock. This assumes that the current thread does not
    /// already hold the lock.
    ///
    /// use rustrt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
    /// static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
    /// unsafe {
    ///     let _guard = LOCK.lock();
    ///     // critical section...
    /// } // automatically unlocked in `_guard`'s destructor
    ///
    /// This method is unsafe because it will not function correctly if this
    /// mutex has been *moved* since it was last used. The mutex can move an
    /// arbitrary number of times before its first usage, but once a mutex has
    /// been used, it is no longer allowed to move (otherwise it invokes
    /// undefined behavior).
    ///
    /// Additionally, this type does not take into account any form of
    /// scheduling model. This will unconditionally block the *OS thread*,
    /// which is not always desired.
    pub unsafe fn lock<'a>(&'a self) -> LockGuard<'a> {
        self.inner.lock();
        LockGuard { lock: self }
    }
    /// Attempts to acquire the lock. The value returned is `Some` if
    /// the attempt succeeded.
    ///
    /// This method is unsafe for the same reasons as `lock`.
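    ///
    /// A minimal sketch of checking the returned guard:
    ///
    /// ```rust
    /// use rustrt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
    /// static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
    /// unsafe {
    ///     match LOCK.trylock() {
    ///         // the lock is released when `_guard` goes out of scope
    ///         Some(_guard) => { /* critical section */ }
    ///         None => { /* the lock is already held elsewhere */ }
    ///     }
    /// }
    /// ```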
    pub unsafe fn trylock<'a>(&'a self) -> Option<LockGuard<'a>> {
        if self.inner.trylock() {
            Some(LockGuard { lock: self })
    /// Acquires the lock without creating a `LockGuard`.
    ///
    /// This needs to be paired with a call to `.unlock_noguard`. Prefer using
    /// `.lock`.
    ///
    /// This method is unsafe for the same reasons as `lock`. Additionally, this
    /// does not guarantee that the mutex will ever be unlocked, and it is
    /// undefined to drop an already-locked mutex.
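    ///
    /// A minimal sketch of the required pairing:
    ///
    /// ```rust
    /// use rustrt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
    /// static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
    /// unsafe {
    ///     LOCK.lock_noguard();
    ///     // critical section...
    ///     LOCK.unlock_noguard(); // every lock_noguard needs a matching unlock
    /// }
    /// ```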
    pub unsafe fn lock_noguard(&self) { self.inner.lock() }

    /// Attempts to acquire the lock without creating a
    /// `LockGuard`. The value returned is whether the lock was
    /// acquired.
    ///
    /// If `true` is returned, this needs to be paired with a call to
    /// `.unlock_noguard`. Prefer using `.trylock`.
    ///
    /// This method is unsafe for the same reasons as `lock_noguard`.
    pub unsafe fn trylock_noguard(&self) -> bool {
        self.inner.trylock()
    }

    /// Unlocks the lock. This assumes that the current thread already holds the
    /// lock.
    ///
    /// This method is unsafe for the same reasons as `lock`. Additionally, it
    /// is not guaranteed that this is unlocking a previously locked mutex. It
    /// is undefined to unlock an unlocked mutex.
    pub unsafe fn unlock_noguard(&self) { self.inner.unlock() }
    /// Blocks on the internal condition variable.
    ///
    /// This function assumes that the lock is already held. Prefer
    /// using `LockGuard.wait` since that guarantees that the lock is
    /// held.
    ///
    /// This method is unsafe for the same reasons as `lock`. Additionally, this
    /// is unsafe because the mutex may not be currently locked.
    pub unsafe fn wait_noguard(&self) { self.inner.wait() }

    /// Signals a thread in `wait` to wake up.
    ///
    /// This method is unsafe for the same reasons as `lock`. Additionally, this
    /// is unsafe because the mutex may not be currently locked.
    pub unsafe fn signal_noguard(&self) { self.inner.signal() }

    /// This function is especially unsafe because there are no guarantees made
    /// that no other thread is currently holding the lock or waiting on the
    /// condition variable contained inside.
    pub unsafe fn destroy(&self) { self.inner.destroy() }
}

impl NativeMutex {
    /// Creates a new mutex.
    ///
    /// The user must be careful to ensure the mutex is not locked when it is
    /// dropped.
    /// Also, it is important to avoid locking the mutex until it has stopped
    /// moving; see `StaticNativeMutex::lock` for details.
    pub unsafe fn new() -> NativeMutex {
        NativeMutex { inner: StaticNativeMutex::new() }
    }

    /// Acquires this lock. This assumes that the current thread does not
    /// already hold the lock.
    ///
    /// use rustrt::mutex::NativeMutex;
    /// unsafe {
    ///     let mut lock = NativeMutex::new();
    ///
    ///     {
    ///         let _guard = lock.lock();
    ///         // critical section...
    ///     } // automatically unlocked in `_guard`'s destructor
    /// }
    ///
    /// This method is unsafe due to the same reasons as
    /// `StaticNativeMutex::lock`.
    pub unsafe fn lock<'a>(&'a self) -> LockGuard<'a> {
        self.inner.lock()
    }

    /// Attempts to acquire the lock. The value returned is `Some` if
    /// the attempt succeeded.
    ///
    /// This method is unsafe due to the same reasons as
    /// `StaticNativeMutex::trylock`.
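    ///
    /// A small sketch of handling both outcomes:
    ///
    /// ```rust
    /// use rustrt::mutex::NativeMutex;
    /// unsafe {
    ///     let lock = NativeMutex::new();
    ///     match lock.trylock() {
    ///         Some(_guard) => { /* critical section */ }
    ///         None => { /* the lock is already held elsewhere */ }
    ///     }
    /// }
    /// ```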
    pub unsafe fn trylock<'a>(&'a self) -> Option<LockGuard<'a>> {
        self.inner.trylock()
    }

    /// Acquires the lock without creating a `LockGuard`.
    ///
    /// This needs to be paired with a call to `.unlock_noguard`. Prefer using
    /// `.lock`.
    ///
    /// This method is unsafe due to the same reasons as
    /// `StaticNativeMutex::lock_noguard`.
    pub unsafe fn lock_noguard(&self) { self.inner.lock_noguard() }

    /// Attempts to acquire the lock without creating a
    /// `LockGuard`. The value returned is whether the lock was
    /// acquired.
    ///
    /// If `true` is returned, this needs to be paired with a call to
    /// `.unlock_noguard`. Prefer using `.trylock`.
    ///
    /// This method is unsafe due to the same reasons as
    /// `StaticNativeMutex::trylock_noguard`.
    pub unsafe fn trylock_noguard(&self) -> bool {
        self.inner.trylock_noguard()
    }

    /// Unlocks the lock. This assumes that the current thread already holds the
    /// lock.
    ///
    /// This method is unsafe due to the same reasons as
    /// `StaticNativeMutex::unlock_noguard`.
    pub unsafe fn unlock_noguard(&self) { self.inner.unlock_noguard() }
    /// Blocks on the internal condition variable.
    ///
    /// This function assumes that the lock is already held. Prefer
    /// using `LockGuard.wait` since that guarantees that the lock is
    /// held.
    ///
    /// This method is unsafe due to the same reasons as
    /// `StaticNativeMutex::wait_noguard`.
    pub unsafe fn wait_noguard(&self) { self.inner.wait_noguard() }

    /// Signals a thread in `wait` to wake up.
    ///
    /// This method is unsafe due to the same reasons as
    /// `StaticNativeMutex::signal_noguard`.
    pub unsafe fn signal_noguard(&self) { self.inner.signal_noguard() }
}

impl Drop for NativeMutex {
    fn drop(&mut self) {
        unsafe { self.inner.destroy() }
    }
}
impl<'a> LockGuard<'a> {
    /// Blocks on the internal condition variable.
    pub unsafe fn wait(&self) {
        self.lock.wait_noguard()
    }

    /// Signals a thread in `wait` to wake up.
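    ///
    /// A small sketch of signalling while holding the lock:
    ///
    /// ```rust
    /// use rustrt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
    /// static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
    /// unsafe {
    ///     let guard = LOCK.lock();
    ///     guard.signal(); // wakes one thread blocked in `guard.wait()`, if any
    /// }
    /// ```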
    pub unsafe fn signal(&self) {
        self.lock.signal_noguard()
    }
}

impl<'a> Drop for LockGuard<'a> {
    fn drop(&mut self) { unsafe { self.lock.unlock_noguard() } }
}
use self::os::{PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
               pthread_mutex_t, pthread_cond_t};
use core::cell::UnsafeCell;

type pthread_mutexattr_t = libc::c_void;
type pthread_condattr_t = libc::c_void;
#[cfg(any(target_os = "freebsd", target_os = "dragonfly"))]
pub type pthread_mutex_t = *mut libc::c_void;
pub type pthread_cond_t = *mut libc::c_void;

pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t =
    0 as pthread_mutex_t;
pub const PTHREAD_COND_INITIALIZER: pthread_cond_t =
    0 as pthread_cond_t;
#[cfg(any(target_os = "macos", target_os = "ios"))]
#[cfg(target_arch = "x86_64")]
const __PTHREAD_MUTEX_SIZE__: uint = 56;
#[cfg(target_arch = "x86_64")]
const __PTHREAD_COND_SIZE__: uint = 40;
#[cfg(target_arch = "x86")]
const __PTHREAD_MUTEX_SIZE__: uint = 40;
#[cfg(target_arch = "x86")]
const __PTHREAD_COND_SIZE__: uint = 24;
#[cfg(target_arch = "arm")]
const __PTHREAD_MUTEX_SIZE__: uint = 40;
#[cfg(target_arch = "arm")]
const __PTHREAD_COND_SIZE__: uint = 24;

const _PTHREAD_MUTEX_SIG_INIT: libc::c_long = 0x32AAABA7;
const _PTHREAD_COND_SIG_INIT: libc::c_long = 0x3CB0B1BB;

pub struct pthread_mutex_t {
    __sig: libc::c_long,
    __opaque: [u8, ..__PTHREAD_MUTEX_SIZE__],
}
pub struct pthread_cond_t {
    __sig: libc::c_long,
    __opaque: [u8, ..__PTHREAD_COND_SIZE__],
}

pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t {
    __sig: _PTHREAD_MUTEX_SIG_INIT,
    __opaque: [0, ..__PTHREAD_MUTEX_SIZE__],
};
pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t {
    __sig: _PTHREAD_COND_SIG_INIT,
    __opaque: [0, ..__PTHREAD_COND_SIZE__],
};
#[cfg(target_os = "linux")]
// minus 8 because we have an 'align' field
#[cfg(target_arch = "x86_64")]
const __SIZEOF_PTHREAD_MUTEX_T: uint = 40 - 8;
#[cfg(target_arch = "x86")]
const __SIZEOF_PTHREAD_MUTEX_T: uint = 24 - 8;
#[cfg(target_arch = "arm")]
const __SIZEOF_PTHREAD_MUTEX_T: uint = 24 - 8;
#[cfg(target_arch = "mips")]
const __SIZEOF_PTHREAD_MUTEX_T: uint = 24 - 8;
#[cfg(target_arch = "mipsel")]
const __SIZEOF_PTHREAD_MUTEX_T: uint = 24 - 8;
#[cfg(target_arch = "x86_64")]
const __SIZEOF_PTHREAD_COND_T: uint = 48 - 8;
#[cfg(target_arch = "x86")]
const __SIZEOF_PTHREAD_COND_T: uint = 48 - 8;
#[cfg(target_arch = "arm")]
const __SIZEOF_PTHREAD_COND_T: uint = 48 - 8;
#[cfg(target_arch = "mips")]
const __SIZEOF_PTHREAD_COND_T: uint = 48 - 8;
#[cfg(target_arch = "mipsel")]
const __SIZEOF_PTHREAD_COND_T: uint = 48 - 8;

pub struct pthread_mutex_t {
    __align: libc::c_longlong,
    size: [u8, ..__SIZEOF_PTHREAD_MUTEX_T],
}
pub struct pthread_cond_t {
    __align: libc::c_longlong,
    size: [u8, ..__SIZEOF_PTHREAD_COND_T],
}

pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t {
    __align: 0,
    size: [0, ..__SIZEOF_PTHREAD_MUTEX_T],
};
pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t {
    __align: 0,
    size: [0, ..__SIZEOF_PTHREAD_COND_T],
};
#[cfg(target_os = "android")]
pub struct pthread_mutex_t { value: libc::c_int }
pub struct pthread_cond_t { value: libc::c_int }

pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t {
    value: 0,
};
pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t {
    value: 0,
};

pub struct Mutex {
    lock: UnsafeCell<pthread_mutex_t>,
    cond: UnsafeCell<pthread_cond_t>,
}

pub const MUTEX_INIT: Mutex = Mutex {
    lock: UnsafeCell { value: PTHREAD_MUTEX_INITIALIZER },
    cond: UnsafeCell { value: PTHREAD_COND_INITIALIZER },
};
pub unsafe fn new() -> Mutex {
    // The mutex might still be moved (its address is not yet final), so it
    // is better to avoid initializing the potentially address-sensitive,
    // opaque OS data until the mutex has landed at its final location.
    Mutex {
        lock: UnsafeCell::new(PTHREAD_MUTEX_INITIALIZER),
        cond: UnsafeCell::new(PTHREAD_COND_INITIALIZER),
    }
}
pub unsafe fn lock(&self) { pthread_mutex_lock(self.lock.get()); }
pub unsafe fn unlock(&self) { pthread_mutex_unlock(self.lock.get()); }
pub unsafe fn signal(&self) { pthread_cond_signal(self.cond.get()); }
pub unsafe fn wait(&self) {
    pthread_cond_wait(self.cond.get(), self.lock.get());
}
pub unsafe fn trylock(&self) -> bool {
    pthread_mutex_trylock(self.lock.get()) == 0
}
pub unsafe fn destroy(&self) {
    pthread_mutex_destroy(self.lock.get());
    pthread_cond_destroy(self.cond.get());
}
extern {
    fn pthread_mutex_destroy(lock: *mut pthread_mutex_t) -> libc::c_int;
    fn pthread_cond_destroy(cond: *mut pthread_cond_t) -> libc::c_int;
    fn pthread_mutex_lock(lock: *mut pthread_mutex_t) -> libc::c_int;
    fn pthread_mutex_trylock(lock: *mut pthread_mutex_t) -> libc::c_int;
    fn pthread_mutex_unlock(lock: *mut pthread_mutex_t) -> libc::c_int;
    fn pthread_cond_wait(cond: *mut pthread_cond_t,
                         lock: *mut pthread_mutex_t) -> libc::c_int;
    fn pthread_cond_signal(cond: *mut pthread_cond_t) -> libc::c_int;
}
use core::ptr::RawPtr;
use libc::{HANDLE, BOOL, LPSECURITY_ATTRIBUTES, c_void, DWORD, LPCSTR};

type LPCRITICAL_SECTION = *mut c_void;
const SPIN_COUNT: DWORD = 4000;
#[cfg(target_arch = "x86")]
const CRIT_SECTION_SIZE: uint = 24;
#[cfg(target_arch = "x86_64")]
const CRIT_SECTION_SIZE: uint = 40;
pub struct Mutex {
    // pointers for the lock/cond handles, atomically updated
    lock: atomic::AtomicUint,
    cond: atomic::AtomicUint,
}

pub const MUTEX_INIT: Mutex = Mutex {
    lock: atomic::INIT_ATOMIC_UINT,
    cond: atomic::INIT_ATOMIC_UINT,
};
pub unsafe fn new() -> Mutex {
    Mutex {
        lock: atomic::AtomicUint::new(init_lock()),
        cond: atomic::AtomicUint::new(init_cond()),
    }
}
pub unsafe fn lock(&self) {
    EnterCriticalSection(self.getlock() as LPCRITICAL_SECTION)
}
pub unsafe fn trylock(&self) -> bool {
    TryEnterCriticalSection(self.getlock() as LPCRITICAL_SECTION) != 0
}
pub unsafe fn unlock(&self) {
    LeaveCriticalSection(self.getlock() as LPCRITICAL_SECTION)
}
pub unsafe fn wait(&self) {
    self.unlock();
    WaitForSingleObject(self.getcond() as HANDLE, libc::INFINITE);
    self.lock();
}
pub unsafe fn signal(&self) {
    assert!(SetEvent(self.getcond() as HANDLE) != 0);
}
/// This function is especially unsafe because there are no guarantees made
/// that no other thread is currently holding the lock or waiting on the
/// condition variable contained inside.
pub unsafe fn destroy(&self) {
    let lock = self.lock.swap(0, atomic::SeqCst);
    let cond = self.cond.swap(0, atomic::SeqCst);
    if lock != 0 { free_lock(lock) }
    if cond != 0 { free_cond(cond) }
}
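
// The critical section and event handle are created lazily: `getlock` and
// `getcond` first load the atomic, and any non-zero value is a handle that
// was already installed. Otherwise a fresh handle is created and raced into
// place with `compare_and_swap`; the loser of that race frees its own handle
// and uses whatever the winner installed.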
unsafe fn getlock(&self) -> *mut c_void {
    match self.lock.load(atomic::SeqCst) {
        0 => {}
        n => return n as *mut c_void
    }
    let lock = init_lock();
    match self.lock.compare_and_swap(0, lock, atomic::SeqCst) {
        0 => return lock as *mut c_void,
        _ => {}
    }
    free_lock(lock);
    return self.lock.load(atomic::SeqCst) as *mut c_void;
}

unsafe fn getcond(&self) -> *mut c_void {
    match self.cond.load(atomic::SeqCst) {
        0 => {}
        n => return n as *mut c_void
    }
    let cond = init_cond();
    match self.cond.compare_and_swap(0, cond, atomic::SeqCst) {
        0 => return cond as *mut c_void,
        _ => {}
    }
    free_cond(cond);
    return self.cond.load(atomic::SeqCst) as *mut c_void;
}
pub unsafe fn init_lock() -> uint {
    let block = heap::allocate(CRIT_SECTION_SIZE, 8) as *mut c_void;
    if block.is_null() { ::alloc::oom() }
    InitializeCriticalSectionAndSpinCount(block, SPIN_COUNT);
    return block as uint;
}

pub unsafe fn init_cond() -> uint {
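    // Create an auto-reset event (bManualReset = FALSE) that starts out
    // non-signaled, so each SetEvent releases at most one waiting thread.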
    return CreateEventA(ptr::null_mut(), libc::FALSE, libc::FALSE,
                        ptr::null()) as uint;
}

pub unsafe fn free_lock(h: uint) {
    DeleteCriticalSection(h as LPCRITICAL_SECTION);
    heap::deallocate(h as *mut u8, CRIT_SECTION_SIZE, 8);
}

pub unsafe fn free_cond(h: uint) {
    let block = h as HANDLE;
    libc::CloseHandle(block);
}
#[allow(non_snake_case)]
extern "system" {
    fn CreateEventA(lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
                    bManualReset: BOOL,
                    bInitialState: BOOL,
                    lpName: LPCSTR) -> HANDLE;
    fn InitializeCriticalSectionAndSpinCount(
                    lpCriticalSection: LPCRITICAL_SECTION,
                    dwSpinCount: DWORD) -> BOOL;
    fn DeleteCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
    fn EnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
    fn LeaveCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
    fn TryEnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION) -> BOOL;
    fn SetEvent(hEvent: HANDLE) -> BOOL;
    fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) -> DWORD;
}
use super::{StaticNativeMutex, NATIVE_MUTEX_INIT};

static LK: StaticNativeMutex = NATIVE_MUTEX_INIT;
let _guard = LK.lock();

static LK: StaticNativeMutex = NATIVE_MUTEX_INIT;
let guard = LK.lock();
let t = Thread::start(proc() {
    let guard = LK.lock();

fn smoke_lock_noguard() {
    static LK: StaticNativeMutex = NATIVE_MUTEX_INIT;

fn smoke_cond_noguard() {
    static LK: StaticNativeMutex = NATIVE_MUTEX_INIT;
    let t = Thread::start(proc() {

fn destroy_immediately() {
    unsafe {
        let m = StaticNativeMutex::new();
        m.destroy();
    }
}