1 // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! A native mutex and condition variable type.
13 //! This module contains bindings to the platform's native mutex/condition
14 //! variable primitives. It provides two types: `StaticNativeMutex`, which can
15 //! be statically initialized via the `NATIVE_MUTEX_INIT` value, and a simple
16 //! wrapper `NativeMutex` that has a destructor to clean up after itself. These
17 //! objects serve as both mutexes and condition variables simultaneously.
19 //! The static lock is lazily initialized, but it can only be unsafely
20 //! destroyed. A statically initialized lock doesn't necessarily have a time at
21 //! which it can get deallocated. For this reason, there is no `Drop`
22 //! implementation of the static mutex, but rather the `destroy()` method must
23 //! be invoked manually if destruction of the mutex is desired.
25 //! The non-static `NativeMutex` type does have a destructor, but cannot be
26 //! statically initialized.
28 //! These types are not recommended for idiomatic Rust code. They are
29 //! appropriate where no other options are available, but other Rust
30 //! concurrency primitives should be preferred: the `sync` crate defines
31 //! `StaticMutex` and `Mutex` types.
36 //! use std::unstable::mutex::{NativeMutex, StaticNativeMutex, NATIVE_MUTEX_INIT};
38 //! // Use a statically initialized mutex
39 //! static mut LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
42 //! let _guard = LOCK.lock();
43 //! } // automatically unlocked here
45 //! // Use a normally initialized mutex
47 //! let mut lock = NativeMutex::new();
50 //! let _guard = lock.lock();
51 //! } // unlocked here
53 //! // sometimes the RAII guard isn't appropriate
54 //! lock.lock_noguard();
55 //! lock.unlock_noguard();
56 //! } // `lock` is deallocated here
59 #[allow(non_camel_case_types)];
61 use option::{Option, None, Some};
64 /// A native mutex suitable for storing in statics (that is, it has
65 /// the `destroy` method rather than a destructor).
67 /// Prefer the `NativeMutex` type where possible, since that does not
68 /// require manual deallocation.
69 pub struct StaticNativeMutex {
// Platform-specific mutex + condvar pair; `imp` is the per-OS backend module.
70 priv inner: imp::Mutex,
73 /// A native mutex with a destructor for clean-up.
75 /// See `StaticNativeMutex` for a version that is suitable for storing in
77 pub struct NativeMutex {
// Delegates every operation to the static variant; its `Drop` impl below
// calls `destroy` so no manual clean-up is required.
78 priv inner: StaticNativeMutex
81 /// Automatically unlocks the mutex that it was created from on
84 /// Using this makes lock-based code resilient to unwinding/task
85 /// failure, because the lock will be automatically unlocked even
88 pub struct LockGuard<'a> {
// Borrow of the mutex this guard will unlock in its destructor; the
// lifetime ties the guard to the lock so the lock cannot be destroyed first.
89 priv lock: &'a StaticNativeMutex
// Compile-time initializer, usable in `static` items; the underlying OS
// object is initialized lazily by the platform (see `imp::MUTEX_INIT`).
92 pub static NATIVE_MUTEX_INIT: StaticNativeMutex = StaticNativeMutex {
93 inner: imp::MUTEX_INIT,
96 impl StaticNativeMutex {
97 /// Creates a new mutex.
99 /// Note that a mutex created in this way needs to be explicitly
100 /// freed with a call to `destroy` or it will leak.
101 pub unsafe fn new() -> StaticNativeMutex {
102 StaticNativeMutex { inner: imp::Mutex::new() }
105 /// Acquires this lock. This assumes that the current thread does not
106 /// already hold the lock.
111 /// use std::unstable::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
112 /// static mut LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
114 /// let _guard = LOCK.lock();
115 /// // critical section...
116 /// } // automatically unlocked in `_guard`'s destructor
118 pub unsafe fn lock<'a>(&'a self) -> LockGuard<'a> {
// The returned guard borrows `self`; its Drop impl releases the lock.
121 LockGuard { lock: self }
124 /// Attempts to acquire the lock. The value returned is `Some` if
125 /// the attempt succeeded.
126 pub unsafe fn trylock<'a>(&'a self) -> Option<LockGuard<'a>> {
// Only wrap a guard when the platform trylock actually succeeded.
127 if self.inner.trylock() {
128 Some(LockGuard { lock: self })
134 /// Acquire the lock without creating a `LockGuard`.
136 /// This needs to be paired with a call to `.unlock_noguard`. Prefer using
138 pub unsafe fn lock_noguard(&self) { self.inner.lock() }
140 /// Attempts to acquire the lock without creating a
141 /// `LockGuard`. The value returned is whether the lock was
144 /// If `true` is returned, this needs to be paired with a call to
145 /// `.unlock_noguard`. Prefer using `.trylock`.
146 pub unsafe fn trylock_noguard(&self) -> bool {
150 /// Unlocks the lock. This assumes that the current thread already holds the
152 pub unsafe fn unlock_noguard(&self) { self.inner.unlock() }
154 /// Block on the internal condition variable.
156 /// This function assumes that the lock is already held. Prefer
157 /// using `LockGuard.wait` since that guarantees that the lock is
159 pub unsafe fn wait_noguard(&self) { self.inner.wait() }
161 /// Signals a thread in `wait` to wake up
162 pub unsafe fn signal_noguard(&self) { self.inner.signal() }
164 /// This function is especially unsafe because there are no guarantees made
165 /// that no other thread is currently holding the lock or waiting on the
166 /// condition variable contained inside.
167 pub unsafe fn destroy(&self) { self.inner.destroy() }
// Methods of `NativeMutex` (the `impl NativeMutex {` header is not visible
// in this excerpt). Each method simply delegates to the inner
// `StaticNativeMutex`; the only addition is the `Drop` impl further below.
171 /// Creates a new mutex.
173 /// The user must be careful to ensure the mutex is not locked when it is
175 pub unsafe fn new() -> NativeMutex {
176 NativeMutex { inner: StaticNativeMutex::new() }
179 /// Acquires this lock. This assumes that the current thread does not
180 /// already hold the lock.
184 /// use std::unstable::mutex::NativeMutex;
186 /// let mut lock = NativeMutex::new();
189 /// let _guard = lock.lock();
190 /// // critical section...
191 /// } // automatically unlocked in `_guard`'s destructor
194 pub unsafe fn lock<'a>(&'a self) -> LockGuard<'a> {
198 /// Attempts to acquire the lock. The value returned is `Some` if
199 /// the attempt succeeded.
200 pub unsafe fn trylock<'a>(&'a self) -> Option<LockGuard<'a>> {
204 /// Acquire the lock without creating a `LockGuard`.
206 /// This needs to be paired with a call to `.unlock_noguard`. Prefer using
208 pub unsafe fn lock_noguard(&self) { self.inner.lock_noguard() }
210 /// Attempts to acquire the lock without creating a
211 /// `LockGuard`. The value returned is whether the lock was
214 /// If `true` is returned, this needs to be paired with a call to
215 /// `.unlock_noguard`. Prefer using `.trylock`.
216 pub unsafe fn trylock_noguard(&self) -> bool {
217 self.inner.trylock_noguard()
220 /// Unlocks the lock. This assumes that the current thread already holds the
222 pub unsafe fn unlock_noguard(&self) { self.inner.unlock_noguard() }
224 /// Block on the internal condition variable.
226 /// This function assumes that the lock is already held. Prefer
227 /// using `LockGuard.wait` since that guarantees that the lock is
229 pub unsafe fn wait_noguard(&self) { self.inner.wait_noguard() }
231 /// Signals a thread in `wait` to wake up
232 pub unsafe fn signal_noguard(&self) { self.inner.signal_noguard() }
// Frees the OS-level mutex/condvar when the `NativeMutex` goes out of scope.
// NOTE(review): the `fn drop(&mut self)` header line appears to have been
// dropped from this excerpt.
235 impl Drop for NativeMutex {
237 unsafe {self.inner.destroy()}
241 impl<'a> LockGuard<'a> {
242 /// Block on the internal condition variable.
// Safe to call here because holding a `LockGuard` implies the lock is held,
// which is the precondition of `wait_noguard`.
243 pub unsafe fn wait(&self) {
244 self.lock.wait_noguard()
247 /// Signals a thread in `wait` to wake up.
248 pub unsafe fn signal(&self) {
249 self.lock.signal_noguard()
// Releases the lock when the guard goes out of scope — this is what makes
// `lock()` resilient to unwinding/task failure.
254 impl<'a> Drop for LockGuard<'a> {
256 unsafe {self.lock.unlock_noguard()}
263 use self::os::{PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
264 pthread_mutex_t, pthread_cond_t};
// Attribute types are never inspected from Rust, so they are modelled as
// opaque void; the init calls below always pass a null attribute pointer.
269 type pthread_mutexattr_t = libc::c_void;
270 type pthread_condattr_t = libc::c_void;
272 #[cfg(target_os = "freebsd")]
// On FreeBSD the pthread types are pointers; the static initializer is a
// null pointer (0 cast to the pointer type).
276 pub type pthread_mutex_t = *libc::c_void;
277 pub type pthread_cond_t = *libc::c_void;
279 pub static PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t =
280 0 as pthread_mutex_t;
281 pub static PTHREAD_COND_INITIALIZER: pthread_cond_t =
285 #[cfg(target_os = "macos")]
// Byte sizes of the opaque portion of Darwin's pthread types, per arch.
289 #[cfg(target_arch = "x86_64")]
290 static __PTHREAD_MUTEX_SIZE__: uint = 56;
291 #[cfg(target_arch = "x86_64")]
292 static __PTHREAD_COND_SIZE__: uint = 40;
293 #[cfg(target_arch = "x86")]
294 static __PTHREAD_MUTEX_SIZE__: uint = 40;
295 #[cfg(target_arch = "x86")]
296 static __PTHREAD_COND_SIZE__: uint = 24;
// Signature values placed in the `__sig` field by the static initializers,
// mirroring Darwin's PTHREAD_*_INITIALIZER macros.
297 static _PTHREAD_MUTEX_SIG_init: libc::c_long = 0x32AAABA7;
298 static _PTHREAD_COND_SIG_init: libc::c_long = 0x3CB0B1BB;
300 pub struct pthread_mutex_t {
302 __opaque: [u8, ..__PTHREAD_MUTEX_SIZE__],
304 pub struct pthread_cond_t {
306 __opaque: [u8, ..__PTHREAD_COND_SIZE__],
309 pub static PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t {
310 __sig: _PTHREAD_MUTEX_SIG_init,
311 __opaque: [0, ..__PTHREAD_MUTEX_SIZE__],
313 pub static PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t {
314 __sig: _PTHREAD_COND_SIG_init,
315 __opaque: [0, ..__PTHREAD_COND_SIZE__],
319 #[cfg(target_os = "linux")]
// glibc layout: each size constant subtracts the 8 bytes supplied by the
// explicit `__align: c_longlong` field declared in the structs below.
323 // minus 8 because we have an 'align' field
324 #[cfg(target_arch = "x86_64")]
325 static __SIZEOF_PTHREAD_MUTEX_T: uint = 40 - 8;
326 #[cfg(target_arch = "x86")]
327 static __SIZEOF_PTHREAD_MUTEX_T: uint = 24 - 8;
328 #[cfg(target_arch = "arm")]
329 static __SIZEOF_PTHREAD_MUTEX_T: uint = 24 - 8;
330 #[cfg(target_arch = "mips")]
331 static __SIZEOF_PTHREAD_MUTEX_T: uint = 24 - 8;
332 #[cfg(target_arch = "x86_64")]
333 static __SIZEOF_PTHREAD_COND_T: uint = 48 - 8;
334 #[cfg(target_arch = "x86")]
335 static __SIZEOF_PTHREAD_COND_T: uint = 48 - 8;
336 #[cfg(target_arch = "arm")]
337 static __SIZEOF_PTHREAD_COND_T: uint = 48 - 8;
338 #[cfg(target_arch = "mips")]
339 static __SIZEOF_PTHREAD_COND_T: uint = 48 - 8;
341 pub struct pthread_mutex_t {
// `c_longlong` forces 8-byte alignment so the struct matches glibc's layout.
342 __align: libc::c_longlong,
343 size: [u8, ..__SIZEOF_PTHREAD_MUTEX_T],
345 pub struct pthread_cond_t {
346 __align: libc::c_longlong,
347 size: [u8, ..__SIZEOF_PTHREAD_COND_T],
350 pub static PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t {
352 size: [0, ..__SIZEOF_PTHREAD_MUTEX_T],
354 pub static PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t {
356 size: [0, ..__SIZEOF_PTHREAD_COND_T],
359 #[cfg(target_os = "android")]
// Bionic represents each pthread object as a single int.
// NOTE(review): the initializer field values are not visible in this
// excerpt — presumably zero; confirm against the full source.
363 pub struct pthread_mutex_t { value: libc::c_int }
364 pub struct pthread_cond_t { value: libc::c_int }
366 pub static PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t {
369 pub static PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t {
// Fields of the unix `Mutex` struct (its header line is not visible in this
// excerpt): the raw pthread mutex and condvar, wrapped in `Unsafe` so they
// can be mutated through `&self`.
375 priv lock: Unsafe<pthread_mutex_t>,
376 priv cond: Unsafe<pthread_cond_t>,
// Static initializer built from the platform's PTHREAD_*_INITIALIZER values.
379 pub static MUTEX_INIT: Mutex = Mutex {
381 value: PTHREAD_MUTEX_INITIALIZER,
382 marker1: marker::InvariantType,
385 value: PTHREAD_COND_INITIALIZER,
386 marker1: marker::InvariantType,
391 pub unsafe fn new() -> Mutex {
// Zeroed storage via mem::init(); then initialized in place by the
// pthread_*_init calls below (null attribute pointers = defaults).
393 lock: Unsafe::new(mem::init()),
394 cond: Unsafe::new(mem::init()),
397 pthread_mutex_init(m.lock.get(), 0 as *libc::c_void);
398 pthread_cond_init(m.cond.get(), 0 as *libc::c_void);
// Thin wrappers over the pthread calls; return codes are deliberately
// ignored here (callers uphold the locking protocol).
403 pub unsafe fn lock(&self) { pthread_mutex_lock(self.lock.get()); }
404 pub unsafe fn unlock(&self) { pthread_mutex_unlock(self.lock.get()); }
405 pub unsafe fn signal(&self) { pthread_cond_signal(self.cond.get()); }
406 pub unsafe fn wait(&self) {
407 pthread_cond_wait(self.cond.get(), self.lock.get());
// trylock succeeds iff pthread_mutex_trylock returns 0.
409 pub unsafe fn trylock(&self) -> bool {
410 pthread_mutex_trylock(self.lock.get()) == 0
412 pub unsafe fn destroy(&self) {
413 pthread_mutex_destroy(self.lock.get());
414 pthread_cond_destroy(self.cond.get());
// FFI declarations for the POSIX thread primitives used by the unix backend.
419 fn pthread_mutex_init(lock: *mut pthread_mutex_t,
420 attr: *pthread_mutexattr_t) -> libc::c_int;
421 fn pthread_mutex_destroy(lock: *mut pthread_mutex_t) -> libc::c_int;
422 fn pthread_cond_init(cond: *mut pthread_cond_t,
423 attr: *pthread_condattr_t) -> libc::c_int;
424 fn pthread_cond_destroy(cond: *mut pthread_cond_t) -> libc::c_int;
425 fn pthread_mutex_lock(lock: *mut pthread_mutex_t) -> libc::c_int;
426 fn pthread_mutex_trylock(lock: *mut pthread_mutex_t) -> libc::c_int;
427 fn pthread_mutex_unlock(lock: *mut pthread_mutex_t) -> libc::c_int;
429 fn pthread_cond_wait(cond: *mut pthread_cond_t,
430 lock: *mut pthread_mutex_t) -> libc::c_int;
431 fn pthread_cond_signal(cond: *mut pthread_cond_t) -> libc::c_int;
437 use rt::global_heap::malloc_raw;
438 use libc::{HANDLE, BOOL, LPSECURITY_ATTRIBUTES, c_void, DWORD, LPCSTR};
443 type LPCRITICAL_SECTION = *mut c_void;
// Spin count passed to InitializeCriticalSectionAndSpinCount below.
444 static SPIN_COUNT: DWORD = 4000;
445 #[cfg(target_arch = "x86")]
// Byte size of a Win32 CRITICAL_SECTION on this arch; the struct is
// heap-allocated by init_lock since Rust has no layout for it.
446 static CRIT_SECTION_SIZE: uint = 24;
449 // pointers for the lock/cond handles, atomically updated
450 priv lock: atomics::AtomicUint,
451 priv cond: atomics::AtomicUint,
// Static initializer: both handles start at 0 and are allocated lazily on
// first use by getlock()/getcond().
454 pub static MUTEX_INIT: Mutex = Mutex {
455 lock: atomics::INIT_ATOMIC_UINT,
456 cond: atomics::INIT_ATOMIC_UINT,
460 pub unsafe fn new() -> Mutex {
// Non-static construction allocates eagerly; the static path defers
// allocation to getlock()/getcond().
462 lock: atomics::AtomicUint::new(init_lock()),
463 cond: atomics::AtomicUint::new(init_cond()),
466 pub unsafe fn lock(&self) {
467 EnterCriticalSection(self.getlock() as LPCRITICAL_SECTION)
469 pub unsafe fn trylock(&self) -> bool {
470 TryEnterCriticalSection(self.getlock() as LPCRITICAL_SECTION) != 0
472 pub unsafe fn unlock(&self) {
473 LeaveCriticalSection(self.getlock() as LPCRITICAL_SECTION)
476 pub unsafe fn wait(&self) {
// NOTE(review): lines are missing from this excerpt around the wait —
// presumably the critical section is released before blocking on the
// event and re-acquired afterwards; confirm against the full source.
478 WaitForSingleObject(self.getcond() as HANDLE, libc::INFINITE);
482 pub unsafe fn signal(&self) {
483 assert!(SetEvent(self.getcond() as HANDLE) != 0);
486 /// This function is especially unsafe because there are no guarantees made
487 /// that no other thread is currently holding the lock or waiting on the
488 /// condition variable contained inside.
489 pub unsafe fn destroy(&self) {
// Swap the handles to 0 first so a racing/double destroy frees each
// resource at most once.
490 let lock = self.lock.swap(0, atomics::SeqCst);
491 let cond = self.cond.swap(0, atomics::SeqCst);
492 if lock != 0 { free_lock(lock) }
493 if cond != 0 { free_cond(cond) }
// Lazily allocates the critical section on first use: fast path loads a
// nonzero pointer; otherwise allocate and CAS it in. NOTE(review): the
// CAS-loss arm (where the loser frees its redundant allocation) is in
// lines not visible in this excerpt — confirm against the full source.
496 unsafe fn getlock(&self) -> *mut c_void {
497 match self.lock.load(atomics::SeqCst) {
499 n => return n as *mut c_void
501 let lock = init_lock();
502 match self.lock.compare_and_swap(0, lock, atomics::SeqCst) {
503 0 => return lock as *mut c_void,
507 return self.lock.load(atomics::SeqCst) as *mut c_void;
// Same lazy-initialization pattern as getlock, for the event handle.
510 unsafe fn getcond(&self) -> *mut c_void {
511 match self.cond.load(atomics::SeqCst) {
513 n => return n as *mut c_void
515 let cond = init_cond();
516 match self.cond.compare_and_swap(0, cond, atomics::SeqCst) {
517 0 => return cond as *mut c_void,
521 return self.cond.load(atomics::SeqCst) as *mut c_void;
// Heap-allocates and initializes a Win32 CRITICAL_SECTION; the address is
// smuggled through the AtomicUint fields as a uint.
525 pub unsafe fn init_lock() -> uint {
526 let block = malloc_raw(CRIT_SECTION_SIZE as uint) as *mut c_void;
527 InitializeCriticalSectionAndSpinCount(block, SPIN_COUNT);
528 return block as uint;
// Creates an unnamed event with bManualReset=FALSE (auto-reset) and
// bInitialState=FALSE (initially unsignaled), used as the condvar.
531 pub unsafe fn init_cond() -> uint {
532 return CreateEventA(ptr::mut_null(), libc::FALSE, libc::FALSE,
533 ptr::null()) as uint;
// Tears down a critical section and releases its heap block.
536 pub unsafe fn free_lock(h: uint) {
537 DeleteCriticalSection(h as LPCRITICAL_SECTION);
538 libc::free(h as *mut c_void);
// Closes the event handle created by init_cond.
541 pub unsafe fn free_cond(h: uint) {
542 let block = h as HANDLE;
543 libc::CloseHandle(block);
// FFI declarations for the Win32 synchronization primitives used above.
547 fn CreateEventA(lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
550 lpName: LPCSTR) -> HANDLE;
551 fn InitializeCriticalSectionAndSpinCount(
552 lpCriticalSection: LPCRITICAL_SECTION,
553 dwSpinCount: DWORD) -> BOOL;
554 fn DeleteCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
555 fn EnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
556 fn LeaveCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
557 fn TryEnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION) -> BOOL;
558 fn SetEvent(hEvent: HANDLE) -> BOOL;
559 fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) -> DWORD;
568 use super::{StaticNativeMutex, NATIVE_MUTEX_INIT};
569 use rt::thread::Thread;
// Smoke test: lock/unlock a statically initialized mutex via the RAII guard.
// (Several test-fn headers and bodies are missing from this excerpt.)
573 static mut lock: StaticNativeMutex = NATIVE_MUTEX_INIT;
575 let _guard = lock.lock();
// Contention test: a spawned task takes the same static lock.
581 static mut lock: StaticNativeMutex = NATIVE_MUTEX_INIT;
583 let guard = lock.lock();
584 let t = Thread::start(proc() {
585 let guard = lock.lock();
// Exercises the manual (no-guard) lock/unlock pairing.
596 fn smoke_lock_noguard() {
597 static mut lock: StaticNativeMutex = NATIVE_MUTEX_INIT;
600 lock.unlock_noguard();
// Exercises the manual condvar wait/signal protocol across two tasks.
605 fn smoke_cond_noguard() {
606 static mut lock: StaticNativeMutex = NATIVE_MUTEX_INIT;
609 let t = Thread::start(proc() {
611 lock.signal_noguard();
612 lock.unlock_noguard();
615 lock.unlock_noguard();
// Creating and then destroying a mutex without ever locking must be valid.
622 fn destroy_immediately() {
624 let mut m = StaticNativeMutex::new();