// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent
//! types.
//!
//! This module defines atomic versions of a select number of primitive
//! types, including `AtomicBool`, `AtomicIsize`, and `AtomicUsize`.
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
//! Each method takes an `Ordering` which represents the strength of
//! the memory barrier for that operation. These orderings are the
//! same as [LLVM atomic orderings][1].
//!
//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
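//!
//! For example, a `Release` store that is observed by an `Acquire` load makes
//! earlier writes visible to the reading thread. The following is an
//! illustrative sketch (not part of the original examples); `value` and
//! `ready` are placeholder names:
//!
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
//! use std::thread;
//!
//! let value = Arc::new(AtomicUsize::new(0));
//! let ready = Arc::new(AtomicBool::new(false));
//!
//! let (v, r) = (value.clone(), ready.clone());
//! let writer = thread::spawn(move || {
//!     v.store(42, Ordering::Relaxed);
//!     // The `Release` store publishes the write above to any thread that
//!     // later observes `ready == true` with an `Acquire` load.
//!     r.store(true, Ordering::Release);
//! });
//!
//! while !ready.load(Ordering::Acquire) {}
//! assert_eq!(value.load(Ordering::Relaxed), 42);
//! writer.join().unwrap();
//! ```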
//! Atomic variables are safe to share between threads (they implement `Sync`)
//! but they do not themselves provide the mechanism for sharing; they follow the
//! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
//! The most common way to share an atomic variable is to put it into an `Arc` (an
//! atomically-reference-counted shared pointer).
//!
//! Most atomic types may be stored in static variables, initialized using
//! the provided static initializers like `ATOMIC_BOOL_INIT`. Atomic statics
//! are often used for lazy global initialization.
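//!
//! As a small illustrative sketch of that pattern (not part of the original
//! examples; `INITIALIZED` and `init_once` are placeholder names), a
//! "run once" guard can be built from a single atomic static:
//!
//! ```
//! use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT};
//!
//! static INITIALIZED: AtomicBool = ATOMIC_BOOL_INIT;
//!
//! fn init_once() {
//!     // `swap` returns the previous value, so only the first caller
//!     // to flip the flag runs the one-time setup.
//!     if !INITIALIZED.swap(true, Ordering::SeqCst) {
//!         // ... perform one-time setup here ...
//!     }
//! }
//!
//! init_once();
//! init_once(); // second call is a no-op
//! ```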
//! # Examples
//!
//! A simple spinlock:
//!
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::thread;
//!
//! let spinlock = Arc::new(AtomicUsize::new(1));
//!
//! let spinlock_clone = spinlock.clone();
//! let thread = thread::spawn(move|| {
//!     spinlock_clone.store(0, Ordering::SeqCst);
//! });
//!
//! // Wait for the other thread to release the lock
//! while spinlock.load(Ordering::SeqCst) != 0 {}
//!
//! if let Err(panic) = thread.join() {
//!     println!("Thread had an error: {:?}", panic);
//! }
//! ```
//!
//! Keep a global count of live threads:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
//!
//! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
//! println!("live threads: {}", old_thread_count + 1);
//! ```
#![stable(feature = "rust1", since = "1.0.0")]
#![cfg_attr(not(target_has_atomic = "8"), allow(dead_code))]
#![cfg_attr(not(target_has_atomic = "8"), allow(unused_imports))]

use self::Ordering::*;

use marker::{Send, Sync};

use intrinsics;
use cell::UnsafeCell;
use fmt;

use result::Result::{self, Ok, Err};
/// A boolean type which can be safely shared between threads.
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
    v: UnsafeCell<u8>,
}

#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
impl Default for AtomicBool {
    fn default() -> Self {
        Self::new(false)
    }
}

// Send is implicitly implemented for AtomicBool.
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicBool {}
/// A raw pointer type which can be safely shared between threads.
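///
/// Loads and stores on an `AtomicPtr` move the pointer value itself, not the
/// data it points to. A small illustrative sketch (not part of the original
/// docs):
///
/// ```
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let mut value = 10;
/// let shared = AtomicPtr::new(&mut value);
///
/// // Swapping replaces the stored pointer; `value` itself is untouched.
/// let mut other = 20;
/// let old = shared.swap(&mut other, Ordering::SeqCst);
///
/// unsafe {
///     assert_eq!(*old, 10);
///     assert_eq!(*shared.load(Ordering::SeqCst), 20);
/// }
/// ```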
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicPtr<T> {
    p: UnsafeCell<*mut T>,
}

#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for AtomicPtr<T> {
    fn default() -> AtomicPtr<T> {
        AtomicPtr::new(::ptr::null_mut())
    }
}

#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Sync for AtomicPtr<T> {}
/// Atomic memory orderings
///
/// Memory orderings limit the ways that both the compiler and CPU may reorder
/// instructions around atomic operations. At its most restrictive,
/// "sequentially consistent" atomics allow neither reads nor writes
/// to be moved either before or after the atomic operation; on the other end
/// "relaxed" atomics allow all reorderings.
///
/// Rust's memory orderings are [the same as
/// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone, Debug)]
pub enum Ordering {
    /// No ordering constraints, only atomic operations. Corresponds to LLVM's
    /// `Monotonic` ordering.
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous writes become visible
    /// to the other threads that perform a load with `Acquire` ordering
    /// on the same value.
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, all subsequent loads will see data
    /// written before a store with `Release` ordering on the same value
    /// in other threads.
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// When coupled with a load, uses `Acquire` ordering, and with a store
    /// `Release` ordering.
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like `AcqRel` with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
}
/// An `AtomicBool` initialized to `false`.
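///
/// It is typically used to initialize `static` items. A small illustrative
/// sketch (not part of the original docs; `FLAG` is a placeholder name):
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT};
///
/// static FLAG: AtomicBool = ATOMIC_BOOL_INIT;
///
/// FLAG.store(true, Ordering::Relaxed);
/// assert_eq!(FLAG.load(Ordering::Relaxed), true);
/// ```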
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
#[cfg(target_has_atomic = "8")]
impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: bool) -> AtomicBool {
        AtomicBool { v: UnsafeCell::new(v as u8) }
    }

    /// Loads a value from the bool.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> bool {
        unsafe { atomic_load(self.v.get(), order) != 0 }
    }

    /// Stores a value into the bool.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    /// Panics if `order` is `Acquire` or `AcqRel`.
    ///
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// some_bool.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: bool, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val as u8, order); }
    }

    /// Stores a value into the bool, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
    }
    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }
    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
    /// operation. The first describes the required ordering if the operation succeeds while the
    /// second describes the required ordering when the operation fails. The failure ordering can't
    /// be `Release` or `AcqRel` and must be equivalent to or weaker than the success ordering.
    ///
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_exchange(true,
    ///                                       false,
    ///                                       Ordering::Acquire,
    ///                                       Ordering::Relaxed),
    ///            Ok(true));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_exchange(true, true,
    ///                                       Ordering::SeqCst,
    ///                                       Ordering::Acquire),
    ///            Err(false));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
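    ///
    /// A successful `compare_exchange` is also the usual way to take a simple
    /// try-lock (an illustrative sketch, not part of the original docs):
    ///
    /// let lock = AtomicBool::new(false);
    ///
    /// // `Ok` means the flag was `false` and we flipped it, i.e. we now hold the lock.
    /// assert!(lock.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_ok());
    /// // A second attempt fails and reports the current value.
    /// assert_eq!(lock.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed),
    ///            Err(true));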
322 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
323 pub fn compare_exchange(&self,
327 failure: Ordering) -> Result<bool, bool> {
328 match unsafe { atomic_compare_exchange(self.v.get(), current as u8, new as u8,
329 success, failure) } {
331 Err(x) => Err(x != 0),
    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be `Release` or `AcqRel` and must be equivalent to or weaker than the
    /// success ordering.
    ///
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let val = AtomicBool::new(false);
    ///
    /// let new = true;
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange_weak(&self,
                                 current: bool,
                                 new: bool,
                                 success: Ordering,
                                 failure: Ordering) -> Result<bool, bool> {
        match unsafe { atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8,
                                                    success, failure) } {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }
378 /// Logical "and" with a boolean value.
380 /// Performs a logical "and" operation on the current value and the argument `val`, and sets
381 /// the new value to the result.
383 /// Returns the previous value.
388 /// use std::sync::atomic::{AtomicBool, Ordering};
390 /// let foo = AtomicBool::new(true);
391 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
392 /// assert_eq!(foo.load(Ordering::SeqCst), false);
394 /// let foo = AtomicBool::new(true);
395 /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
396 /// assert_eq!(foo.load(Ordering::SeqCst), true);
398 /// let foo = AtomicBool::new(false);
399 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
400 /// assert_eq!(foo.load(Ordering::SeqCst), false);
403 #[stable(feature = "rust1", since = "1.0.0")]
404 pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
405 unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
408 /// Logical "nand" with a boolean value.
410 /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
411 /// the new value to the result.
413 /// Returns the previous value.
418 /// use std::sync::atomic::{AtomicBool, Ordering};
420 /// let foo = AtomicBool::new(true);
421 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
422 /// assert_eq!(foo.load(Ordering::SeqCst), true);
424 /// let foo = AtomicBool::new(true);
425 /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
426 /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
427 /// assert_eq!(foo.load(Ordering::SeqCst), false);
429 /// let foo = AtomicBool::new(false);
430 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
431 /// assert_eq!(foo.load(Ordering::SeqCst), true);
434 #[stable(feature = "rust1", since = "1.0.0")]
435 pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
436 // We can't use atomic_nand here because it can result in a bool with
437 // an invalid value. This happens because the atomic operation is done
438 // with an 8-bit integer internally, which would set the upper 7 bits.
439 // So we just use a compare-exchange loop instead, which is what the
440 // intrinsic actually expands to anyways on many platforms.
441 let mut old = self.load(Relaxed);
443 let new = !(old && val);
444 match self.compare_exchange_weak(old, new, order, Relaxed) {
452 /// Logical "or" with a boolean value.
454 /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
455 /// new value to the result.
457 /// Returns the previous value.
462 /// use std::sync::atomic::{AtomicBool, Ordering};
464 /// let foo = AtomicBool::new(true);
465 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
466 /// assert_eq!(foo.load(Ordering::SeqCst), true);
468 /// let foo = AtomicBool::new(true);
469 /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
470 /// assert_eq!(foo.load(Ordering::SeqCst), true);
472 /// let foo = AtomicBool::new(false);
473 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
474 /// assert_eq!(foo.load(Ordering::SeqCst), false);
477 #[stable(feature = "rust1", since = "1.0.0")]
478 pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
479 unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
482 /// Logical "xor" with a boolean value.
484 /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
485 /// the new value to the result.
487 /// Returns the previous value.
492 /// use std::sync::atomic::{AtomicBool, Ordering};
494 /// let foo = AtomicBool::new(true);
495 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
496 /// assert_eq!(foo.load(Ordering::SeqCst), true);
498 /// let foo = AtomicBool::new(true);
499 /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
500 /// assert_eq!(foo.load(Ordering::SeqCst), false);
502 /// let foo = AtomicBool::new(false);
503 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
504 /// assert_eq!(foo.load(Ordering::SeqCst), false);
507 #[stable(feature = "rust1", since = "1.0.0")]
508 pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
509 unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
#[cfg(target_has_atomic = "ptr")]
impl<T> AtomicPtr<T> {
    /// Creates a new `AtomicPtr`.
    ///
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let ptr = &mut 5;
    /// let atomic_ptr = AtomicPtr::new(ptr);
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(p: *mut T) -> AtomicPtr<T> {
        AtomicPtr { p: UnsafeCell::new(p) }
    }
    /// Loads a value from the pointer.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let value = some_ptr.load(Ordering::Relaxed);
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> *mut T {
        unsafe {
            atomic_load(self.p.get() as *mut usize, order) as *mut T
        }
    }

    /// Stores a value into the pointer.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    /// Panics if `order` is `Acquire` or `AcqRel`.
    ///
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// some_ptr.store(other_ptr, Ordering::Relaxed);
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, ptr: *mut T, order: Ordering) {
        unsafe { atomic_store(self.p.get() as *mut usize, ptr as usize, order); }
    }

    /// Stores a value into the pointer, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
        unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
    }
    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    /// let another_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }
    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
    /// operation. The first describes the required ordering if the operation succeeds while the
    /// second describes the required ordering when the operation fails. The failure ordering can't
    /// be `Release` or `AcqRel` and must be equivalent to or weaker than the success ordering.
    ///
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    /// let another_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_exchange(other_ptr, another_ptr,
    ///                                       Ordering::SeqCst, Ordering::Relaxed);
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange(&self,
                            current: *mut T,
                            new: *mut T,
                            success: Ordering,
                            failure: Ordering) -> Result<*mut T, *mut T> {
        unsafe {
            let res = atomic_compare_exchange(self.p.get() as *mut usize,
                                              current as usize,
                                              new as usize,
                                              success,
                                              failure);
            match res {
                Ok(x) => Ok(x as *mut T),
                Err(x) => Err(x as *mut T),
            }
        }
    }
    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be `Release` or `AcqRel` and must be equivalent to or weaker than the
    /// success ordering.
    ///
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    /// let some_ptr = AtomicPtr::new(&mut 5);
    ///
    /// let new = &mut 10;
    /// let mut old = some_ptr.load(Ordering::Relaxed);
    /// loop {
    ///     match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange_weak(&self,
                                 current: *mut T,
                                 new: *mut T,
                                 success: Ordering,
                                 failure: Ordering) -> Result<*mut T, *mut T> {
        unsafe {
            let res = atomic_compare_exchange_weak(self.p.get() as *mut usize,
                                                   current as usize,
                                                   new as usize,
                                                   success,
                                                   failure);
            match res {
                Ok(x) => Ok(x as *mut T),
                Err(x) => Err(x as *mut T),
            }
        }
    }
}
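
// `atomic_int!` expands to one atomic integer type per invocation: the struct,
// its `ATOMIC_*_INIT` constant, the `Default`/`Debug` impls, and the method set
// below. The three leading meta arguments carry the stability attributes for
// the base API, the `compare_exchange` family, and the `Debug` impl.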
macro_rules! atomic_int {
    ($stable:meta,
     $stable_cxchg:meta,
     $stable_debug:meta,
     $int_type:ident $atomic_type:ident $atomic_init:ident) => {
        /// An integer type which can be safely shared between threads.
        pub struct $atomic_type {
            v: UnsafeCell<$int_type>,
        }

        /// An atomic integer initialized to `0`.
        pub const $atomic_init: $atomic_type = $atomic_type::new(0);

        impl Default for $atomic_type {
            fn default() -> Self {
                Self::new(Default::default())
            }
        }

        impl fmt::Debug for $atomic_type {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                f.debug_tuple(stringify!($atomic_type))
                    .field(&self.load(Ordering::SeqCst))
                    .finish()
            }
        }

        // Send is implicitly implemented.
        unsafe impl Sync for $atomic_type {}

        impl $atomic_type {
            /// Creates a new atomic integer.
            ///
            /// use std::sync::atomic::AtomicIsize;
            ///
            /// let atomic_forty_two = AtomicIsize::new(42);
            pub const fn new(v: $int_type) -> Self {
                $atomic_type {v: UnsafeCell::new(v)}
            }
            /// Loads a value from the atomic integer.
            ///
            /// `load` takes an `Ordering` argument which describes the memory ordering of this
            /// operation. Panics if `order` is `Release` or `AcqRel`.
            ///
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
            pub fn load(&self, order: Ordering) -> $int_type {
                unsafe { atomic_load(self.v.get(), order) }
            }

            /// Stores a value into the atomic integer.
            ///
            /// `store` takes an `Ordering` argument which describes the memory ordering of this
            /// operation. Panics if `order` is `Acquire` or `AcqRel`.
            ///
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// some_isize.store(10, Ordering::Relaxed);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            pub fn store(&self, val: $int_type, order: Ordering) {
                unsafe { atomic_store(self.v.get(), val, order); }
            }

            /// Stores a value into the atomic integer, returning the old value.
            ///
            /// `swap` takes an `Ordering` argument which describes the memory ordering of this
            /// operation.
            ///
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
            pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_swap(self.v.get(), val, order) }
            }
            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// The return value is always the previous value. If it is equal to `current`, then the
            /// value was updated.
            ///
            /// `compare_and_swap` also takes an `Ordering` argument which describes the memory
            /// ordering of this operation.
            ///
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            ///
            /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            pub fn compare_and_swap(&self,
                                    current: $int_type,
                                    new: $int_type,
                                    order: Ordering) -> $int_type {
                match self.compare_exchange(current,
                                            new,
                                            order,
                                            strongest_failure_ordering(order)) {
                    Ok(x) => x,
                    Err(x) => x,
                }
            }
            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// The return value is a result indicating whether the new value was written and
            /// containing the previous value. On success this value is guaranteed to be equal to
            /// `current`.
            ///
            /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of
            /// this operation. The first describes the required ordering if the operation succeeds
            /// while the second describes the required ordering when the operation fails. The
            /// failure ordering can't be `Release` or `AcqRel` and must be equivalent to or weaker
            /// than the success ordering.
            ///
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.compare_exchange(5, 10,
            ///                                        Ordering::Acquire,
            ///                                        Ordering::Relaxed),
            ///            Ok(5));
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            ///
            /// assert_eq!(some_isize.compare_exchange(6, 12,
            ///                                        Ordering::SeqCst,
            ///                                        Ordering::Acquire),
            ///            Err(10));
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            pub fn compare_exchange(&self,
                                    current: $int_type,
                                    new: $int_type,
                                    success: Ordering,
                                    failure: Ordering) -> Result<$int_type, $int_type> {
                unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
            }
            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
            /// comparison succeeds, which can result in more efficient code on some platforms. The
            /// return value is a result indicating whether the new value was written and containing
            /// the previous value.
            ///
            /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
            /// ordering of this operation. The first describes the required ordering if the
            /// operation succeeds while the second describes the required ordering when the
            /// operation fails. The failure ordering can't be `Release` or `AcqRel` and must be
            /// equivalent to or weaker than the success ordering.
            ///
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let val = AtomicIsize::new(4);
            ///
            /// let mut old = val.load(Ordering::Relaxed);
            /// loop {
            ///     let new = old * 2;
            ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
            ///         Ok(_) => break,
            ///         Err(x) => old = x,
            ///     }
            /// }
            pub fn compare_exchange_weak(&self,
                                         current: $int_type,
                                         new: $int_type,
                                         success: Ordering,
                                         failure: Ordering) -> Result<$int_type, $int_type> {
                unsafe {
                    atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
                }
            }
            /// Add to the current value, returning the previous value.
            ///
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0);
            /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
            /// assert_eq!(foo.load(Ordering::SeqCst), 10);
            pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_add(self.v.get(), val, order) }
            }

            /// Subtract from the current value, returning the previous value.
            ///
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0);
            /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
            /// assert_eq!(foo.load(Ordering::SeqCst), -10);
            pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_sub(self.v.get(), val, order) }
            }

            /// Bitwise and with the current value, returning the previous value.
            ///
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
            pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_and(self.v.get(), val, order) }
            }

            /// Bitwise or with the current value, returning the previous value.
            ///
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
            pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_or(self.v.get(), val, order) }
            }

            /// Bitwise xor with the current value, returning the previous value.
            ///
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
            pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_xor(self.v.get(), val, order) }
            }
        }
    }
}
#[cfg(target_has_atomic = "8")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i8 AtomicI8 ATOMIC_I8_INIT
}
#[cfg(target_has_atomic = "8")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u8 AtomicU8 ATOMIC_U8_INIT
}
#[cfg(target_has_atomic = "16")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i16 AtomicI16 ATOMIC_I16_INIT
}
#[cfg(target_has_atomic = "16")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u16 AtomicU16 ATOMIC_U16_INIT
}
#[cfg(target_has_atomic = "32")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i32 AtomicI32 ATOMIC_I32_INIT
}
#[cfg(target_has_atomic = "32")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u32 AtomicU32 ATOMIC_U32_INIT
}
#[cfg(target_has_atomic = "64")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i64 AtomicI64 ATOMIC_I64_INIT
}
#[cfg(target_has_atomic = "64")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u64 AtomicU64 ATOMIC_U64_INIT
}
#[cfg(target_has_atomic = "ptr")]
atomic_int! {
    stable(feature = "rust1", since = "1.0.0"),
    stable(feature = "extended_compare_and_swap", since = "1.10.0"),
    stable(feature = "atomic_debug", since = "1.3.0"),
    isize AtomicIsize ATOMIC_ISIZE_INIT
}
#[cfg(target_has_atomic = "ptr")]
atomic_int! {
    stable(feature = "rust1", since = "1.0.0"),
    stable(feature = "extended_compare_and_swap", since = "1.10.0"),
    stable(feature = "atomic_debug", since = "1.3.0"),
    usize AtomicUsize ATOMIC_USIZE_INIT
}
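
// Maps the single ordering that `compare_and_swap` takes to the strongest
// failure ordering that `compare_exchange` accepts for that success ordering
// (for example, a `Release` success ordering can only be paired with a
// `Relaxed` failure ordering).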
fn strongest_failure_ordering(order: Ordering) -> Ordering {
    match order {
        Release => Relaxed,
        Relaxed => Relaxed,
        SeqCst => SeqCst,
        Acquire => Acquire,
        AcqRel => Acquire,
    }
}
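
// Each helper below dispatches on `order` to the matching intrinsic; ordering
// values that are meaningless for a given operation (such as an `Acquire`
// store) panic instead of silently using another ordering.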
unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
    match order {
        Release => intrinsics::atomic_store_rel(dst, val),
        Relaxed => intrinsics::atomic_store_relaxed(dst, val),
        SeqCst => intrinsics::atomic_store(dst, val),
        Acquire => panic!("there is no such thing as an acquire store"),
        AcqRel => panic!("there is no such thing as an acquire/release store"),
    }
}

unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_load_acq(dst),
        Relaxed => intrinsics::atomic_load_relaxed(dst),
        SeqCst => intrinsics::atomic_load(dst),
        Release => panic!("there is no such thing as a release load"),
        AcqRel => panic!("there is no such thing as an acquire/release load"),
    }
}
unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xchg_acq(dst, val),
        Release => intrinsics::atomic_xchg_rel(dst, val),
        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xchg(dst, val)
    }
}

/// Returns the old value (like __sync_fetch_and_add).
unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xadd_acq(dst, val),
        Release => intrinsics::atomic_xadd_rel(dst, val),
        AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xadd(dst, val)
    }
}

/// Returns the old value (like __sync_fetch_and_sub).
unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xsub_acq(dst, val),
        Release => intrinsics::atomic_xsub_rel(dst, val),
        AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xsub(dst, val)
    }
}
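
// The compare-exchange intrinsics return a `(value, ok)` pair: the value that
// was found in memory and whether the exchange actually took place. Ordering
// pairs that the API forbids (a `Release` or `AcqRel` failure ordering, or a
// failure ordering stronger than the success ordering) panic here.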
unsafe fn atomic_compare_exchange<T>(dst: *mut T,
                                     old: T,
                                     new: T,
                                     success: Ordering,
                                     failure: Ordering) -> Result<T, T> {
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    if ok {
        Ok(val)
    } else {
        Err(val)
    }
}
unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
                                          old: T,
                                          new: T,
                                          success: Ordering,
                                          failure: Ordering) -> Result<T, T> {
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    if ok {
        Ok(val)
    } else {
        Err(val)
    }
}
unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_and_acq(dst, val),
        Release => intrinsics::atomic_and_rel(dst, val),
        AcqRel => intrinsics::atomic_and_acqrel(dst, val),
        Relaxed => intrinsics::atomic_and_relaxed(dst, val),
        SeqCst => intrinsics::atomic_and(dst, val)
    }
}

unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_or_acq(dst, val),
        Release => intrinsics::atomic_or_rel(dst, val),
        AcqRel => intrinsics::atomic_or_acqrel(dst, val),
        Relaxed => intrinsics::atomic_or_relaxed(dst, val),
        SeqCst => intrinsics::atomic_or(dst, val)
    }
}

unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xor_acq(dst, val),
        Release => intrinsics::atomic_xor_rel(dst, val),
        AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xor(dst, val)
    }
}
/// An atomic fence.
///
/// A fence 'A' which has `Release` ordering semantics, synchronizes with a
/// fence 'B' with (at least) `Acquire` semantics, if and only if there exist
/// atomic operations X and Y, both operating on some atomic object 'M' such
/// that A is sequenced before X, Y is sequenced before B and Y observes
/// the change to M. This provides a happens-before dependence between A and B.
///
/// Atomic operations with `Release` or `Acquire` semantics can also synchronize
/// with a fence.
///
/// A fence which has `SeqCst` ordering, in addition to having both `Acquire`
/// and `Release` semantics, participates in the global program order of the
/// other `SeqCst` operations and/or fences.
///
/// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings.
///
/// Panics if `order` is `Relaxed`.
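///
/// Fences are commonly paired with `Relaxed` atomic accesses. The following
/// single-threaded sketch only illustrates the pairing of a release fence
/// with an acquire fence (not part of the original docs):
///
/// use std::sync::atomic::{fence, AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
///
/// static DATA: AtomicUsize = ATOMIC_USIZE_INIT;
/// static FLAG: AtomicUsize = ATOMIC_USIZE_INIT;
///
/// DATA.store(42, Ordering::Relaxed);
/// // The release fence orders the store above before the store below.
/// fence(Ordering::Release);
/// FLAG.store(1, Ordering::Relaxed);
///
/// if FLAG.load(Ordering::Relaxed) == 1 {
///     // The acquire fence pairs with the release fence in the writer.
///     fence(Ordering::Acquire);
///     assert_eq!(DATA.load(Ordering::Relaxed), 42);
/// }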
#[stable(feature = "rust1", since = "1.0.0")]
pub fn fence(order: Ordering) {
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence_acq(),
            Release => intrinsics::atomic_fence_rel(),
            AcqRel => intrinsics::atomic_fence_acqrel(),
            SeqCst => intrinsics::atomic_fence(),
            Relaxed => panic!("there is no such thing as a relaxed fence")
        }
    }
}
#[cfg(target_has_atomic = "8")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl fmt::Debug for AtomicBool {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("AtomicBool").field(&self.load(Ordering::SeqCst)).finish()
    }
}

#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl<T> fmt::Debug for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("AtomicPtr").field(&self.load(Ordering::SeqCst)).finish()
    }
}