1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
13 //! Atomic types provide primitive shared-memory communication between
14 //! threads, and are the building blocks of other concurrent
17 //! This module defines atomic versions of a select number of primitive
18 //! types, including `AtomicBool`, `AtomicIsize`, and `AtomicUsize`.
19 //! Atomic types present operations that, when used correctly, synchronize
20 //! updates between threads.
22 //! Each method takes an `Ordering` which represents the strength of
23 //! the memory barrier for that operation. These orderings are the
24 //! same as [LLVM atomic orderings][1].
26 //! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
28 //! Atomic variables are safe to share between threads (they implement `Sync`)
29 //! but they do not themselves provide the mechanism for sharing and follow the
30 //! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
31 //! The most common way to share an atomic variable is to put it into an `Arc` (an
32 //! atomically-reference-counted shared pointer).
34 //! Most atomic types may be stored in static variables, initialized using
35 //! the provided static initializers like `ATOMIC_BOOL_INIT`. Atomic statics
36 //! are often used for lazy global initialization.
41 //! A simple spinlock:
44 //! use std::sync::Arc;
45 //! use std::sync::atomic::{AtomicUsize, Ordering};
49 //! let spinlock = Arc::new(AtomicUsize::new(1));
51 //! let spinlock_clone = spinlock.clone();
52 //! let thread = thread::spawn(move|| {
53 //! spinlock_clone.store(0, Ordering::SeqCst);
56 //! // Wait for the other thread to release the lock
57 //! while spinlock.load(Ordering::SeqCst) != 0 {}
59 //! if let Err(panic) = thread.join() {
60 //! println!("Thread had an error: {:?}", panic);
65 //! Keep a global count of live threads:
68 //! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
70 //! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
72 //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
73 //! println!("live threads: {}", old_thread_count + 1);
76 #![stable(feature = "rust1", since = "1.0.0")]
77 #![cfg_attr(not(target_has_atomic = "8"), allow(dead_code))]
78 #![cfg_attr(not(target_has_atomic = "8"), allow(unused_imports))]
80 use self::Ordering::*;
82 use marker::{Send, Sync};
87 use result::Result::{self, Ok, Err};
92 /// A boolean type which can be safely shared between threads.
94 /// This type has the same in-memory representation as a `bool`.
95 #[cfg(target_has_atomic = "8")]
96 #[stable(feature = "rust1", since = "1.0.0")]
97 pub struct AtomicBool {
101 #[cfg(target_has_atomic = "8")]
102 #[stable(feature = "rust1", since = "1.0.0")]
103 impl Default for AtomicBool {
104 fn default() -> Self {
109 // Send is implicitly implemented for AtomicBool.
110 #[cfg(target_has_atomic = "8")]
111 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: the interior `UnsafeCell` would normally make this type !Sync;
// sharing across threads is sound because every access to the cell goes
// through the atomic intrinsics (`atomic_load`, `atomic_store`, ...).
112 unsafe impl Sync for AtomicBool {}
114 /// A raw pointer type which can be safely shared between threads.
116 /// This type has the same in-memory representation as a `*mut T`.
117 #[cfg(target_has_atomic = "ptr")]
118 #[stable(feature = "rust1", since = "1.0.0")]
119 pub struct AtomicPtr<T> {
120 p: UnsafeCell<*mut T>,
123 #[cfg(target_has_atomic = "ptr")]
124 #[stable(feature = "rust1", since = "1.0.0")]
125 impl<T> Default for AtomicPtr<T> {
126 fn default() -> AtomicPtr<T> {
127 AtomicPtr::new(::ptr::null_mut())
131 #[cfg(target_has_atomic = "ptr")]
132 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: `AtomicPtr<T>` stores only a raw `*mut T` (which is what blocks
// the auto-derived `Send`); moving the wrapper between threads does not
// move or access any `T`, so no `T: Send` bound is needed.
133 unsafe impl<T> Send for AtomicPtr<T> {}
134 #[cfg(target_has_atomic = "ptr")]
135 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: the interior `UnsafeCell<*mut T>` is only ever read/written via
// atomic operations, so concurrent shared access cannot produce a data
// race on the stored pointer value itself.
136 unsafe impl<T> Sync for AtomicPtr<T> {}
138 /// Atomic memory orderings
140 /// Memory orderings limit the ways that both the compiler and CPU may reorder
141 /// instructions around atomic operations. At its most restrictive,
142 /// "sequentially consistent" atomics allow neither reads nor writes
143 /// to be moved either before or after the atomic operation; on the other end
144 /// "relaxed" atomics allow all reorderings.
146 /// Rust's memory orderings are [the same as
147 /// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
148 #[stable(feature = "rust1", since = "1.0.0")]
149 #[derive(Copy, Clone, Debug)]
151 /// No ordering constraints, only atomic operations. Corresponds to LLVM's
152 /// `Monotonic` ordering.
153 #[stable(feature = "rust1", since = "1.0.0")]
155 /// When coupled with a store, all previous writes become visible
156 /// to the other threads that perform a load with `Acquire` ordering
157 /// on the same value.
158 #[stable(feature = "rust1", since = "1.0.0")]
160 /// When coupled with a load, all subsequent loads will see data
161 /// written before a store with `Release` ordering on the same value
162 /// in other threads.
163 #[stable(feature = "rust1", since = "1.0.0")]
165 /// When coupled with a load, uses `Acquire` ordering, and with a store
166 /// `Release` ordering.
167 #[stable(feature = "rust1", since = "1.0.0")]
169 /// Like `AcqRel` with the additional guarantee that all threads see all
170 /// sequentially consistent operations in the same order.
171 #[stable(feature = "rust1", since = "1.0.0")]
175 /// An `AtomicBool` initialized to `false`.
///
/// Provided as a `const` so it can be used as the initializer of a
/// `static` atomic (see the module docs on lazy global initialization).
176 #[cfg(target_has_atomic = "8")]
177 #[stable(feature = "rust1", since = "1.0.0")]
178 pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
180 #[cfg(target_has_atomic = "8")]
182 /// Creates a new `AtomicBool`.
187 /// use std::sync::atomic::AtomicBool;
189 /// let atomic_true = AtomicBool::new(true);
190 /// let atomic_false = AtomicBool::new(false);
193 #[stable(feature = "rust1", since = "1.0.0")]
194 pub const fn new(v: bool) -> AtomicBool {
195 AtomicBool { v: UnsafeCell::new(v as u8) }
198 /// Returns a mutable reference to the underlying `bool`.
200 /// This is safe because the mutable reference guarantees that no other threads are
201 /// concurrently accessing the atomic data.
206 /// #![feature(atomic_access)]
207 /// use std::sync::atomic::{AtomicBool, Ordering};
209 /// let mut some_bool = AtomicBool::new(true);
210 /// assert_eq!(*some_bool.get_mut(), true);
211 /// *some_bool.get_mut() = false;
212 /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
215 #[unstable(feature = "atomic_access", issue = "35603")]
216 pub fn get_mut(&mut self) -> &mut bool {
217 unsafe { &mut *(self.v.get() as *mut bool) }
220 /// Consumes the atomic and returns the contained value.
222 /// This is safe because passing `self` by value guarantees that no other threads are
223 /// concurrently accessing the atomic data.
228 /// #![feature(atomic_access)]
229 /// use std::sync::atomic::AtomicBool;
231 /// let some_bool = AtomicBool::new(true);
232 /// assert_eq!(some_bool.into_inner(), true);
235 #[unstable(feature = "atomic_access", issue = "35603")]
236 pub fn into_inner(self) -> bool {
237 unsafe { self.v.into_inner() != 0 }
240 /// Loads a value from the bool.
242 /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
246 /// Panics if `order` is `Release` or `AcqRel`.
251 /// use std::sync::atomic::{AtomicBool, Ordering};
253 /// let some_bool = AtomicBool::new(true);
255 /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
258 #[stable(feature = "rust1", since = "1.0.0")]
259 pub fn load(&self, order: Ordering) -> bool {
260 unsafe { atomic_load(self.v.get(), order) != 0 }
263 /// Stores a value into the bool.
265 /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
270 /// use std::sync::atomic::{AtomicBool, Ordering};
272 /// let some_bool = AtomicBool::new(true);
274 /// some_bool.store(false, Ordering::Relaxed);
275 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
280 /// Panics if `order` is `Acquire` or `AcqRel`.
282 #[stable(feature = "rust1", since = "1.0.0")]
283 pub fn store(&self, val: bool, order: Ordering) {
284 unsafe { atomic_store(self.v.get(), val as u8, order); }
287 /// Stores a value into the bool, returning the old value.
289 /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
294 /// use std::sync::atomic::{AtomicBool, Ordering};
296 /// let some_bool = AtomicBool::new(true);
298 /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
299 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
302 #[stable(feature = "rust1", since = "1.0.0")]
303 pub fn swap(&self, val: bool, order: Ordering) -> bool {
304 unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
307 /// Stores a value into the `bool` if the current value is the same as the `current` value.
309 /// The return value is always the previous value. If it is equal to `current`, then the value
312 /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
318 /// use std::sync::atomic::{AtomicBool, Ordering};
320 /// let some_bool = AtomicBool::new(true);
322 /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
323 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
325 /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
326 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
329 #[stable(feature = "rust1", since = "1.0.0")]
330 pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
331 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
337 /// Stores a value into the `bool` if the current value is the same as the `current` value.
339 /// The return value is a result indicating whether the new value was written and containing
340 /// the previous value. On success this value is guaranteed to be equal to `current`.
342 /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
343 /// operation. The first describes the required ordering if the operation succeeds while the
344 /// second describes the required ordering when the operation fails. The failure ordering can't
345 /// be `Release` or `AcqRel` and must be equivalent or weaker than the success ordering.
350 /// use std::sync::atomic::{AtomicBool, Ordering};
352 /// let some_bool = AtomicBool::new(true);
354 /// assert_eq!(some_bool.compare_exchange(true,
356 /// Ordering::Acquire,
357 /// Ordering::Relaxed),
359 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
361 /// assert_eq!(some_bool.compare_exchange(true, true,
362 /// Ordering::SeqCst,
363 /// Ordering::Acquire),
365 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
368 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
369 pub fn compare_exchange(&self,
373 failure: Ordering) -> Result<bool, bool> {
374 match unsafe { atomic_compare_exchange(self.v.get(), current as u8, new as u8,
375 success, failure) } {
377 Err(x) => Err(x != 0),
381 /// Stores a value into the `bool` if the current value is the same as the `current` value.
383 /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
384 /// comparison succeeds, which can result in more efficient code on some platforms. The
385 /// return value is a result indicating whether the new value was written and containing the
388 /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
389 /// ordering of this operation. The first describes the required ordering if the operation
390 /// succeeds while the second describes the required ordering when the operation fails. The
391 /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than the
392 /// success ordering.
397 /// use std::sync::atomic::{AtomicBool, Ordering};
399 /// let val = AtomicBool::new(false);
402 /// let mut old = val.load(Ordering::Relaxed);
404 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
406 /// Err(x) => old = x,
411 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
412 pub fn compare_exchange_weak(&self,
416 failure: Ordering) -> Result<bool, bool> {
417 match unsafe { atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8,
418 success, failure) } {
420 Err(x) => Err(x != 0),
424 /// Logical "and" with a boolean value.
426 /// Performs a logical "and" operation on the current value and the argument `val`, and sets
427 /// the new value to the result.
429 /// Returns the previous value.
434 /// use std::sync::atomic::{AtomicBool, Ordering};
436 /// let foo = AtomicBool::new(true);
437 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
438 /// assert_eq!(foo.load(Ordering::SeqCst), false);
440 /// let foo = AtomicBool::new(true);
441 /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
442 /// assert_eq!(foo.load(Ordering::SeqCst), true);
444 /// let foo = AtomicBool::new(false);
445 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
446 /// assert_eq!(foo.load(Ordering::SeqCst), false);
449 #[stable(feature = "rust1", since = "1.0.0")]
450 pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
451 unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
454 /// Logical "nand" with a boolean value.
456 /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
457 /// the new value to the result.
459 /// Returns the previous value.
464 /// use std::sync::atomic::{AtomicBool, Ordering};
466 /// let foo = AtomicBool::new(true);
467 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
468 /// assert_eq!(foo.load(Ordering::SeqCst), true);
470 /// let foo = AtomicBool::new(true);
471 /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
472 /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
473 /// assert_eq!(foo.load(Ordering::SeqCst), false);
475 /// let foo = AtomicBool::new(false);
476 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
477 /// assert_eq!(foo.load(Ordering::SeqCst), true);
480 #[stable(feature = "rust1", since = "1.0.0")]
481 pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
482 // We can't use atomic_nand here because it can result in a bool with
483 // an invalid value. This happens because the atomic operation is done
484 // with an 8-bit integer internally, which would set the upper 7 bits.
485 // So we just use a compare-exchange loop instead, which is what the
486 // intrinsic actually expands to anyways on many platforms.
487 let mut old = self.load(Relaxed);
489 let new = !(old && val);
490 match self.compare_exchange_weak(old, new, order, Relaxed) {
498 /// Logical "or" with a boolean value.
500 /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
501 /// new value to the result.
503 /// Returns the previous value.
508 /// use std::sync::atomic::{AtomicBool, Ordering};
510 /// let foo = AtomicBool::new(true);
511 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
512 /// assert_eq!(foo.load(Ordering::SeqCst), true);
514 /// let foo = AtomicBool::new(true);
515 /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
516 /// assert_eq!(foo.load(Ordering::SeqCst), true);
518 /// let foo = AtomicBool::new(false);
519 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
520 /// assert_eq!(foo.load(Ordering::SeqCst), false);
523 #[stable(feature = "rust1", since = "1.0.0")]
524 pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
525 unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
528 /// Logical "xor" with a boolean value.
530 /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
531 /// the new value to the result.
533 /// Returns the previous value.
538 /// use std::sync::atomic::{AtomicBool, Ordering};
540 /// let foo = AtomicBool::new(true);
541 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
542 /// assert_eq!(foo.load(Ordering::SeqCst), true);
544 /// let foo = AtomicBool::new(true);
545 /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
546 /// assert_eq!(foo.load(Ordering::SeqCst), false);
548 /// let foo = AtomicBool::new(false);
549 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
550 /// assert_eq!(foo.load(Ordering::SeqCst), false);
553 #[stable(feature = "rust1", since = "1.0.0")]
554 pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
555 unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
559 #[cfg(target_has_atomic = "ptr")]
560 impl<T> AtomicPtr<T> {
561 /// Creates a new `AtomicPtr`.
566 /// use std::sync::atomic::AtomicPtr;
568 /// let ptr = &mut 5;
569 /// let atomic_ptr = AtomicPtr::new(ptr);
572 #[stable(feature = "rust1", since = "1.0.0")]
573 pub const fn new(p: *mut T) -> AtomicPtr<T> {
574 AtomicPtr { p: UnsafeCell::new(p) }
577 /// Returns a mutable reference to the underlying pointer.
579 /// This is safe because the mutable reference guarantees that no other threads are
580 /// concurrently accessing the atomic data.
585 /// #![feature(atomic_access)]
586 /// use std::sync::atomic::{AtomicPtr, Ordering};
588 /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
589 /// *atomic_ptr.get_mut() = &mut 5;
590 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
593 #[unstable(feature = "atomic_access", issue = "35603")]
594 pub fn get_mut(&mut self) -> &mut *mut T {
595 unsafe { &mut *self.p.get() }
598 /// Consumes the atomic and returns the contained value.
600 /// This is safe because passing `self` by value guarantees that no other threads are
601 /// concurrently accessing the atomic data.
606 /// #![feature(atomic_access)]
607 /// use std::sync::atomic::AtomicPtr;
609 /// let atomic_ptr = AtomicPtr::new(&mut 5);
610 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
613 #[unstable(feature = "atomic_access", issue = "35603")]
614 pub fn into_inner(self) -> *mut T {
615 unsafe { self.p.into_inner() }
618 /// Loads a value from the pointer.
620 /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
624 /// Panics if `order` is `Release` or `AcqRel`.
629 /// use std::sync::atomic::{AtomicPtr, Ordering};
631 /// let ptr = &mut 5;
632 /// let some_ptr = AtomicPtr::new(ptr);
634 /// let value = some_ptr.load(Ordering::Relaxed);
637 #[stable(feature = "rust1", since = "1.0.0")]
638 pub fn load(&self, order: Ordering) -> *mut T {
640 atomic_load(self.p.get() as *mut usize, order) as *mut T
644 /// Stores a value into the pointer.
646 /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
651 /// use std::sync::atomic::{AtomicPtr, Ordering};
653 /// let ptr = &mut 5;
654 /// let some_ptr = AtomicPtr::new(ptr);
656 /// let other_ptr = &mut 10;
658 /// some_ptr.store(other_ptr, Ordering::Relaxed);
663 /// Panics if `order` is `Acquire` or `AcqRel`.
665 #[stable(feature = "rust1", since = "1.0.0")]
666 pub fn store(&self, ptr: *mut T, order: Ordering) {
667 unsafe { atomic_store(self.p.get() as *mut usize, ptr as usize, order); }
670 /// Stores a value into the pointer, returning the old value.
672 /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
677 /// use std::sync::atomic::{AtomicPtr, Ordering};
679 /// let ptr = &mut 5;
680 /// let some_ptr = AtomicPtr::new(ptr);
682 /// let other_ptr = &mut 10;
684 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
687 #[stable(feature = "rust1", since = "1.0.0")]
688 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
689 unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
692 /// Stores a value into the pointer if the current value is the same as the `current` value.
694 /// The return value is always the previous value. If it is equal to `current`, then the value
697 /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
703 /// use std::sync::atomic::{AtomicPtr, Ordering};
705 /// let ptr = &mut 5;
706 /// let some_ptr = AtomicPtr::new(ptr);
708 /// let other_ptr = &mut 10;
709 /// let another_ptr = &mut 10;
711 /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
714 #[stable(feature = "rust1", since = "1.0.0")]
715 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
716 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
722 /// Stores a value into the pointer if the current value is the same as the `current` value.
724 /// The return value is a result indicating whether the new value was written and containing
725 /// the previous value. On success this value is guaranteed to be equal to `current`.
727 /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
728 /// operation. The first describes the required ordering if the operation succeeds while the
729 /// second describes the required ordering when the operation fails. The failure ordering can't
730 /// be `Release` or `AcqRel` and must be equivalent or weaker than the success ordering.
735 /// use std::sync::atomic::{AtomicPtr, Ordering};
737 /// let ptr = &mut 5;
738 /// let some_ptr = AtomicPtr::new(ptr);
740 /// let other_ptr = &mut 10;
741 /// let another_ptr = &mut 10;
743 /// let value = some_ptr.compare_exchange(other_ptr, another_ptr,
744 /// Ordering::SeqCst, Ordering::Relaxed);
747 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
748 pub fn compare_exchange(&self,
752 failure: Ordering) -> Result<*mut T, *mut T> {
754 let res = atomic_compare_exchange(self.p.get() as *mut usize,
760 Ok(x) => Ok(x as *mut T),
761 Err(x) => Err(x as *mut T),
766 /// Stores a value into the pointer if the current value is the same as the `current` value.
768 /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
769 /// comparison succeeds, which can result in more efficient code on some platforms. The
770 /// return value is a result indicating whether the new value was written and containing the
773 /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
774 /// ordering of this operation. The first describes the required ordering if the operation
775 /// succeeds while the second describes the required ordering when the operation fails. The
776 /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than the
777 /// success ordering.
782 /// use std::sync::atomic::{AtomicPtr, Ordering};
784 /// let some_ptr = AtomicPtr::new(&mut 5);
786 /// let new = &mut 10;
787 /// let mut old = some_ptr.load(Ordering::Relaxed);
789 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
791 /// Err(x) => old = x,
796 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
797 pub fn compare_exchange_weak(&self,
801 failure: Ordering) -> Result<*mut T, *mut T> {
803 let res = atomic_compare_exchange_weak(self.p.get() as *mut usize,
809 Ok(x) => Ok(x as *mut T),
810 Err(x) => Err(x as *mut T),
816 macro_rules! atomic_int {
821 $int_type:ident $atomic_type:ident $atomic_init:ident) => {
822 /// An integer type which can be safely shared between threads.
824 /// This type has the same in-memory representation as the underlying integer type.
826 pub struct $atomic_type {
827 v: UnsafeCell<$int_type>,
830 /// An atomic integer initialized to `0`.
832 pub const $atomic_init: $atomic_type = $atomic_type::new(0);
835 impl Default for $atomic_type {
836 fn default() -> Self {
837 Self::new(Default::default())
842 impl fmt::Debug for $atomic_type {
843 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
844 f.debug_tuple(stringify!($atomic_type))
845 .field(&self.load(Ordering::SeqCst))
850 // Send is implicitly implemented.
852 unsafe impl Sync for $atomic_type {}
855 /// Creates a new atomic integer.
860 /// use std::sync::atomic::AtomicIsize;
862 /// let atomic_forty_two = AtomicIsize::new(42);
866 pub const fn new(v: $int_type) -> Self {
867 $atomic_type {v: UnsafeCell::new(v)}
870 /// Returns a mutable reference to the underlying integer.
872 /// This is safe because the mutable reference guarantees that no other threads are
873 /// concurrently accessing the atomic data.
878 /// #![feature(atomic_access)]
879 /// use std::sync::atomic::{AtomicIsize, Ordering};
881 /// let mut some_isize = AtomicIsize::new(10);
882 /// assert_eq!(*some_isize.get_mut(), 10);
883 /// *some_isize.get_mut() = 5;
884 /// assert_eq!(some_isize.load(Ordering::SeqCst), 5);
888 pub fn get_mut(&mut self) -> &mut $int_type {
889 unsafe { &mut *self.v.get() }
892 /// Consumes the atomic and returns the contained value.
894 /// This is safe because passing `self` by value guarantees that no other threads are
895 /// concurrently accessing the atomic data.
900 /// #![feature(atomic_access)]
901 /// use std::sync::atomic::AtomicIsize;
903 /// let some_isize = AtomicIsize::new(5);
904 /// assert_eq!(some_isize.into_inner(), 5);
908 pub fn into_inner(self) -> $int_type {
909 unsafe { self.v.into_inner() }
912 /// Loads a value from the atomic integer.
914 /// `load` takes an `Ordering` argument which describes the memory ordering of this
919 /// Panics if `order` is `Release` or `AcqRel`.
924 /// use std::sync::atomic::{AtomicIsize, Ordering};
926 /// let some_isize = AtomicIsize::new(5);
928 /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
932 pub fn load(&self, order: Ordering) -> $int_type {
933 unsafe { atomic_load(self.v.get(), order) }
936 /// Stores a value into the atomic integer.
938 /// `store` takes an `Ordering` argument which describes the memory ordering of this
944 /// use std::sync::atomic::{AtomicIsize, Ordering};
946 /// let some_isize = AtomicIsize::new(5);
948 /// some_isize.store(10, Ordering::Relaxed);
949 /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
954 /// Panics if `order` is `Acquire` or `AcqRel`.
957 pub fn store(&self, val: $int_type, order: Ordering) {
958 unsafe { atomic_store(self.v.get(), val, order); }
961 /// Stores a value into the atomic integer, returning the old value.
963 /// `swap` takes an `Ordering` argument which describes the memory ordering of this
969 /// use std::sync::atomic::{AtomicIsize, Ordering};
971 /// let some_isize = AtomicIsize::new(5);
973 /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
977 pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
978 unsafe { atomic_swap(self.v.get(), val, order) }
981 /// Stores a value into the atomic integer if the current value is the same as the
984 /// The return value is always the previous value. If it is equal to `current`, then the
985 /// value was updated.
987 /// `compare_and_swap` also takes an `Ordering` argument which describes the memory
988 /// ordering of this operation.
993 /// use std::sync::atomic::{AtomicIsize, Ordering};
995 /// let some_isize = AtomicIsize::new(5);
997 /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
998 /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
1000 /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
1001 /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
1005 pub fn compare_and_swap(&self,
1008 order: Ordering) -> $int_type {
1009 match self.compare_exchange(current,
1012 strongest_failure_ordering(order)) {
1018 /// Stores a value into the atomic integer if the current value is the same as the
1019 /// `current` value.
1021 /// The return value is a result indicating whether the new value was written and
1022 /// containing the previous value. On success this value is guaranteed to be equal to
1025 /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of
1026 /// this operation. The first describes the required ordering if the operation succeeds
1027 /// while the second describes the required ordering when the operation fails. The
1028 /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker
1029 /// than the success ordering.
1034 /// use std::sync::atomic::{AtomicIsize, Ordering};
1036 /// let some_isize = AtomicIsize::new(5);
1038 /// assert_eq!(some_isize.compare_exchange(5, 10,
1039 /// Ordering::Acquire,
1040 /// Ordering::Relaxed),
1042 /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
1044 /// assert_eq!(some_isize.compare_exchange(6, 12,
1045 /// Ordering::SeqCst,
1046 /// Ordering::Acquire),
1048 /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
1052 pub fn compare_exchange(&self,
1056 failure: Ordering) -> Result<$int_type, $int_type> {
1057 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
1060 /// Stores a value into the atomic integer if the current value is the same as the
1061 /// `current` value.
1063 /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
1064 /// comparison succeeds, which can result in more efficient code on some platforms. The
1065 /// return value is a result indicating whether the new value was written and containing
1066 /// the previous value.
1068 /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
1069 /// ordering of this operation. The first describes the required ordering if the
1070 /// operation succeeds while the second describes the required ordering when the
1071 /// operation fails. The failure ordering can't be `Release` or `AcqRel` and must be
1072 /// equivalent or weaker than the success ordering.
1077 /// use std::sync::atomic::{AtomicIsize, Ordering};
1079 /// let val = AtomicIsize::new(4);
1081 /// let mut old = val.load(Ordering::Relaxed);
1083 /// let new = old * 2;
1084 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1086 /// Err(x) => old = x,
1092 pub fn compare_exchange_weak(&self,
1096 failure: Ordering) -> Result<$int_type, $int_type> {
1098 atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
1102 /// Add to the current value, returning the previous value.
1107 /// use std::sync::atomic::{AtomicIsize, Ordering};
1109 /// let foo = AtomicIsize::new(0);
1110 /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
1111 /// assert_eq!(foo.load(Ordering::SeqCst), 10);
1115 pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
1116 unsafe { atomic_add(self.v.get(), val, order) }
1119 /// Subtract from the current value, returning the previous value.
1124 /// use std::sync::atomic::{AtomicIsize, Ordering};
1126 /// let foo = AtomicIsize::new(0);
1127 /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
1128 /// assert_eq!(foo.load(Ordering::SeqCst), -10);
1132 pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
1133 unsafe { atomic_sub(self.v.get(), val, order) }
1136 /// Bitwise and with the current value, returning the previous value.
1141 /// use std::sync::atomic::{AtomicIsize, Ordering};
1143 /// let foo = AtomicIsize::new(0b101101);
1144 /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
1145 /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
1148 pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
1149 unsafe { atomic_and(self.v.get(), val, order) }
1152 /// Bitwise or with the current value, returning the previous value.
1157 /// use std::sync::atomic::{AtomicIsize, Ordering};
1159 /// let foo = AtomicIsize::new(0b101101);
1160 /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
1161 /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
1164 pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
1165 unsafe { atomic_or(self.v.get(), val, order) }
1168 /// Bitwise xor with the current value, returning the previous value.
1173 /// use std::sync::atomic::{AtomicIsize, Ordering};
1175 /// let foo = AtomicIsize::new(0b101101);
1176 /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
1177 /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
1180 pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
1181 unsafe { atomic_xor(self.v.get(), val, order) }
1187 #[cfg(target_has_atomic = "8")]
1189 unstable(feature = "integer_atomics", issue = "32976"),
1190 unstable(feature = "integer_atomics", issue = "32976"),
1191 unstable(feature = "integer_atomics", issue = "32976"),
1192 unstable(feature = "integer_atomics", issue = "32976"),
1193 i8 AtomicI8 ATOMIC_I8_INIT
1195 #[cfg(target_has_atomic = "8")]
1197 unstable(feature = "integer_atomics", issue = "32976"),
1198 unstable(feature = "integer_atomics", issue = "32976"),
1199 unstable(feature = "integer_atomics", issue = "32976"),
1200 unstable(feature = "integer_atomics", issue = "32976"),
1201 u8 AtomicU8 ATOMIC_U8_INIT
1203 #[cfg(target_has_atomic = "16")]
1205 unstable(feature = "integer_atomics", issue = "32976"),
1206 unstable(feature = "integer_atomics", issue = "32976"),
1207 unstable(feature = "integer_atomics", issue = "32976"),
1208 unstable(feature = "integer_atomics", issue = "32976"),
1209 i16 AtomicI16 ATOMIC_I16_INIT
1211 #[cfg(target_has_atomic = "16")]
1213 unstable(feature = "integer_atomics", issue = "32976"),
1214 unstable(feature = "integer_atomics", issue = "32976"),
1215 unstable(feature = "integer_atomics", issue = "32976"),
1216 unstable(feature = "integer_atomics", issue = "32976"),
1217 u16 AtomicU16 ATOMIC_U16_INIT
1219 #[cfg(target_has_atomic = "32")]
1221 unstable(feature = "integer_atomics", issue = "32976"),
1222 unstable(feature = "integer_atomics", issue = "32976"),
1223 unstable(feature = "integer_atomics", issue = "32976"),
1224 unstable(feature = "integer_atomics", issue = "32976"),
1225 i32 AtomicI32 ATOMIC_I32_INIT
1227 #[cfg(target_has_atomic = "32")]
1229 unstable(feature = "integer_atomics", issue = "32976"),
1230 unstable(feature = "integer_atomics", issue = "32976"),
1231 unstable(feature = "integer_atomics", issue = "32976"),
1232 unstable(feature = "integer_atomics", issue = "32976"),
1233 u32 AtomicU32 ATOMIC_U32_INIT
1235 #[cfg(target_has_atomic = "64")]
1237 unstable(feature = "integer_atomics", issue = "32976"),
1238 unstable(feature = "integer_atomics", issue = "32976"),
1239 unstable(feature = "integer_atomics", issue = "32976"),
1240 unstable(feature = "integer_atomics", issue = "32976"),
1241 i64 AtomicI64 ATOMIC_I64_INIT
1243 #[cfg(target_has_atomic = "64")]
1245 unstable(feature = "integer_atomics", issue = "32976"),
1246 unstable(feature = "integer_atomics", issue = "32976"),
1247 unstable(feature = "integer_atomics", issue = "32976"),
1248 unstable(feature = "integer_atomics", issue = "32976"),
1249 u64 AtomicU64 ATOMIC_U64_INIT
1251 #[cfg(target_has_atomic = "ptr")]
1253 stable(feature = "rust1", since = "1.0.0"),
1254 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
1255 stable(feature = "atomic_debug", since = "1.3.0"),
1256 unstable(feature = "atomic_access", issue = "35603"),
1257 isize AtomicIsize ATOMIC_ISIZE_INIT
1259 #[cfg(target_has_atomic = "ptr")]
1261 stable(feature = "rust1", since = "1.0.0"),
1262 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
1263 stable(feature = "atomic_debug", since = "1.3.0"),
1264 unstable(feature = "atomic_access", issue = "35603"),
1265 usize AtomicUsize ATOMIC_USIZE_INIT
1269 fn strongest_failure_ordering(order: Ordering) -> Ordering {
1280 unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
1282 Release => intrinsics::atomic_store_rel(dst, val),
1283 Relaxed => intrinsics::atomic_store_relaxed(dst, val),
1284 SeqCst => intrinsics::atomic_store(dst, val),
1285 Acquire => panic!("there is no such thing as an acquire store"),
1286 AcqRel => panic!("there is no such thing as an acquire/release store"),
1291 unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
1293 Acquire => intrinsics::atomic_load_acq(dst),
1294 Relaxed => intrinsics::atomic_load_relaxed(dst),
1295 SeqCst => intrinsics::atomic_load(dst),
1296 Release => panic!("there is no such thing as a release load"),
1297 AcqRel => panic!("there is no such thing as an acquire/release load"),
1302 unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
1304 Acquire => intrinsics::atomic_xchg_acq(dst, val),
1305 Release => intrinsics::atomic_xchg_rel(dst, val),
1306 AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
1307 Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
1308 SeqCst => intrinsics::atomic_xchg(dst, val)
1312 /// Returns the old value (like __sync_fetch_and_add).
1314 unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
1316 Acquire => intrinsics::atomic_xadd_acq(dst, val),
1317 Release => intrinsics::atomic_xadd_rel(dst, val),
1318 AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
1319 Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
1320 SeqCst => intrinsics::atomic_xadd(dst, val)
1324 /// Returns the old value (like __sync_fetch_and_sub).
1326 unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
1328 Acquire => intrinsics::atomic_xsub_acq(dst, val),
1329 Release => intrinsics::atomic_xsub_rel(dst, val),
1330 AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
1331 Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
1332 SeqCst => intrinsics::atomic_xsub(dst, val)
1337 unsafe fn atomic_compare_exchange<T>(dst: *mut T,
1341 failure: Ordering) -> Result<T, T> {
1342 let (val, ok) = match (success, failure) {
1343 (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
1344 (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
1345 (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
1346 (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
1347 (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
1348 (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
1349 (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
1350 (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
1351 (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
1352 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
1353 (_, Release) => panic!("there is no such thing as a release failure ordering"),
1354 _ => panic!("a failure ordering can't be stronger than a success ordering"),
1364 unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
1368 failure: Ordering) -> Result<T, T> {
1369 let (val, ok) = match (success, failure) {
1370 (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
1371 (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
1372 (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
1373 (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
1374 (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
1375 (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
1376 (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
1377 (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
1378 (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
1379 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
1380 (_, Release) => panic!("there is no such thing as a release failure ordering"),
1381 _ => panic!("a failure ordering can't be stronger than a success ordering"),
1391 unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
1393 Acquire => intrinsics::atomic_and_acq(dst, val),
1394 Release => intrinsics::atomic_and_rel(dst, val),
1395 AcqRel => intrinsics::atomic_and_acqrel(dst, val),
1396 Relaxed => intrinsics::atomic_and_relaxed(dst, val),
1397 SeqCst => intrinsics::atomic_and(dst, val)
1402 unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
1404 Acquire => intrinsics::atomic_or_acq(dst, val),
1405 Release => intrinsics::atomic_or_rel(dst, val),
1406 AcqRel => intrinsics::atomic_or_acqrel(dst, val),
1407 Relaxed => intrinsics::atomic_or_relaxed(dst, val),
1408 SeqCst => intrinsics::atomic_or(dst, val)
1413 unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
1415 Acquire => intrinsics::atomic_xor_acq(dst, val),
1416 Release => intrinsics::atomic_xor_rel(dst, val),
1417 AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
1418 Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
1419 SeqCst => intrinsics::atomic_xor(dst, val)
1423 /// An atomic fence.
1425 /// A fence 'A' which has `Release` ordering semantics, synchronizes with a
1426 /// fence 'B' with (at least) `Acquire` semantics, if and only if there exists
1427 /// atomic operations X and Y, both operating on some atomic object 'M' such
1428 /// that A is sequenced before X, Y is synchronized before B and Y observes
1429 /// the change to M. This provides a happens-before dependence between A and B.
1431 /// Atomic operations with `Release` or `Acquire` semantics can also synchronize
1434 /// A fence which has `SeqCst` ordering, in addition to having both `Acquire`
1435 /// and `Release` semantics, participates in the global program order of the
1436 /// other `SeqCst` operations and/or fences.
1438 /// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings.
1442 /// Panics if `order` is `Relaxed`.
1444 #[stable(feature = "rust1", since = "1.0.0")]
1445 pub fn fence(order: Ordering) {
1448 Acquire => intrinsics::atomic_fence_acq(),
1449 Release => intrinsics::atomic_fence_rel(),
1450 AcqRel => intrinsics::atomic_fence_acqrel(),
1451 SeqCst => intrinsics::atomic_fence(),
1452 Relaxed => panic!("there is no such thing as a relaxed fence")
1458 #[cfg(target_has_atomic = "8")]
1459 #[stable(feature = "atomic_debug", since = "1.3.0")]
1460 impl fmt::Debug for AtomicBool {
1461 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1462 f.debug_tuple("AtomicBool").field(&self.load(Ordering::SeqCst)).finish()
1466 #[cfg(target_has_atomic = "ptr")]
1467 #[stable(feature = "atomic_debug", since = "1.3.0")]
1468 impl<T> fmt::Debug for AtomicPtr<T> {
1469 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1470 f.debug_tuple("AtomicPtr").field(&self.load(Ordering::SeqCst)).finish()