// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent
//! types.
//!
//! This module defines atomic versions of a select number of primitive
//! types, including `AtomicBool`, `AtomicIsize`, and `AtomicUsize`.
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
//! Each method takes an `Ordering` which represents the strength of
//! the memory barrier for that operation. These orderings are the
//! same as [LLVM atomic orderings][1].
//!
//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
//!
//! Atomic variables are safe to share between threads (they implement `Sync`)
//! but they do not themselves provide the mechanism for sharing and follow the
//! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
//! The most common way to share an atomic variable is to put it into an `Arc` (an
//! atomically-reference-counted shared pointer).
//!
//! Most atomic types may be stored in static variables, initialized using
//! the provided static initializers like `ATOMIC_BOOL_INIT`. Atomic statics
//! are often used for lazy global initialization.
//!
//! # Examples
//!
//! A simple spinlock:
//!
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::thread;
//!
//! fn main() {
//!     let spinlock = Arc::new(AtomicUsize::new(1));
//!
//!     let spinlock_clone = spinlock.clone();
//!     let thread = thread::spawn(move || {
//!         spinlock_clone.store(0, Ordering::SeqCst);
//!     });
//!
//!     // Wait for the other thread to release the lock
//!     while spinlock.load(Ordering::SeqCst) != 0 {}
//!
//!     if let Err(panic) = thread.join() {
//!         println!("Thread had an error: {:?}", panic);
//!     }
//! }
//! ```
//!
//! Keep a global count of live threads:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
//!
//! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
//!
//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
//! println!("live threads: {}", old_thread_count + 1);
//! ```

#![stable(feature = "rust1", since = "1.0.0")]
#![cfg_attr(not(target_has_atomic = "8"), allow(dead_code))]
#![cfg_attr(not(target_has_atomic = "8"), allow(unused_imports))]

use self::Ordering::*;

use intrinsics;
use cell::UnsafeCell;
use fmt;

/// A boolean type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a `bool`.
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
    v: UnsafeCell<u8>,
}

#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
impl Default for AtomicBool {
    /// Creates an `AtomicBool` initialized to `false`.
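    ///
    /// A quick check of the default (a minimal sketch):
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// assert_eq!(AtomicBool::default().load(Ordering::SeqCst), false);
    /// ```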
    fn default() -> Self {
        Self::new(false)
    }
}

// Send is implicitly implemented for AtomicBool.
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicBool {}

/// A raw pointer type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a `*mut T`.
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicPtr<T> {
    p: UnsafeCell<*mut T>,
}

#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for AtomicPtr<T> {
    /// Creates a null `AtomicPtr<T>`.
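    ///
    /// For example (a minimal sketch):
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let ptr: AtomicPtr<u8> = AtomicPtr::default();
    /// assert!(ptr.into_inner().is_null());
    /// ```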
    fn default() -> AtomicPtr<T> {
        AtomicPtr::new(::ptr::null_mut())
    }
}

#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Sync for AtomicPtr<T> {}

/// Atomic memory orderings
///
/// Memory orderings limit the ways that both the compiler and CPU may reorder
/// instructions around atomic operations. At its most restrictive,
/// "sequentially consistent" atomics allow neither reads nor writes
/// to be moved either before or after the atomic operation; at the other end,
/// "relaxed" atomics allow all reorderings.
///
/// Rust's memory orderings are [the same as
/// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
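///
/// For instance, a `Release` store that is observed by an `Acquire` load of
/// the same variable makes every write before the store visible after the
/// load. A minimal sketch (the `DATA` and `READY` statics are illustrative
/// names, not part of this API):
///
/// ```
/// use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
/// use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT};
/// use std::thread;
///
/// static DATA: AtomicUsize = ATOMIC_USIZE_INIT;
/// static READY: AtomicBool = ATOMIC_BOOL_INIT;
///
/// let t = thread::spawn(|| {
///     DATA.store(42, Ordering::Relaxed);
///     READY.store(true, Ordering::Release); // publish DATA
/// });
///
/// while !READY.load(Ordering::Acquire) {}   // wait for the publication
/// assert_eq!(DATA.load(Ordering::Relaxed), 42);
/// t.join().unwrap();
/// ```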
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone, Debug)]
pub enum Ordering {
    /// No ordering constraints, only atomic operations. Corresponds to LLVM's
    /// `Monotonic` ordering.
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous writes become visible
    /// to the other threads that perform a load with `Acquire` ordering
    /// on the same value.
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, all subsequent loads will see data
    /// written before a store with `Release` ordering on the same value
    /// in other threads.
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// When coupled with a load, uses `Acquire` ordering, and with a store
    /// `Release` ordering.
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like `AcqRel` with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
    // Prevent exhaustive matching to allow for future extension
    #[doc(hidden)]
    #[unstable(feature = "future_atomic_orderings", issue = "0")]
    __Nonexhaustive,
}

/// An `AtomicBool` initialized to `false`.
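///
/// A typical use is a `static` flag (a minimal sketch; `IS_READY` is an
/// illustrative name):
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT};
///
/// static IS_READY: AtomicBool = ATOMIC_BOOL_INIT;
///
/// assert_eq!(IS_READY.load(Ordering::SeqCst), false);
/// ```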
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);

#[cfg(target_has_atomic = "8")]
impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: bool) -> AtomicBool {
        AtomicBool { v: UnsafeCell::new(v as u8) }
    }

    /// Returns a mutable reference to the underlying `bool`.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bool = AtomicBool::new(true);
    /// assert_eq!(*some_bool.get_mut(), true);
    /// *some_bool.get_mut() = false;
    /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
    /// ```
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn get_mut(&mut self) -> &mut bool {
        unsafe { &mut *(self.v.get() as *mut bool) }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let some_bool = AtomicBool::new(true);
    /// assert_eq!(some_bool.into_inner(), true);
    /// ```
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn into_inner(self) -> bool {
        unsafe { self.v.into_inner() != 0 }
    }

    /// Loads a value from the bool.
    ///
    /// `load` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Release`] or [`AcqRel`].
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> bool {
        unsafe { atomic_load(self.v.get(), order) != 0 }
    }

    /// Stores a value into the bool.
    ///
    /// `store` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// some_bool.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: bool, order: Ordering) {
        unsafe {
            atomic_store(self.v.get(), val as u8, order);
        }
    }

    /// Stores a value into the bool, returning the old value.
    ///
    /// `swap` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
    }

    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the
    /// operation succeeds while the second describes the required ordering when the
    /// operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and must
    /// be equivalent to or weaker than the success ordering.
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_exchange(true,
    ///                                       false,
    ///                                       Ordering::Acquire,
    ///                                       Ordering::Relaxed),
    ///            Ok(true));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_exchange(true, true,
    ///                                       Ordering::SeqCst,
    ///                                       Ordering::Acquire),
    ///            Err(false));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange(&self,
                            current: bool,
                            new: bool,
                            success: Ordering,
                            failure: Ordering)
                            -> Result<bool, bool> {
        match unsafe {
            atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
        } {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }

    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be [`Release`] or [`AcqRel`] and must be equivalent to or
    /// weaker than the success ordering.
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let val = AtomicBool::new(false);
    ///
    /// let new = true;
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange_weak(&self,
                                 current: bool,
                                 new: bool,
                                 success: Ordering,
                                 failure: Ordering)
                                 -> Result<bool, bool> {
        match unsafe {
            atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
        } {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }

    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "nand" with a boolean value.
    ///
    /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        // We can't use atomic_nand here because it can result in a bool with
        // an invalid value. This happens because the atomic operation is done
        // with an 8-bit integer internally, which would set the upper 7 bits.
        // So we just use a compare-exchange loop instead, which is what the
        // intrinsic actually expands to anyway on many platforms.
        let mut old = self.load(Relaxed);
        loop {
            let new = !(old && val);
            match self.compare_exchange_weak(old, new, order, Relaxed) {
                Ok(_) => return old,
                Err(x) => old = x,
            }
        }
    }

    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
    /// new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
    }
}

#[cfg(target_has_atomic = "ptr")]
impl<T> AtomicPtr<T> {
    /// Creates a new `AtomicPtr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let ptr = &mut 5;
    /// let atomic_ptr = AtomicPtr::new(ptr);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(p: *mut T) -> AtomicPtr<T> {
        AtomicPtr { p: UnsafeCell::new(p) }
    }

    /// Returns a mutable reference to the underlying pointer.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
    /// *atomic_ptr.get_mut() = &mut 5;
    /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
    /// ```
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn get_mut(&mut self) -> &mut *mut T {
        unsafe { &mut *self.p.get() }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let atomic_ptr = AtomicPtr::new(&mut 5);
    /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
    /// ```
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn into_inner(self) -> *mut T {
        unsafe { self.p.into_inner() }
    }

    /// Loads a value from the pointer.
    ///
    /// `load` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Release`] or [`AcqRel`].
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let value = some_ptr.load(Ordering::Relaxed);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> *mut T {
        unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
    }

    /// Stores a value into the pointer.
    ///
    /// `store` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// some_ptr.store(other_ptr, Ordering::Relaxed);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, ptr: *mut T, order: Ordering) {
        unsafe {
            atomic_store(self.p.get() as *mut usize, ptr as usize, order);
        }
    }

    /// Stores a value into the pointer, returning the old value.
    ///
    /// `swap` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
        unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    /// let another_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if
    /// the operation succeeds while the second describes the required ordering when
    /// the operation fails. The failure ordering can't be [`Release`] or [`AcqRel`]
    /// and must be equivalent to or weaker than the success ordering.
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    /// let another_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_exchange(other_ptr, another_ptr,
    ///                                       Ordering::SeqCst, Ordering::Relaxed);
    /// ```
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange(&self,
                            current: *mut T,
                            new: *mut T,
                            success: Ordering,
                            failure: Ordering)
                            -> Result<*mut T, *mut T> {
        unsafe {
            let res = atomic_compare_exchange(self.p.get() as *mut usize,
                                              current as usize,
                                              new as usize,
                                              success,
                                              failure);
            match res {
                Ok(x) => Ok(x as *mut T),
                Err(x) => Err(x as *mut T),
            }
        }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be [`Release`] or [`AcqRel`] and must be equivalent to or
    /// weaker than the success ordering.
    ///
    /// [`compare_exchange`]: #method.compare_exchange
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let some_ptr = AtomicPtr::new(&mut 5);
    ///
    /// let new = &mut 10;
    /// let mut old = some_ptr.load(Ordering::Relaxed);
    /// loop {
    ///     match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange_weak(&self,
                                 current: *mut T,
                                 new: *mut T,
                                 success: Ordering,
                                 failure: Ordering)
                                 -> Result<*mut T, *mut T> {
        unsafe {
            let res = atomic_compare_exchange_weak(self.p.get() as *mut usize,
                                                   current as usize,
                                                   new as usize,
                                                   success,
                                                   failure);
            match res {
                Ok(x) => Ok(x as *mut T),
                Err(x) => Err(x as *mut T),
            }
        }
    }
}

macro_rules! atomic_int {
    ($stable:meta,
     $stable_cxchg:meta,
     $stable_debug:meta,
     $stable_access:meta,
     $int_type:ident $atomic_type:ident $atomic_init:ident) => {
        /// An integer type which can be safely shared between threads.
        ///
        /// This type has the same in-memory representation as the underlying integer type.
        #[$stable]
        pub struct $atomic_type {
            v: UnsafeCell<$int_type>,
        }

        /// An atomic integer initialized to `0`.
        #[$stable]
        pub const $atomic_init: $atomic_type = $atomic_type::new(0);

        #[$stable]
        impl Default for $atomic_type {
            fn default() -> Self {
                Self::new(Default::default())
            }
        }

        #[$stable_debug]
        impl fmt::Debug for $atomic_type {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                f.debug_tuple(stringify!($atomic_type))
                    .field(&self.load(Ordering::SeqCst))
                    .finish()
            }
        }

        // Send is implicitly implemented.
        #[$stable]
        unsafe impl Sync for $atomic_type {}

        impl $atomic_type {
            /// Creates a new atomic integer.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::AtomicIsize;
            ///
            /// let atomic_forty_two = AtomicIsize::new(42);
            /// ```
            #[$stable]
            pub const fn new(v: $int_type) -> Self {
                $atomic_type {v: UnsafeCell::new(v)}
            }

            /// Returns a mutable reference to the underlying integer.
            ///
            /// This is safe because the mutable reference guarantees that no other threads are
            /// concurrently accessing the atomic data.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let mut some_isize = AtomicIsize::new(10);
            /// assert_eq!(*some_isize.get_mut(), 10);
            /// *some_isize.get_mut() = 5;
            /// assert_eq!(some_isize.load(Ordering::SeqCst), 5);
            /// ```
            #[$stable_access]
            pub fn get_mut(&mut self) -> &mut $int_type {
                unsafe { &mut *self.v.get() }
            }

            /// Consumes the atomic and returns the contained value.
            ///
            /// This is safe because passing `self` by value guarantees that no other threads are
            /// concurrently accessing the atomic data.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::AtomicIsize;
            ///
            /// let some_isize = AtomicIsize::new(5);
            /// assert_eq!(some_isize.into_inner(), 5);
            /// ```
            #[$stable_access]
            pub fn into_inner(self) -> $int_type {
                unsafe { self.v.into_inner() }
            }

            /// Loads a value from the atomic integer.
            ///
            /// `load` takes an `Ordering` argument which describes the memory ordering of this
            /// operation.
            ///
            /// # Panics
            ///
            /// Panics if `order` is `Release` or `AcqRel`.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
            /// ```
            #[$stable]
            pub fn load(&self, order: Ordering) -> $int_type {
                unsafe { atomic_load(self.v.get(), order) }
            }

            /// Stores a value into the atomic integer.
            ///
            /// `store` takes an `Ordering` argument which describes the memory ordering of this
            /// operation.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// some_isize.store(10, Ordering::Relaxed);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            ///
            /// # Panics
            ///
            /// Panics if `order` is `Acquire` or `AcqRel`.
            #[$stable]
            pub fn store(&self, val: $int_type, order: Ordering) {
                unsafe { atomic_store(self.v.get(), val, order); }
            }

            /// Stores a value into the atomic integer, returning the old value.
            ///
            /// `swap` takes an `Ordering` argument which describes the memory ordering of this
            /// operation.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
            /// ```
            #[$stable]
            pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_swap(self.v.get(), val, order) }
            }

            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// The return value is always the previous value. If it is equal to `current`, then
            /// the value was updated.
            ///
            /// `compare_and_swap` also takes an `Ordering` argument which describes the memory
            /// ordering of this operation.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            ///
            /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            #[$stable]
            pub fn compare_and_swap(&self,
                                    current: $int_type,
                                    new: $int_type,
                                    order: Ordering) -> $int_type {
                match self.compare_exchange(current,
                                            new,
                                            order,
                                            strongest_failure_ordering(order)) {
                    Ok(x) => x,
                    Err(x) => x,
                }
            }

            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// The return value is a result indicating whether the new value was written and
            /// containing the previous value. On success this value is guaranteed to be equal to
            /// `current`.
            ///
            /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of
            /// this operation. The first describes the required ordering if the operation succeeds
            /// while the second describes the required ordering when the operation fails. The
            /// failure ordering can't be `Release` or `AcqRel` and must be equivalent to or weaker
            /// than the success ordering.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.compare_exchange(5, 10,
            ///                                        Ordering::Acquire,
            ///                                        Ordering::Relaxed),
            ///            Ok(5));
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            ///
            /// assert_eq!(some_isize.compare_exchange(6, 12,
            ///                                        Ordering::SeqCst,
            ///                                        Ordering::Acquire),
            ///            Err(10));
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            #[$stable_cxchg]
            pub fn compare_exchange(&self,
                                    current: $int_type,
                                    new: $int_type,
                                    success: Ordering,
                                    failure: Ordering) -> Result<$int_type, $int_type> {
                unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
            }

            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
            /// comparison succeeds, which can result in more efficient code on some platforms. The
            /// return value is a result indicating whether the new value was written and containing
            /// the previous value.
            ///
            /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
            /// ordering of this operation. The first describes the required ordering if the
            /// operation succeeds while the second describes the required ordering when the
            /// operation fails. The failure ordering can't be `Release` or `AcqRel` and must be
            /// equivalent to or weaker than the success ordering.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let val = AtomicIsize::new(4);
            ///
            /// let mut old = val.load(Ordering::Relaxed);
            /// loop {
            ///     let new = old * 2;
            ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
            ///         Ok(_) => break,
            ///         Err(x) => old = x,
            ///     }
            /// }
            /// ```
            #[$stable_cxchg]
            pub fn compare_exchange_weak(&self,
                                         current: $int_type,
                                         new: $int_type,
                                         success: Ordering,
                                         failure: Ordering) -> Result<$int_type, $int_type> {
                unsafe {
                    atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
                }
            }

            /// Add to the current value, returning the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0);
            /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
            /// assert_eq!(foo.load(Ordering::SeqCst), 10);
            /// ```
            #[$stable]
            pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_add(self.v.get(), val, order) }
            }

            /// Subtract from the current value, returning the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0);
            /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
            /// assert_eq!(foo.load(Ordering::SeqCst), -10);
            /// ```
            #[$stable]
            pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_sub(self.v.get(), val, order) }
            }

            /// Bitwise and with the current value, returning the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
            /// ```
            #[$stable]
            pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_and(self.v.get(), val, order) }
            }

            /// Bitwise or with the current value, returning the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
            /// ```
            #[$stable]
            pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_or(self.v.get(), val, order) }
            }

            /// Bitwise xor with the current value, returning the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
            /// ```
            #[$stable]
            pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_xor(self.v.get(), val, order) }
            }
        }
    }
}

#[cfg(target_has_atomic = "8")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i8 AtomicI8 ATOMIC_I8_INIT
}
#[cfg(target_has_atomic = "8")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u8 AtomicU8 ATOMIC_U8_INIT
}
#[cfg(target_has_atomic = "16")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i16 AtomicI16 ATOMIC_I16_INIT
}
#[cfg(target_has_atomic = "16")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u16 AtomicU16 ATOMIC_U16_INIT
}
#[cfg(target_has_atomic = "32")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i32 AtomicI32 ATOMIC_I32_INIT
}
#[cfg(target_has_atomic = "32")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u32 AtomicU32 ATOMIC_U32_INIT
}
#[cfg(target_has_atomic = "64")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i64 AtomicI64 ATOMIC_I64_INIT
}
#[cfg(target_has_atomic = "64")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u64 AtomicU64 ATOMIC_U64_INIT
}
#[cfg(target_has_atomic = "ptr")]
atomic_int! {
    stable(feature = "rust1", since = "1.0.0"),
    stable(feature = "extended_compare_and_swap", since = "1.10.0"),
    stable(feature = "atomic_debug", since = "1.3.0"),
    stable(feature = "atomic_access", since = "1.15.0"),
    isize AtomicIsize ATOMIC_ISIZE_INIT
}
#[cfg(target_has_atomic = "ptr")]
atomic_int! {
    stable(feature = "rust1", since = "1.0.0"),
    stable(feature = "extended_compare_and_swap", since = "1.10.0"),
    stable(feature = "atomic_debug", since = "1.3.0"),
    stable(feature = "atomic_access", since = "1.15.0"),
    usize AtomicUsize ATOMIC_USIZE_INIT
}
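
// Maps the success ordering of a `compare_exchange` to the strongest failure
// ordering that success ordering permits.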
fn strongest_failure_ordering(order: Ordering) -> Ordering {
    match order {
        Release => Relaxed,
        Relaxed => Relaxed,
        SeqCst => SeqCst,
        Acquire => Acquire,
        AcqRel => Acquire,
        __Nonexhaustive => __Nonexhaustive,
    }
}

unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
    match order {
        Release => intrinsics::atomic_store_rel(dst, val),
        Relaxed => intrinsics::atomic_store_relaxed(dst, val),
        SeqCst => intrinsics::atomic_store(dst, val),
        Acquire => panic!("there is no such thing as an acquire store"),
        AcqRel => panic!("there is no such thing as an acquire/release store"),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}

unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_load_acq(dst),
        Relaxed => intrinsics::atomic_load_relaxed(dst),
        SeqCst => intrinsics::atomic_load(dst),
        Release => panic!("there is no such thing as a release load"),
        AcqRel => panic!("there is no such thing as an acquire/release load"),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}

unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xchg_acq(dst, val),
        Release => intrinsics::atomic_xchg_rel(dst, val),
        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xchg(dst, val),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}

/// Returns the old value (like __sync_fetch_and_add).
unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xadd_acq(dst, val),
        Release => intrinsics::atomic_xadd_rel(dst, val),
        AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xadd(dst, val),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}

/// Returns the old value (like __sync_fetch_and_sub).
unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xsub_acq(dst, val),
        Release => intrinsics::atomic_xsub_rel(dst, val),
        AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xsub(dst, val),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}

unsafe fn atomic_compare_exchange<T>(dst: *mut T,
                                     old: T,
                                     new: T,
                                     success: Ordering,
                                     failure: Ordering)
                                     -> Result<T, T> {
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
        (__Nonexhaustive, _) => panic!("invalid memory ordering"),
        (_, __Nonexhaustive) => panic!("invalid memory ordering"),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    if ok { Ok(val) } else { Err(val) }
}

unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
                                          old: T,
                                          new: T,
                                          success: Ordering,
                                          failure: Ordering)
                                          -> Result<T, T> {
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
        (__Nonexhaustive, _) => panic!("invalid memory ordering"),
        (_, __Nonexhaustive) => panic!("invalid memory ordering"),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    if ok { Ok(val) } else { Err(val) }
}

unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_and_acq(dst, val),
        Release => intrinsics::atomic_and_rel(dst, val),
        AcqRel => intrinsics::atomic_and_acqrel(dst, val),
        Relaxed => intrinsics::atomic_and_relaxed(dst, val),
        SeqCst => intrinsics::atomic_and(dst, val),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}

unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_or_acq(dst, val),
        Release => intrinsics::atomic_or_rel(dst, val),
        AcqRel => intrinsics::atomic_or_acqrel(dst, val),
        Relaxed => intrinsics::atomic_or_relaxed(dst, val),
        SeqCst => intrinsics::atomic_or(dst, val),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}

unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xor_acq(dst, val),
        Release => intrinsics::atomic_xor_rel(dst, val),
        AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xor(dst, val),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}

/// An atomic fence.
///
/// A fence 'A' which has `Release` ordering semantics, synchronizes with a
/// fence 'B' with (at least) `Acquire` semantics, if and only if there exist
/// atomic operations X and Y, both operating on some atomic object 'M' such
/// that A is sequenced before X, Y is sequenced before B and Y observes
/// the change to M. This provides a happens-before dependence between A and B.
///
/// Atomic operations with `Release` or `Acquire` semantics can also synchronize
/// with a fence.
///
/// A fence which has `SeqCst` ordering, in addition to having both `Acquire`
/// and `Release` semantics, participates in the global program order of the
/// other `SeqCst` operations and/or fences.
///
/// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings.
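///
/// # Examples
///
/// A minimal sketch of pairing a `Release` fence with an `Acquire` fence to
/// publish data (the `DATA` and `FLAG` statics are illustrative names):
///
/// ```
/// use std::sync::atomic::{fence, AtomicBool, AtomicUsize, Ordering};
/// use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT};
/// use std::thread;
///
/// static DATA: AtomicUsize = ATOMIC_USIZE_INIT;
/// static FLAG: AtomicBool = ATOMIC_BOOL_INIT;
///
/// let t = thread::spawn(|| {
///     DATA.store(42, Ordering::Relaxed);
///     fence(Ordering::Release);             // fence A, sequenced before X
///     FLAG.store(true, Ordering::Relaxed);  // X
/// });
///
/// while !FLAG.load(Ordering::Relaxed) {}    // Y, observes the change to FLAG
/// fence(Ordering::Acquire);                 // fence B, synchronizes with A
/// assert_eq!(DATA.load(Ordering::Relaxed), 42);
/// t.join().unwrap();
/// ```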
///
/// # Panics
///
/// Panics if `order` is `Relaxed`.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn fence(order: Ordering) {
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence_acq(),
            Release => intrinsics::atomic_fence_rel(),
            AcqRel => intrinsics::atomic_fence_acqrel(),
            SeqCst => intrinsics::atomic_fence(),
            Relaxed => panic!("there is no such thing as a relaxed fence"),
            __Nonexhaustive => panic!("invalid memory ordering"),
        }
    }
}

#[cfg(target_has_atomic = "8")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl fmt::Debug for AtomicBool {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("AtomicBool").field(&self.load(Ordering::SeqCst)).finish()
    }
}

#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl<T> fmt::Debug for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("AtomicPtr").field(&self.load(Ordering::SeqCst)).finish()
    }
}