3 //! Atomic types provide primitive shared-memory communication between
4 //! threads, and are the building blocks of other concurrent
7 //! This module defines atomic versions of a select number of primitive
8 //! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
9 //! [`AtomicI8`], [`AtomicU16`], etc.
10 //! Atomic types present operations that, when used correctly, synchronize
11 //! updates between threads.
13 //! [`AtomicBool`]: struct.AtomicBool.html
14 //! [`AtomicIsize`]: struct.AtomicIsize.html
15 //! [`AtomicUsize`]: struct.AtomicUsize.html
16 //! [`AtomicI8`]: struct.AtomicI8.html
17 //! [`AtomicU16`]: struct.AtomicU16.html
19 //! Each method takes an [`Ordering`] which represents the strength of
20 //! the memory barrier for that operation. These orderings are the
21 //! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2].
23 //! [`Ordering`]: enum.Ordering.html
25 //! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order
26 //! [2]: ../../../nomicon/atomics.html
28 //! Atomic variables are safe to share between threads (they implement [`Sync`])
29 //! but they do not themselves provide the mechanism for sharing and follow the
30 //! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
31 //! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
32 //! atomically-reference-counted shared pointer).
34 //! [`Sync`]: ../../marker/trait.Sync.html
35 //! [arc]: ../../../std/sync/struct.Arc.html
37 //! Atomic types may be stored in static variables, initialized using
38 //! the constant initializers like [`AtomicBool::new`]. Atomic statics
39 //! are often used for lazy global initialization.
41 //! [`AtomicBool::new`]: struct.AtomicBool.html#method.new
45 //! All atomic types in this module are guaranteed to be [lock-free] if they're
46 //! available. This means they don't internally acquire a global mutex. Atomic
47 //! types and operations are not guaranteed to be wait-free. This means that
48 //! operations like `fetch_or` may be implemented with a compare-and-swap loop.
50 //! Atomic operations may be implemented at the instruction layer with
51 //! larger-size atomics. For example some platforms use 4-byte atomic
52 //! instructions to implement `AtomicI8`. Note that this emulation should not
53 //! have an impact on correctness of code, it's just something to be aware of.
55 //! The atomic types in this module may not be available on all platforms. The
56 //! atomic types here are all widely available, however, and can generally be
57 //! relied upon existing. Some notable exceptions are:
59 //! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or
60 //! `AtomicI64` types.
//! * ARM platforms like `armv5te` that aren't for Linux do not have any atomics
//!   at all.
63 //! * ARM targets with `thumbv6m` do not have atomic operations at all.
65 //! Note that future platforms may be added that also do not have support for
66 //! some atomic operations. Maximally portable code will want to be careful
67 //! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are
68 //! generally the most portable, but even then they're not available everywhere.
//! For reference, the `std` library requires pointer-sized atomics, although
//! `core` does not.
72 //! Currently you'll need to use `#[cfg(target_arch)]` primarily to
73 //! conditionally compile in code with atomics. There is an unstable
74 //! `#[cfg(target_has_atomic)]` as well which may be stabilized in the future.
76 //! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm
80 //! A simple spinlock:
83 //! use std::sync::Arc;
84 //! use std::sync::atomic::{AtomicUsize, Ordering};
88 //! let spinlock = Arc::new(AtomicUsize::new(1));
90 //! let spinlock_clone = spinlock.clone();
91 //! let thread = thread::spawn(move|| {
92 //! spinlock_clone.store(0, Ordering::SeqCst);
95 //! // Wait for the other thread to release the lock
96 //! while spinlock.load(Ordering::SeqCst) != 0 {}
98 //! if let Err(panic) = thread.join() {
99 //! println!("Thread had an error: {:?}", panic);
104 //! Keep a global count of live threads:
107 //! use std::sync::atomic::{AtomicUsize, Ordering};
109 //! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0);
111 //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
112 //! println!("live threads: {}", old_thread_count + 1);
115 // ignore-tidy-undocumented-unsafe
117 #![stable(feature = "rust1", since = "1.0.0")]
118 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
119 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))]
121 use self::Ordering::*;
123 use crate::cell::UnsafeCell;
125 use crate::intrinsics;
127 use crate::hint::spin_loop;
129 /// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
131 /// Upon receiving spin-loop signal the processor can optimize its behavior by, for example, saving
132 /// power or switching hyper-threads.
134 /// This function is different from [`std::thread::yield_now`] which directly yields to the
135 /// system's scheduler, whereas `spin_loop_hint` does not interact with the operating system.
137 /// A common use case for `spin_loop_hint` is implementing bounded optimistic spinning in a CAS
138 /// loop in synchronization primitives. To avoid problems like priority inversion, it is strongly
139 /// recommended that the spin loop is terminated after a finite amount of iterations and an
140 /// appropriate blocking syscall is made.
142 /// **Note**: On platforms that do not support receiving spin-loop hints this function does not
143 /// do anything at all.
145 /// [`std::thread::yield_now`]: ../../../std/thread/fn.yield_now.html
146 /// [`std::thread::sleep`]: ../../../std/thread/fn.sleep.html
147 /// [`std::sync::Mutex`]: ../../../std/sync/struct.Mutex.html
149 #[stable(feature = "spin_loop_hint", since = "1.24.0")]
150 pub fn spin_loop_hint() {
154 /// A boolean type which can be safely shared between threads.
156 /// This type has the same in-memory representation as a [`bool`].
158 /// [`bool`]: ../../../std/primitive.bool.html
159 #[cfg(target_has_atomic_load_store = "8")]
160 #[stable(feature = "rust1", since = "1.0.0")]
162 pub struct AtomicBool {
166 #[cfg(target_has_atomic_load_store = "8")]
167 #[stable(feature = "rust1", since = "1.0.0")]
168 impl Default for AtomicBool {
169 /// Creates an `AtomicBool` initialized to `false`.
170 fn default() -> Self {
// Send is implicitly implemented for AtomicBool.
// SAFETY: shared references only ever mutate the inner `u8` through atomic
// operations, so concurrent access from multiple threads is sound.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicBool {}
180 /// A raw pointer type which can be safely shared between threads.
182 /// This type has the same in-memory representation as a `*mut T`.
183 #[cfg(target_has_atomic_load_store = "ptr")]
184 #[stable(feature = "rust1", since = "1.0.0")]
185 #[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
186 #[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
187 #[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
188 pub struct AtomicPtr<T> {
189 p: UnsafeCell<*mut T>,
192 #[cfg(target_has_atomic_load_store = "ptr")]
193 #[stable(feature = "rust1", since = "1.0.0")]
194 impl<T> Default for AtomicPtr<T> {
195 /// Creates a null `AtomicPtr<T>`.
196 fn default() -> AtomicPtr<T> {
197 AtomicPtr::new(crate::ptr::null_mut())
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: an `AtomicPtr` owns only a raw pointer value; moving it to another
// thread transfers no thread-local state.
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: shared references only ever mutate the inner pointer through
// atomic operations.
unsafe impl<T> Sync for AtomicPtr<T> {}
208 /// Atomic memory orderings
210 /// Memory orderings specify the way atomic operations synchronize memory.
211 /// In its weakest [`Relaxed`][Ordering::Relaxed], only the memory directly touched by the
212 /// operation is synchronized. On the other hand, a store-load pair of [`SeqCst`][Ordering::SeqCst]
213 /// operations synchronize other memory while additionally preserving a total order of such
214 /// operations across all threads.
216 /// Rust's memory orderings are [the same as those of
217 /// C++20](https://en.cppreference.com/w/cpp/atomic/memory_order).
219 /// For more information see the [nomicon].
221 /// [nomicon]: ../../../nomicon/atomics.html
222 /// [Ordering::Relaxed]: #variant.Relaxed
223 /// [Ordering::SeqCst]: #variant.SeqCst
224 #[stable(feature = "rust1", since = "1.0.0")]
225 #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
228 /// No ordering constraints, only atomic operations.
230 /// Corresponds to [`memory_order_relaxed`] in C++20.
232 /// [`memory_order_relaxed`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering
233 #[stable(feature = "rust1", since = "1.0.0")]
235 /// When coupled with a store, all previous operations become ordered
236 /// before any load of this value with [`Acquire`] (or stronger) ordering.
237 /// In particular, all previous writes become visible to all threads
238 /// that perform an [`Acquire`] (or stronger) load of this value.
240 /// Notice that using this ordering for an operation that combines loads
241 /// and stores leads to a [`Relaxed`] load operation!
243 /// This ordering is only applicable for operations that can perform a store.
245 /// Corresponds to [`memory_order_release`] in C++20.
247 /// [`Release`]: #variant.Release
248 /// [`Acquire`]: #variant.Acquire
249 /// [`Relaxed`]: #variant.Relaxed
250 /// [`memory_order_release`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
251 #[stable(feature = "rust1", since = "1.0.0")]
253 /// When coupled with a load, if the loaded value was written by a store operation with
254 /// [`Release`] (or stronger) ordering, then all subsequent operations
255 /// become ordered after that store. In particular, all subsequent loads will see data
256 /// written before the store.
258 /// Notice that using this ordering for an operation that combines loads
259 /// and stores leads to a [`Relaxed`] store operation!
261 /// This ordering is only applicable for operations that can perform a load.
263 /// Corresponds to [`memory_order_acquire`] in C++20.
265 /// [`Acquire`]: #variant.Acquire
266 /// [`Release`]: #variant.Release
267 /// [`Relaxed`]: #variant.Relaxed
268 /// [`memory_order_acquire`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
269 #[stable(feature = "rust1", since = "1.0.0")]
271 /// Has the effects of both [`Acquire`] and [`Release`] together:
272 /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
274 /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
275 /// not performing any store and hence it has just [`Acquire`] ordering. However,
276 /// `AcqRel` will never perform [`Relaxed`] accesses.
278 /// This ordering is only applicable for operations that combine both loads and stores.
280 /// Corresponds to [`memory_order_acq_rel`] in C++20.
282 /// [`memory_order_acq_rel`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
283 /// [`Acquire`]: #variant.Acquire
284 /// [`Release`]: #variant.Release
285 /// [`Relaxed`]: #variant.Relaxed
286 #[stable(feature = "rust1", since = "1.0.0")]
288 /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
289 /// operations, respectively) with the additional guarantee that all threads see all
290 /// sequentially consistent operations in the same order.
292 /// Corresponds to [`memory_order_seq_cst`] in C++20.
294 /// [`memory_order_seq_cst`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering
295 /// [`Acquire`]: #variant.Acquire
296 /// [`Release`]: #variant.Release
297 /// [`AcqRel`]: #variant.AcqRel
298 #[stable(feature = "rust1", since = "1.0.0")]
302 /// An [`AtomicBool`] initialized to `false`.
304 /// [`AtomicBool`]: struct.AtomicBool.html
305 #[cfg(target_has_atomic_load_store = "8")]
306 #[stable(feature = "rust1", since = "1.0.0")]
309 reason = "the `new` function is now preferred",
310 suggestion = "AtomicBool::new(false)"
312 pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
#[cfg(target_has_atomic_load_store = "8")]
impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_atomic_new", since = "1.32.0")]
    pub const fn new(v: bool) -> AtomicBool {
        // `true`/`false` are stored as 1/0 in the underlying `u8`.
        AtomicBool { v: UnsafeCell::new(v as u8) }
    }
333 /// Returns a mutable reference to the underlying [`bool`].
335 /// This is safe because the mutable reference guarantees that no other threads are
336 /// concurrently accessing the atomic data.
338 /// [`bool`]: ../../../std/primitive.bool.html
343 /// use std::sync::atomic::{AtomicBool, Ordering};
345 /// let mut some_bool = AtomicBool::new(true);
346 /// assert_eq!(*some_bool.get_mut(), true);
347 /// *some_bool.get_mut() = false;
348 /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
351 #[stable(feature = "atomic_access", since = "1.15.0")]
352 pub fn get_mut(&mut self) -> &mut bool {
353 unsafe { &mut *(self.v.get() as *mut bool) }
356 /// Consumes the atomic and returns the contained value.
358 /// This is safe because passing `self` by value guarantees that no other threads are
359 /// concurrently accessing the atomic data.
364 /// use std::sync::atomic::AtomicBool;
366 /// let some_bool = AtomicBool::new(true);
367 /// assert_eq!(some_bool.into_inner(), true);
370 #[stable(feature = "atomic_access", since = "1.15.0")]
371 pub fn into_inner(self) -> bool {
372 self.v.into_inner() != 0
375 /// Loads a value from the bool.
377 /// `load` takes an [`Ordering`] argument which describes the memory ordering
378 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
382 /// Panics if `order` is [`Release`] or [`AcqRel`].
384 /// [`Ordering`]: enum.Ordering.html
385 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
386 /// [`Release`]: enum.Ordering.html#variant.Release
387 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
388 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
389 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
394 /// use std::sync::atomic::{AtomicBool, Ordering};
396 /// let some_bool = AtomicBool::new(true);
398 /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
401 #[stable(feature = "rust1", since = "1.0.0")]
402 pub fn load(&self, order: Ordering) -> bool {
403 unsafe { atomic_load(self.v.get(), order) != 0 }
406 /// Stores a value into the bool.
408 /// `store` takes an [`Ordering`] argument which describes the memory ordering
409 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
413 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
415 /// [`Ordering`]: enum.Ordering.html
416 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
417 /// [`Release`]: enum.Ordering.html#variant.Release
418 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
419 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
420 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
425 /// use std::sync::atomic::{AtomicBool, Ordering};
427 /// let some_bool = AtomicBool::new(true);
429 /// some_bool.store(false, Ordering::Relaxed);
430 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
433 #[stable(feature = "rust1", since = "1.0.0")]
434 pub fn store(&self, val: bool, order: Ordering) {
436 atomic_store(self.v.get(), val as u8, order);
440 /// Stores a value into the bool, returning the previous value.
442 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
443 /// of this operation. All ordering modes are possible. Note that using
444 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
445 /// using [`Release`] makes the load part [`Relaxed`].
447 /// [`Ordering`]: enum.Ordering.html
448 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
449 /// [`Release`]: enum.Ordering.html#variant.Release
450 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
455 /// use std::sync::atomic::{AtomicBool, Ordering};
457 /// let some_bool = AtomicBool::new(true);
459 /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
460 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
463 #[stable(feature = "rust1", since = "1.0.0")]
464 #[cfg(target_has_atomic = "8")]
465 pub fn swap(&self, val: bool, order: Ordering) -> bool {
466 unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
469 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
471 /// The return value is always the previous value. If it is equal to `current`, then the value
474 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
475 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
476 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
477 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
478 /// happens, and using [`Release`] makes the load part [`Relaxed`].
480 /// [`Ordering`]: enum.Ordering.html
481 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
482 /// [`Release`]: enum.Ordering.html#variant.Release
483 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
484 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
485 /// [`bool`]: ../../../std/primitive.bool.html
490 /// use std::sync::atomic::{AtomicBool, Ordering};
492 /// let some_bool = AtomicBool::new(true);
494 /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
495 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
497 /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
498 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
501 #[stable(feature = "rust1", since = "1.0.0")]
502 #[cfg(target_has_atomic = "8")]
503 pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
504 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
510 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
512 /// The return value is a result indicating whether the new value was written and containing
513 /// the previous value. On success this value is guaranteed to be equal to `current`.
515 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
516 /// ordering of this operation. The first describes the required ordering if the
517 /// operation succeeds while the second describes the required ordering when the
518 /// operation fails. Using [`Acquire`] as success ordering makes the store part
519 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
520 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
521 /// and must be equivalent to or weaker than the success ordering.
524 /// [`bool`]: ../../../std/primitive.bool.html
525 /// [`Ordering`]: enum.Ordering.html
526 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
527 /// [`Release`]: enum.Ordering.html#variant.Release
528 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
529 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
534 /// use std::sync::atomic::{AtomicBool, Ordering};
536 /// let some_bool = AtomicBool::new(true);
538 /// assert_eq!(some_bool.compare_exchange(true,
540 /// Ordering::Acquire,
541 /// Ordering::Relaxed),
543 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
545 /// assert_eq!(some_bool.compare_exchange(true, true,
546 /// Ordering::SeqCst,
547 /// Ordering::Acquire),
549 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
552 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
553 #[cfg(target_has_atomic = "8")]
554 pub fn compare_exchange(
560 ) -> Result<bool, bool> {
562 atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
565 Err(x) => Err(x != 0),
569 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
571 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
572 /// comparison succeeds, which can result in more efficient code on some platforms. The
573 /// return value is a result indicating whether the new value was written and containing the
576 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
577 /// ordering of this operation. The first describes the required ordering if the
578 /// operation succeeds while the second describes the required ordering when the
579 /// operation fails. Using [`Acquire`] as success ordering makes the store part
580 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
581 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
582 /// and must be equivalent to or weaker than the success ordering.
584 /// [`bool`]: ../../../std/primitive.bool.html
585 /// [`compare_exchange`]: #method.compare_exchange
586 /// [`Ordering`]: enum.Ordering.html
587 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
588 /// [`Release`]: enum.Ordering.html#variant.Release
589 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
590 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
595 /// use std::sync::atomic::{AtomicBool, Ordering};
597 /// let val = AtomicBool::new(false);
600 /// let mut old = val.load(Ordering::Relaxed);
602 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
604 /// Err(x) => old = x,
609 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
610 #[cfg(target_has_atomic = "8")]
611 pub fn compare_exchange_weak(
617 ) -> Result<bool, bool> {
619 atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
622 Err(x) => Err(x != 0),
626 /// Logical "and" with a boolean value.
628 /// Performs a logical "and" operation on the current value and the argument `val`, and sets
629 /// the new value to the result.
631 /// Returns the previous value.
633 /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
634 /// of this operation. All ordering modes are possible. Note that using
635 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
636 /// using [`Release`] makes the load part [`Relaxed`].
638 /// [`Ordering`]: enum.Ordering.html
639 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
640 /// [`Release`]: enum.Ordering.html#variant.Release
641 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
646 /// use std::sync::atomic::{AtomicBool, Ordering};
648 /// let foo = AtomicBool::new(true);
649 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
650 /// assert_eq!(foo.load(Ordering::SeqCst), false);
652 /// let foo = AtomicBool::new(true);
653 /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
654 /// assert_eq!(foo.load(Ordering::SeqCst), true);
656 /// let foo = AtomicBool::new(false);
657 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
658 /// assert_eq!(foo.load(Ordering::SeqCst), false);
661 #[stable(feature = "rust1", since = "1.0.0")]
662 #[cfg(target_has_atomic = "8")]
663 pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
664 unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
667 /// Logical "nand" with a boolean value.
669 /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
670 /// the new value to the result.
672 /// Returns the previous value.
674 /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
675 /// of this operation. All ordering modes are possible. Note that using
676 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
677 /// using [`Release`] makes the load part [`Relaxed`].
679 /// [`Ordering`]: enum.Ordering.html
680 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
681 /// [`Release`]: enum.Ordering.html#variant.Release
682 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
687 /// use std::sync::atomic::{AtomicBool, Ordering};
689 /// let foo = AtomicBool::new(true);
690 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
691 /// assert_eq!(foo.load(Ordering::SeqCst), true);
693 /// let foo = AtomicBool::new(true);
694 /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
695 /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
696 /// assert_eq!(foo.load(Ordering::SeqCst), false);
698 /// let foo = AtomicBool::new(false);
699 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
700 /// assert_eq!(foo.load(Ordering::SeqCst), true);
703 #[stable(feature = "rust1", since = "1.0.0")]
704 #[cfg(target_has_atomic = "8")]
705 pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
706 // We can't use atomic_nand here because it can result in a bool with
707 // an invalid value. This happens because the atomic operation is done
708 // with an 8-bit integer internally, which would set the upper 7 bits.
709 // So we just use fetch_xor or swap instead.
712 // We must invert the bool.
713 self.fetch_xor(true, order)
715 // !(x & false) == true
716 // We must set the bool to true.
717 self.swap(true, order)
721 /// Logical "or" with a boolean value.
723 /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
724 /// new value to the result.
726 /// Returns the previous value.
728 /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
729 /// of this operation. All ordering modes are possible. Note that using
730 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
731 /// using [`Release`] makes the load part [`Relaxed`].
733 /// [`Ordering`]: enum.Ordering.html
734 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
735 /// [`Release`]: enum.Ordering.html#variant.Release
736 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
741 /// use std::sync::atomic::{AtomicBool, Ordering};
743 /// let foo = AtomicBool::new(true);
744 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
745 /// assert_eq!(foo.load(Ordering::SeqCst), true);
747 /// let foo = AtomicBool::new(true);
748 /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
749 /// assert_eq!(foo.load(Ordering::SeqCst), true);
751 /// let foo = AtomicBool::new(false);
752 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
753 /// assert_eq!(foo.load(Ordering::SeqCst), false);
756 #[stable(feature = "rust1", since = "1.0.0")]
757 #[cfg(target_has_atomic = "8")]
758 pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
759 unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
762 /// Logical "xor" with a boolean value.
764 /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
765 /// the new value to the result.
767 /// Returns the previous value.
769 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
770 /// of this operation. All ordering modes are possible. Note that using
771 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
772 /// using [`Release`] makes the load part [`Relaxed`].
774 /// [`Ordering`]: enum.Ordering.html
775 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
776 /// [`Release`]: enum.Ordering.html#variant.Release
777 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
782 /// use std::sync::atomic::{AtomicBool, Ordering};
784 /// let foo = AtomicBool::new(true);
785 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
786 /// assert_eq!(foo.load(Ordering::SeqCst), true);
788 /// let foo = AtomicBool::new(true);
789 /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
790 /// assert_eq!(foo.load(Ordering::SeqCst), false);
792 /// let foo = AtomicBool::new(false);
793 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
794 /// assert_eq!(foo.load(Ordering::SeqCst), false);
797 #[stable(feature = "rust1", since = "1.0.0")]
798 #[cfg(target_has_atomic = "8")]
799 pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
800 unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
803 /// Returns a mutable pointer to the underlying [`bool`].
805 /// Doing non-atomic reads and writes on the resulting integer can be a data race.
806 /// This method is mostly useful for FFI, where the function signature may use
807 /// `*mut bool` instead of `&AtomicBool`.
809 /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
810 /// atomic types work with interior mutability. All modifications of an atomic change the value
811 /// through a shared reference, and can do so safely as long as they use atomic operations. Any
812 /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
813 /// restriction: operations on it must be atomic.
815 /// [`bool`]: ../../../std/primitive.bool.html
819 /// ```ignore (extern-declaration)
821 /// use std::sync::atomic::AtomicBool;
823 /// fn my_atomic_op(arg: *mut bool);
826 /// let mut atomic = AtomicBool::new(true);
828 /// my_atomic_op(atomic.as_mut_ptr());
833 #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")]
834 pub fn as_mut_ptr(&self) -> *mut bool {
835 self.v.get() as *mut bool
#[cfg(target_has_atomic_load_store = "ptr")]
impl<T> AtomicPtr<T> {
    /// Creates a new `AtomicPtr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let ptr = &mut 5;
    /// let atomic_ptr = AtomicPtr::new(ptr);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_atomic_new", since = "1.32.0")]
    pub const fn new(p: *mut T) -> AtomicPtr<T> {
        AtomicPtr { p: UnsafeCell::new(p) }
    }
858 /// Returns a mutable reference to the underlying pointer.
860 /// This is safe because the mutable reference guarantees that no other threads are
861 /// concurrently accessing the atomic data.
866 /// use std::sync::atomic::{AtomicPtr, Ordering};
868 /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
869 /// *atomic_ptr.get_mut() = &mut 5;
870 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
873 #[stable(feature = "atomic_access", since = "1.15.0")]
874 pub fn get_mut(&mut self) -> &mut *mut T {
875 unsafe { &mut *self.p.get() }
878 /// Consumes the atomic and returns the contained value.
880 /// This is safe because passing `self` by value guarantees that no other threads are
881 /// concurrently accessing the atomic data.
886 /// use std::sync::atomic::AtomicPtr;
888 /// let atomic_ptr = AtomicPtr::new(&mut 5);
889 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
892 #[stable(feature = "atomic_access", since = "1.15.0")]
893 pub fn into_inner(self) -> *mut T {
897 /// Loads a value from the pointer.
899 /// `load` takes an [`Ordering`] argument which describes the memory ordering
900 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
904 /// Panics if `order` is [`Release`] or [`AcqRel`].
906 /// [`Ordering`]: enum.Ordering.html
907 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
908 /// [`Release`]: enum.Ordering.html#variant.Release
909 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
910 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
911 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
916 /// use std::sync::atomic::{AtomicPtr, Ordering};
918 /// let ptr = &mut 5;
919 /// let some_ptr = AtomicPtr::new(ptr);
921 /// let value = some_ptr.load(Ordering::Relaxed);
924 #[stable(feature = "rust1", since = "1.0.0")]
925 pub fn load(&self, order: Ordering) -> *mut T {
926 unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
929 /// Stores a value into the pointer.
931 /// `store` takes an [`Ordering`] argument which describes the memory ordering
932 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
936 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
938 /// [`Ordering`]: enum.Ordering.html
939 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
940 /// [`Release`]: enum.Ordering.html#variant.Release
941 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
942 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
943 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
948 /// use std::sync::atomic::{AtomicPtr, Ordering};
950 /// let ptr = &mut 5;
951 /// let some_ptr = AtomicPtr::new(ptr);
953 /// let other_ptr = &mut 10;
955 /// some_ptr.store(other_ptr, Ordering::Relaxed);
958 #[stable(feature = "rust1", since = "1.0.0")]
959 pub fn store(&self, ptr: *mut T, order: Ordering) {
961 atomic_store(self.p.get() as *mut usize, ptr as usize, order);
965 /// Stores a value into the pointer, returning the previous value.
967 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
968 /// of this operation. All ordering modes are possible. Note that using
969 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
970 /// using [`Release`] makes the load part [`Relaxed`].
972 /// [`Ordering`]: enum.Ordering.html
973 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
974 /// [`Release`]: enum.Ordering.html#variant.Release
975 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
980 /// use std::sync::atomic::{AtomicPtr, Ordering};
982 /// let ptr = &mut 5;
983 /// let some_ptr = AtomicPtr::new(ptr);
985 /// let other_ptr = &mut 10;
987 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
990 #[stable(feature = "rust1", since = "1.0.0")]
991 #[cfg(target_has_atomic = "ptr")]
992 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
993 unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
996 /// Stores a value into the pointer if the current value is the same as the `current` value.
998 /// The return value is always the previous value. If it is equal to `current`, then the value
1001 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1002 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
1003 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1004 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1005 /// happens, and using [`Release`] makes the load part [`Relaxed`].
1007 /// [`Ordering`]: enum.Ordering.html
1008 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1009 /// [`Release`]: enum.Ordering.html#variant.Release
1010 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1011 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1016 /// use std::sync::atomic::{AtomicPtr, Ordering};
1018 /// let ptr = &mut 5;
1019 /// let some_ptr = AtomicPtr::new(ptr);
1021 /// let other_ptr = &mut 10;
1023 /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
1026 #[stable(feature = "rust1", since = "1.0.0")]
1027 #[cfg(target_has_atomic = "ptr")]
1028 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
1029 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
1035 /// Stores a value into the pointer if the current value is the same as the `current` value.
1037 /// The return value is a result indicating whether the new value was written and containing
1038 /// the previous value. On success this value is guaranteed to be equal to `current`.
1040 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1041 /// ordering of this operation. The first describes the required ordering if the
1042 /// operation succeeds while the second describes the required ordering when the
1043 /// operation fails. Using [`Acquire`] as success ordering makes the store part
1044 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1045 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1046 /// and must be equivalent to or weaker than the success ordering.
1048 /// [`Ordering`]: enum.Ordering.html
1049 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1050 /// [`Release`]: enum.Ordering.html#variant.Release
1051 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1052 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1057 /// use std::sync::atomic::{AtomicPtr, Ordering};
1059 /// let ptr = &mut 5;
1060 /// let some_ptr = AtomicPtr::new(ptr);
1062 /// let other_ptr = &mut 10;
1064 /// let value = some_ptr.compare_exchange(ptr, other_ptr,
1065 /// Ordering::SeqCst, Ordering::Relaxed);
1068 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1069 #[cfg(target_has_atomic = "ptr")]
1070 pub fn compare_exchange(
1076 ) -> Result<*mut T, *mut T> {
1078 let res = atomic_compare_exchange(
1079 self.p.get() as *mut usize,
1086 Ok(x) => Ok(x as *mut T),
1087 Err(x) => Err(x as *mut T),
1092 /// Stores a value into the pointer if the current value is the same as the `current` value.
1094 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
1095 /// comparison succeeds, which can result in more efficient code on some platforms. The
1096 /// return value is a result indicating whether the new value was written and containing the
1099 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1100 /// ordering of this operation. The first describes the required ordering if the
1101 /// operation succeeds while the second describes the required ordering when the
1102 /// operation fails. Using [`Acquire`] as success ordering makes the store part
1103 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1104 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1105 /// and must be equivalent to or weaker than the success ordering.
1107 /// [`compare_exchange`]: #method.compare_exchange
1108 /// [`Ordering`]: enum.Ordering.html
1109 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1110 /// [`Release`]: enum.Ordering.html#variant.Release
1111 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1112 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1117 /// use std::sync::atomic::{AtomicPtr, Ordering};
1119 /// let some_ptr = AtomicPtr::new(&mut 5);
1121 /// let new = &mut 10;
1122 /// let mut old = some_ptr.load(Ordering::Relaxed);
1124 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1126 /// Err(x) => old = x,
1131 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1132 #[cfg(target_has_atomic = "ptr")]
1133 pub fn compare_exchange_weak(
1139 ) -> Result<*mut T, *mut T> {
1141 let res = atomic_compare_exchange_weak(
1142 self.p.get() as *mut usize,
1149 Ok(x) => Ok(x as *mut T),
1150 Err(x) => Err(x as *mut T),
1156 #[cfg(target_has_atomic_load_store = "8")]
1157 #[stable(feature = "atomic_bool_from", since = "1.24.0")]
1158 impl From<bool> for AtomicBool {
1159 /// Converts a `bool` into an `AtomicBool`.
1164 /// use std::sync::atomic::AtomicBool;
1165 /// let atomic_bool = AtomicBool::from(true);
1166 /// assert_eq!(format!("{:?}", atomic_bool), "true")
1169 fn from(b: bool) -> Self {
1174 #[cfg(target_has_atomic_load_store = "ptr")]
1175 #[stable(feature = "atomic_from", since = "1.23.0")]
1176 impl<T> From<*mut T> for AtomicPtr<T> {
1178 fn from(p: *mut T) -> Self {
// ---------------------------------------------------------------------------
// `atomic_int!` generates one atomic integer type (e.g. `AtomicI8`) per
// invocation: the `#[repr(C, align(...))]` wrapper struct around an
// `UnsafeCell<$int_type>`, the deprecated `ATOMIC_*_INIT` constant, the
// `Default`/`From<$int_type>`/`Debug`/`Sync` impls, and the method surface
// (`new`, `get_mut`, `into_inner`, `load`, `store`, `swap`,
// `compare_and_swap`, `compare_exchange[_weak]`, the `fetch_*` family,
// `fetch_update`, `fetch_max`/`fetch_min`, `as_mut_ptr`), each forwarding
// to the module's integer-based atomic intrinsic wrappers.
// NOTE(review): this chunk is an elided extraction — several macro-argument
// lines, `doc_comment!` wrappers and closing delimiters are not visible
// here, and the leading number on each line is the original file's line
// number fused in by the extractor; confirm against the upstream source
// before editing code in this region.
// ---------------------------------------------------------------------------
1183 #[cfg(target_has_atomic_load_store = "8")]
1184 macro_rules! atomic_int {
1189 $stable_access:meta,
1193 $stable_init_const:meta,
1194 $s_int_type:expr, $int_ref:expr,
1195 $extra_feature:expr,
1196 $min_fn:ident, $max_fn:ident,
1199 $int_type:ident $atomic_type:ident $atomic_init:ident) => {
1200 /// An integer type which can be safely shared between threads.
1202 /// This type has the same in-memory representation as the underlying
1203 /// integer type, [`
1204 #[doc = $s_int_type]
1207 /// ). For more about the differences between atomic types and
1208 /// non-atomic types as well as information about the portability of
1209 /// this type, please see the [module-level documentation].
1211 /// [module-level documentation]: index.html
// The generated type itself: an interior-mutable cell with the alignment
// the hardware's atomic instructions require (passed in as `$align`).
1213 #[repr(C, align($align))]
1214 pub struct $atomic_type {
1215 v: UnsafeCell<$int_type>,
// Legacy pre-`const fn` initializer constant; deprecated in favour of
// `$atomic_type::new(0)` per the `reason`/`suggestion` below.
1218 /// An atomic integer initialized to `0`.
1219 #[$stable_init_const]
1222 reason = "the `new` function is now preferred",
1223 suggestion = $atomic_new,
1225 pub const $atomic_init: $atomic_type = $atomic_type::new(0);
1228 impl Default for $atomic_type {
1229 fn default() -> Self {
1230 Self::new(Default::default())
1235 impl From<$int_type> for $atomic_type {
1238 "Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`."),
1240 fn from(v: $int_type) -> Self { Self::new(v) }
1245 impl fmt::Debug for $atomic_type {
1246 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1247 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
1251 // Send is implicitly implemented.
// The generated methods below only mutate through the atomic intrinsic
// wrappers (or via exclusive access), which is what justifies this impl.
1253 unsafe impl Sync for $atomic_type {}
1257 concat!("Creates a new atomic integer.
1262 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1264 let atomic_forty_two = ", stringify!($atomic_type), "::new(42);
1269 pub const fn new(v: $int_type) -> Self {
1270 Self {v: UnsafeCell::new(v)}
1275 concat!("Returns a mutable reference to the underlying integer.
1277 This is safe because the mutable reference guarantees that no other threads are
1278 concurrently accessing the atomic data.
1283 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1285 let mut some_var = ", stringify!($atomic_type), "::new(10);
1286 assert_eq!(*some_var.get_mut(), 10);
1287 *some_var.get_mut() = 5;
1288 assert_eq!(some_var.load(Ordering::SeqCst), 5);
1292 pub fn get_mut(&mut self) -> &mut $int_type {
1293 unsafe { &mut *self.v.get() }
1298 concat!("Consumes the atomic and returns the contained value.
1300 This is safe because passing `self` by value guarantees that no other threads are
1301 concurrently accessing the atomic data.
1306 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1308 let some_var = ", stringify!($atomic_type), "::new(5);
1309 assert_eq!(some_var.into_inner(), 5);
1313 pub fn into_inner(self) -> $int_type {
1319 concat!("Loads a value from the atomic integer.
1321 `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1322 Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1326 Panics if `order` is [`Release`] or [`AcqRel`].
1328 [`Ordering`]: enum.Ordering.html
1329 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1330 [`Release`]: enum.Ordering.html#variant.Release
1331 [`Acquire`]: enum.Ordering.html#variant.Acquire
1332 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1333 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1338 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1340 let some_var = ", stringify!($atomic_type), "::new(5);
1342 assert_eq!(some_var.load(Ordering::Relaxed), 5);
1346 pub fn load(&self, order: Ordering) -> $int_type {
1347 unsafe { atomic_load(self.v.get(), order) }
1352 concat!("Stores a value into the atomic integer.
1354 `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1355 Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
1359 Panics if `order` is [`Acquire`] or [`AcqRel`].
1361 [`Ordering`]: enum.Ordering.html
1362 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1363 [`Release`]: enum.Ordering.html#variant.Release
1364 [`Acquire`]: enum.Ordering.html#variant.Acquire
1365 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1366 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1371 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1373 let some_var = ", stringify!($atomic_type), "::new(5);
1375 some_var.store(10, Ordering::Relaxed);
1376 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1380 pub fn store(&self, val: $int_type, order: Ordering) {
1381 unsafe { atomic_store(self.v.get(), val, order); }
1386 concat!("Stores a value into the atomic integer, returning the previous value.
1388 `swap` takes an [`Ordering`] argument which describes the memory ordering
1389 of this operation. All ordering modes are possible. Note that using
1390 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1391 using [`Release`] makes the load part [`Relaxed`].
1393 [`Ordering`]: enum.Ordering.html
1394 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1395 [`Release`]: enum.Ordering.html#variant.Release
1396 [`Acquire`]: enum.Ordering.html#variant.Acquire
1401 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1403 let some_var = ", stringify!($atomic_type), "::new(5);
1405 assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
1410 pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
1411 unsafe { atomic_swap(self.v.get(), val, order) }
1416 concat!("Stores a value into the atomic integer if the current value is the same as
1417 the `current` value.
1419 The return value is always the previous value. If it is equal to `current`, then the
1422 `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1423 ordering of this operation. Notice that even when using [`AcqRel`], the operation
1424 might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1425 Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1426 happens, and using [`Release`] makes the load part [`Relaxed`].
1428 [`Ordering`]: enum.Ordering.html
1429 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1430 [`Release`]: enum.Ordering.html#variant.Release
1431 [`Acquire`]: enum.Ordering.html#variant.Acquire
1432 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1437 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1439 let some_var = ", stringify!($atomic_type), "::new(5);
1441 assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
1442 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1444 assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
1445 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1450 pub fn compare_and_swap(&self,
1453 order: Ordering) -> $int_type {
1454 match self.compare_exchange(current,
1457 strongest_failure_ordering(order)) {
1465 concat!("Stores a value into the atomic integer if the current value is the same as
1466 the `current` value.
1468 The return value is a result indicating whether the new value was written and
1469 containing the previous value. On success this value is guaranteed to be equal to
1472 `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1473 ordering of this operation. The first describes the required ordering if the
1474 operation succeeds while the second describes the required ordering when the
1475 operation fails. Using [`Acquire`] as success ordering makes the store part
1476 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1477 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1478 and must be equivalent to or weaker than the success ordering.
1480 [`Ordering`]: enum.Ordering.html
1481 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1482 [`Release`]: enum.Ordering.html#variant.Release
1483 [`Acquire`]: enum.Ordering.html#variant.Acquire
1484 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1489 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1491 let some_var = ", stringify!($atomic_type), "::new(5);
1493 assert_eq!(some_var.compare_exchange(5, 10,
1497 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1499 assert_eq!(some_var.compare_exchange(6, 12,
1503 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1508 pub fn compare_exchange(&self,
1512 failure: Ordering) -> Result<$int_type, $int_type> {
1513 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
1518 concat!("Stores a value into the atomic integer if the current value is the same as
1519 the `current` value.
1521 Unlike [`compare_exchange`], this function is allowed to spuriously fail even
1522 when the comparison succeeds, which can result in more efficient code on some
1523 platforms. The return value is a result indicating whether the new value was
1524 written and containing the previous value.
1526 `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1527 ordering of this operation. The first describes the required ordering if the
1528 operation succeeds while the second describes the required ordering when the
1529 operation fails. Using [`Acquire`] as success ordering makes the store part
1530 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1531 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1532 and must be equivalent to or weaker than the success ordering.
1534 [`compare_exchange`]: #method.compare_exchange
1535 [`Ordering`]: enum.Ordering.html
1536 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1537 [`Release`]: enum.Ordering.html#variant.Release
1538 [`Acquire`]: enum.Ordering.html#variant.Acquire
1539 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1544 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1546 let val = ", stringify!($atomic_type), "::new(4);
1548 let mut old = val.load(Ordering::Relaxed);
1551 match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1560 pub fn compare_exchange_weak(&self,
1564 failure: Ordering) -> Result<$int_type, $int_type> {
1566 atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
1572 concat!("Adds to the current value, returning the previous value.
1574 This operation wraps around on overflow.
1576 `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
1577 of this operation. All ordering modes are possible. Note that using
1578 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1579 using [`Release`] makes the load part [`Relaxed`].
1581 [`Ordering`]: enum.Ordering.html
1582 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1583 [`Release`]: enum.Ordering.html#variant.Release
1584 [`Acquire`]: enum.Ordering.html#variant.Acquire
1589 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1591 let foo = ", stringify!($atomic_type), "::new(0);
1592 assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
1593 assert_eq!(foo.load(Ordering::SeqCst), 10);
1598 pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
1599 unsafe { atomic_add(self.v.get(), val, order) }
1604 concat!("Subtracts from the current value, returning the previous value.
1606 This operation wraps around on overflow.
1608 `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
1609 of this operation. All ordering modes are possible. Note that using
1610 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1611 using [`Release`] makes the load part [`Relaxed`].
1613 [`Ordering`]: enum.Ordering.html
1614 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1615 [`Release`]: enum.Ordering.html#variant.Release
1616 [`Acquire`]: enum.Ordering.html#variant.Acquire
1621 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1623 let foo = ", stringify!($atomic_type), "::new(20);
1624 assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
1625 assert_eq!(foo.load(Ordering::SeqCst), 10);
1630 pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
1631 unsafe { atomic_sub(self.v.get(), val, order) }
1636 concat!("Bitwise \"and\" with the current value.
1638 Performs a bitwise \"and\" operation on the current value and the argument `val`, and
1639 sets the new value to the result.
1641 Returns the previous value.
1643 `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
1644 of this operation. All ordering modes are possible. Note that using
1645 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1646 using [`Release`] makes the load part [`Relaxed`].
1648 [`Ordering`]: enum.Ordering.html
1649 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1650 [`Release`]: enum.Ordering.html#variant.Release
1651 [`Acquire`]: enum.Ordering.html#variant.Acquire
1656 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1658 let foo = ", stringify!($atomic_type), "::new(0b101101);
1659 assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
1660 assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
1665 pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
1666 unsafe { atomic_and(self.v.get(), val, order) }
1671 concat!("Bitwise \"nand\" with the current value.
1673 Performs a bitwise \"nand\" operation on the current value and the argument `val`, and
1674 sets the new value to the result.
1676 Returns the previous value.
1678 `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
1679 of this operation. All ordering modes are possible. Note that using
1680 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1681 using [`Release`] makes the load part [`Relaxed`].
1683 [`Ordering`]: enum.Ordering.html
1684 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1685 [`Release`]: enum.Ordering.html#variant.Release
1686 [`Acquire`]: enum.Ordering.html#variant.Acquire
1691 ", $extra_feature, "
1692 use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1694 let foo = ", stringify!($atomic_type), "::new(0x13);
1695 assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
1696 assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
1701 pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
1702 unsafe { atomic_nand(self.v.get(), val, order) }
1707 concat!("Bitwise \"or\" with the current value.
1709 Performs a bitwise \"or\" operation on the current value and the argument `val`, and
1710 sets the new value to the result.
1712 Returns the previous value.
1714 `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
1715 of this operation. All ordering modes are possible. Note that using
1716 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1717 using [`Release`] makes the load part [`Relaxed`].
1719 [`Ordering`]: enum.Ordering.html
1720 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1721 [`Release`]: enum.Ordering.html#variant.Release
1722 [`Acquire`]: enum.Ordering.html#variant.Acquire
1727 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1729 let foo = ", stringify!($atomic_type), "::new(0b101101);
1730 assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
1731 assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
1736 pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
1737 unsafe { atomic_or(self.v.get(), val, order) }
1742 concat!("Bitwise \"xor\" with the current value.
1744 Performs a bitwise \"xor\" operation on the current value and the argument `val`, and
1745 sets the new value to the result.
1747 Returns the previous value.
1749 `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
1750 of this operation. All ordering modes are possible. Note that using
1751 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1752 using [`Release`] makes the load part [`Relaxed`].
1754 [`Ordering`]: enum.Ordering.html
1755 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1756 [`Release`]: enum.Ordering.html#variant.Release
1757 [`Acquire`]: enum.Ordering.html#variant.Acquire
1762 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1764 let foo = ", stringify!($atomic_type), "::new(0b101101);
1765 assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
1766 assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
1771 pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
1772 unsafe { atomic_xor(self.v.get(), val, order) }
// `fetch_update`: the one non-intrinsic method — a CAS loop that retries
// `compare_exchange_weak` until `f` returns `None` or the exchange succeeds
// (see the loop in the generated body below).
1777 concat!("Fetches the value, and applies a function to it that returns an optional
1778 new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
1779 `Err(previous_value)`.
1781 Note: This may call the function multiple times if the value has been changed from other threads in
1782 the meantime, as long as the function returns `Some(_)`, but the function will have been applied
1783 but once to the stored value.
1785 `fetch_update` takes two [`Ordering`] arguments to describe the memory
1786 ordering of this operation. The first describes the required ordering for loads
1787 and failed updates while the second describes the required ordering when the
1788 operation finally succeeds. Beware that this is different from the two
1789 modes in [`compare_exchange`]!
1791 Using [`Acquire`] as success ordering makes the store part
1792 of this operation [`Relaxed`], and using [`Release`] makes the final successful load
1793 [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1794 and must be equivalent to or weaker than the success ordering.
1796 [`bool`]: ../../../std/primitive.bool.html
1797 [`compare_exchange`]: #method.compare_exchange
1798 [`Ordering`]: enum.Ordering.html
1799 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1800 [`Release`]: enum.Ordering.html#variant.Release
1801 [`Acquire`]: enum.Ordering.html#variant.Acquire
1802 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1807 #![feature(no_more_cas)]
1808 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1810 let x = ", stringify!($atomic_type), "::new(7);
1811 assert_eq!(x.fetch_update(|_| None, Ordering::SeqCst, Ordering::SeqCst), Err(7));
1812 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(7));
1813 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(8));
1814 assert_eq!(x.load(Ordering::SeqCst), 9);
1817 #[unstable(feature = "no_more_cas",
1818 reason = "no more CAS loops in user code",
1821 pub fn fetch_update<F>(&self,
1823 fetch_order: Ordering,
1824 set_order: Ordering) -> Result<$int_type, $int_type>
1825 where F: FnMut($int_type) -> Option<$int_type> {
1826 let mut prev = self.load(fetch_order);
1827 while let Some(next) = f(prev) {
1828 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
1829 x @ Ok(_) => return x,
1830 Err(next_prev) => prev = next_prev
1838 concat!("Maximum with the current value.
1840 Finds the maximum of the current value and the argument `val`, and
1841 sets the new value to the result.
1843 Returns the previous value.
1845 `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
1846 of this operation. All ordering modes are possible. Note that using
1847 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1848 using [`Release`] makes the load part [`Relaxed`].
1850 [`Ordering`]: enum.Ordering.html
1851 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1852 [`Release`]: enum.Ordering.html#variant.Release
1853 [`Acquire`]: enum.Ordering.html#variant.Acquire
1858 #![feature(atomic_min_max)]
1859 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1861 let foo = ", stringify!($atomic_type), "::new(23);
1862 assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
1863 assert_eq!(foo.load(Ordering::SeqCst), 42);
1866 If you want to obtain the maximum value in one step, you can use the following:
1869 #![feature(atomic_min_max)]
1870 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1872 let foo = ", stringify!($atomic_type), "::new(23);
1874 let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
1875 assert!(max_foo == 42);
1878 #[unstable(feature = "atomic_min_max",
1879 reason = "easier and faster min/max than writing manual CAS loop",
// `$max_fn`/`$min_fn` are supplied per invocation: the signed intrinsics
// (`atomic_max`/`atomic_min`) for iN, the unsigned ones
// (`atomic_umax`/`atomic_umin`) for uN — see the invocations below.
1882 pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
1883 unsafe { $max_fn(self.v.get(), val, order) }
1888 concat!("Minimum with the current value.
1890 Finds the minimum of the current value and the argument `val`, and
1891 sets the new value to the result.
1893 Returns the previous value.
1895 `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
1896 of this operation. All ordering modes are possible. Note that using
1897 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1898 using [`Release`] makes the load part [`Relaxed`].
1900 [`Ordering`]: enum.Ordering.html
1901 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1902 [`Release`]: enum.Ordering.html#variant.Release
1903 [`Acquire`]: enum.Ordering.html#variant.Acquire
1908 #![feature(atomic_min_max)]
1909 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1911 let foo = ", stringify!($atomic_type), "::new(23);
1912 assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
1913 assert_eq!(foo.load(Ordering::Relaxed), 23);
1914 assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
1915 assert_eq!(foo.load(Ordering::Relaxed), 22);
1918 If you want to obtain the minimum value in one step, you can use the following:
1921 #![feature(atomic_min_max)]
1922 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1924 let foo = ", stringify!($atomic_type), "::new(23);
1926 let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
1927 assert_eq!(min_foo, 12);
1930 #[unstable(feature = "atomic_min_max",
1931 reason = "easier and faster min/max than writing manual CAS loop",
1934 pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
1935 unsafe { $min_fn(self.v.get(), val, order) }
1940 concat!("Returns a mutable pointer to the underlying integer.
1942 Doing non-atomic reads and writes on the resulting integer can be a data race.
1943 This method is mostly useful for FFI, where the function signature may use
1944 `*mut ", stringify!($int_type), "` instead of `&", stringify!($atomic_type), "`.
1946 Returning an `*mut` pointer from a shared reference to this atomic is safe because the
1947 atomic types work with interior mutability. All modifications of an atomic change the value
1948 through a shared reference, and can do so safely as long as they use atomic operations. Any
1949 use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
1950 restriction: operations on it must be atomic.
1954 ```ignore (extern-declaration)
1956 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1959 fn my_atomic_op(arg: *mut ", stringify!($int_type), ");
1962 let mut atomic = ", stringify!($atomic_type), "::new(1);
1964 my_atomic_op(atomic.as_mut_ptr());
1969 #[unstable(feature = "atomic_mut_ptr",
1970 reason = "recently added",
1972 pub fn as_mut_ptr(&self) -> *mut $int_type {
// `atomic_int!` invocation generating `AtomicI8` (signed, so the
// `atomic_min`/`atomic_max` intrinsics are passed for fetch_min/fetch_max).
// NOTE(review): the opening `atomic_int! {` line and a few argument lines
// (e.g. the alignment and the `"AtomicI8::new(0)"` suggestion string) are
// elided in this extraction — confirm against upstream before editing.
1980 #[cfg(target_has_atomic_load_store = "8")]
1982 cfg(target_has_atomic = "8"),
1983 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1984 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1985 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1986 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1987 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1988 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1989 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
1990 unstable(feature = "integer_atomics", issue = "32976"),
1991 "i8", "../../../std/primitive.i8.html",
1993 atomic_min, atomic_max,
1996 i8 AtomicI8 ATOMIC_I8_INIT
1998 #[cfg(target_has_atomic_load_store = "8")]
2000 cfg(target_has_atomic = "8"),
2001 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2002 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2003 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2004 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2005 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2006 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2007 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2008 unstable(feature = "integer_atomics", issue = "32976"),
2009 "u8", "../../../std/primitive.u8.html",
2011 atomic_umin, atomic_umax,
2014 u8 AtomicU8 ATOMIC_U8_INIT
// atomic_int! inputs for the 16-bit atomics; gated on 16-bit atomic
// load/store support. Signed intrinsics for i16, unsigned for u16.
2016 #[cfg(target_has_atomic_load_store = "16")]
2018 cfg(target_has_atomic = "16"),
2019 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2020 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2021 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2022 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2023 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2024 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2025 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2026 unstable(feature = "integer_atomics", issue = "32976"),
2027 "i16", "../../../std/primitive.i16.html",
2029 atomic_min, atomic_max,
2031 "AtomicI16::new(0)",
2032 i16 AtomicI16 ATOMIC_I16_INIT
2034 #[cfg(target_has_atomic_load_store = "16")]
2036 cfg(target_has_atomic = "16"),
2037 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2038 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2039 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2040 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2041 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2042 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2043 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2044 unstable(feature = "integer_atomics", issue = "32976"),
2045 "u16", "../../../std/primitive.u16.html",
2047 atomic_umin, atomic_umax,
2049 "AtomicU16::new(0)",
2050 u16 AtomicU16 ATOMIC_U16_INIT
// atomic_int! inputs for the 32-bit atomics; same pattern as the
// narrower types (signed intrinsics for i32, unsigned for u32).
2052 #[cfg(target_has_atomic_load_store = "32")]
2054 cfg(target_has_atomic = "32"),
2055 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2056 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2057 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2058 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2059 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2060 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2061 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2062 unstable(feature = "integer_atomics", issue = "32976"),
2063 "i32", "../../../std/primitive.i32.html",
2065 atomic_min, atomic_max,
2067 "AtomicI32::new(0)",
2068 i32 AtomicI32 ATOMIC_I32_INIT
2070 #[cfg(target_has_atomic_load_store = "32")]
2072 cfg(target_has_atomic = "32"),
2073 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2074 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2075 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2076 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2077 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2078 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2079 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2080 unstable(feature = "integer_atomics", issue = "32976"),
2081 "u32", "../../../std/primitive.u32.html",
2083 atomic_umin, atomic_umax,
2085 "AtomicU32::new(0)",
2086 u32 AtomicU32 ATOMIC_U32_INIT
// atomic_int! inputs for the 64-bit atomics; gated on 64-bit atomic
// load/store support (not available on some 32-bit targets).
2088 #[cfg(target_has_atomic_load_store = "64")]
2090 cfg(target_has_atomic = "64"),
2091 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2092 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2093 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2094 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2095 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2096 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2097 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2098 unstable(feature = "integer_atomics", issue = "32976"),
2099 "i64", "../../../std/primitive.i64.html",
2101 atomic_min, atomic_max,
2103 "AtomicI64::new(0)",
2104 i64 AtomicI64 ATOMIC_I64_INIT
2106 #[cfg(target_has_atomic_load_store = "64")]
2108 cfg(target_has_atomic = "64"),
2109 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2110 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2111 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2112 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2113 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2114 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2115 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2116 unstable(feature = "integer_atomics", issue = "32976"),
2117 "u64", "../../../std/primitive.u64.html",
2119 atomic_umin, atomic_umax,
2121 "AtomicU64::new(0)",
2122 u64 AtomicU64 ATOMIC_U64_INIT
// atomic_int! inputs for the 128-bit atomics. Unlike the narrower types,
// every API group is still unstable (feature = integer_atomics), so the
// doc examples also inject the feature attribute.
2124 #[cfg(target_has_atomic_load_store = "128")]
2126 cfg(target_has_atomic = "128"),
2127 unstable(feature = "integer_atomics", issue = "32976"),
2128 unstable(feature = "integer_atomics", issue = "32976"),
2129 unstable(feature = "integer_atomics", issue = "32976"),
2130 unstable(feature = "integer_atomics", issue = "32976"),
2131 unstable(feature = "integer_atomics", issue = "32976"),
2132 unstable(feature = "integer_atomics", issue = "32976"),
2133 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2134 unstable(feature = "integer_atomics", issue = "32976"),
2135 "i128", "../../../std/primitive.i128.html",
2136 "#![feature(integer_atomics)]\n\n",
2137 atomic_min, atomic_max,
2139 "AtomicI128::new(0)",
2140 i128 AtomicI128 ATOMIC_I128_INIT
2142 #[cfg(target_has_atomic_load_store = "128")]
2144 cfg(target_has_atomic = "128"),
2145 unstable(feature = "integer_atomics", issue = "32976"),
2146 unstable(feature = "integer_atomics", issue = "32976"),
2147 unstable(feature = "integer_atomics", issue = "32976"),
2148 unstable(feature = "integer_atomics", issue = "32976"),
2149 unstable(feature = "integer_atomics", issue = "32976"),
2150 unstable(feature = "integer_atomics", issue = "32976"),
2151 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2152 unstable(feature = "integer_atomics", issue = "32976"),
2153 "u128", "../../../std/primitive.u128.html",
2154 "#![feature(integer_atomics)]\n\n",
2155 atomic_umin, atomic_umax,
2157 "AtomicU128::new(0)",
2158 u128 AtomicU128 ATOMIC_U128_INIT
// ptr_width! expands to the pointer size in bytes for the current target
// (one cfg-selected definition per target_pointer_width); used by the
// isize/usize atomic_int! invocations below. Macro bodies are on lines
// not visible in this chunk.
2160 #[cfg(target_has_atomic_load_store = "ptr")]
2161 #[cfg(target_pointer_width = "16")]
2162 macro_rules! ptr_width {
2167 #[cfg(target_has_atomic_load_store = "ptr")]
2168 #[cfg(target_pointer_width = "32")]
2169 macro_rules! ptr_width {
2174 #[cfg(target_has_atomic_load_store = "ptr")]
2175 #[cfg(target_pointer_width = "64")]
2176 macro_rules! ptr_width {
// atomic_int! inputs for AtomicIsize/AtomicUsize — the original 1.0-era
// atomics, hence per-API-group stability attributes with distinct
// features/versions rather than a single integer_atomics_stable line.
2181 #[cfg(target_has_atomic_load_store = "ptr")]
2183 cfg(target_has_atomic = "ptr"),
2184 stable(feature = "rust1", since = "1.0.0"),
2185 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2186 stable(feature = "atomic_debug", since = "1.3.0"),
2187 stable(feature = "atomic_access", since = "1.15.0"),
2188 stable(feature = "atomic_from", since = "1.23.0"),
2189 stable(feature = "atomic_nand", since = "1.27.0"),
2190 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2191 stable(feature = "rust1", since = "1.0.0"),
2192 "isize", "../../../std/primitive.isize.html",
2194 atomic_min, atomic_max,
2196 "AtomicIsize::new(0)",
2197 isize AtomicIsize ATOMIC_ISIZE_INIT
2199 #[cfg(target_has_atomic_load_store = "ptr")]
2201 cfg(target_has_atomic = "ptr"),
2202 stable(feature = "rust1", since = "1.0.0"),
2203 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2204 stable(feature = "atomic_debug", since = "1.3.0"),
2205 stable(feature = "atomic_access", since = "1.15.0"),
2206 stable(feature = "atomic_from", since = "1.23.0"),
2207 stable(feature = "atomic_nand", since = "1.27.0"),
2208 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2209 stable(feature = "rust1", since = "1.0.0"),
2210 "usize", "../../../std/primitive.usize.html",
2212 atomic_umin, atomic_umax,
2214 "AtomicUsize::new(0)",
2215 usize AtomicUsize ATOMIC_USIZE_INIT
2219 #[cfg(target_has_atomic = "8")]
// Maps a compare-exchange success ordering to the strongest ordering that
// is legal as its failure ordering (a failed CAS is only a load, so
// Release/AcqRel must be weakened). Body is on lines not in this chunk.
2220 fn strongest_failure_ordering(order: Ordering) -> Ordering {
/// Dispatches an atomic store to the intrinsic matching `order`.
///
/// A store has no read half, so acquire-flavored orderings are rejected
/// at runtime with a panic rather than silently weakened.
2231 unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
2233 Release => intrinsics::atomic_store_rel(dst, val),
2234 Relaxed => intrinsics::atomic_store_relaxed(dst, val),
2235 SeqCst => intrinsics::atomic_store(dst, val),
2236 Acquire => panic!("there is no such thing as an acquire store"),
2237 AcqRel => panic!("there is no such thing as an acquire/release store"),
/// Dispatches an atomic load to the intrinsic matching `order`.
///
/// A load has no write half, so release-flavored orderings panic.
2242 unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
2244 Acquire => intrinsics::atomic_load_acq(dst),
2245 Relaxed => intrinsics::atomic_load_relaxed(dst),
2246 SeqCst => intrinsics::atomic_load(dst),
2247 Release => panic!("there is no such thing as a release load"),
2248 AcqRel => panic!("there is no such thing as an acquire/release load"),
2253 #[cfg(target_has_atomic = "8")]
/// Atomic exchange: stores `val` and returns the previous value.
/// All five orderings are valid for a read-modify-write.
2254 unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
2256 Acquire => intrinsics::atomic_xchg_acq(dst, val),
2257 Release => intrinsics::atomic_xchg_rel(dst, val),
2258 AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
2259 Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
2260 SeqCst => intrinsics::atomic_xchg(dst, val),
2264 /// Returns the previous value (like __sync_fetch_and_add).
2266 #[cfg(target_has_atomic = "8")]
// Wrapping-add RMW, dispatched to the xadd intrinsic for `order`.
2267 unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
2269 Acquire => intrinsics::atomic_xadd_acq(dst, val),
2270 Release => intrinsics::atomic_xadd_rel(dst, val),
2271 AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
2272 Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
2273 SeqCst => intrinsics::atomic_xadd(dst, val),
2277 /// Returns the previous value (like __sync_fetch_and_sub).
2279 #[cfg(target_has_atomic = "8")]
// Wrapping-subtract RMW, dispatched to the xsub intrinsic for `order`.
2280 unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
2282 Acquire => intrinsics::atomic_xsub_acq(dst, val),
2283 Release => intrinsics::atomic_xsub_rel(dst, val),
2284 AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
2285 Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
2286 SeqCst => intrinsics::atomic_xsub(dst, val),
2291 #[cfg(target_has_atomic = "8")]
/// Strong compare-exchange: maps each (success, failure) ordering pair to
/// the matching cxchg intrinsic and converts the intrinsic's
/// (previous_value, succeeded) pair into `Ok(prev)` / `Err(current)`.
///
/// Pairs that are invalid per the C++ model — a failure ordering of
/// Release/AcqRel, or a failure ordering stronger than the success
/// ordering — panic instead of being silently adjusted.
2292 unsafe fn atomic_compare_exchange<T>(
2299 let (val, ok) = match (success, failure) {
2300 (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
2301 (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
2302 (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
2303 (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
2304 (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
2305 (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
2306 (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
2307 (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
2308 (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
2309 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
2310 (_, Release) => panic!("there is no such thing as a release failure ordering"),
2311 _ => panic!("a failure ordering can't be stronger than a success ordering"),
2313 if ok { Ok(val) } else { Err(val) }
2317 #[cfg(target_has_atomic = "8")]
/// Weak compare-exchange: identical ordering dispatch and result shaping
/// as `atomic_compare_exchange`, but wired to the cxchgweak intrinsics
/// (which back `compare_exchange_weak` on the public atomic types).
2318 unsafe fn atomic_compare_exchange_weak<T>(
2325 let (val, ok) = match (success, failure) {
2326 (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
2327 (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
2328 (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
2329 (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
2330 (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
2331 (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
2332 (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
2333 (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
2334 (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
2335 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
2336 (_, Release) => panic!("there is no such thing as a release failure ordering"),
2337 _ => panic!("a failure ordering can't be stronger than a success ordering"),
2339 if ok { Ok(val) } else { Err(val) }
2343 #[cfg(target_has_atomic = "8")]
/// Bitwise-AND RMW; returns the previous value.
2344 unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
2346 Acquire => intrinsics::atomic_and_acq(dst, val),
2347 Release => intrinsics::atomic_and_rel(dst, val),
2348 AcqRel => intrinsics::atomic_and_acqrel(dst, val),
2349 Relaxed => intrinsics::atomic_and_relaxed(dst, val),
2350 SeqCst => intrinsics::atomic_and(dst, val),
2355 #[cfg(target_has_atomic = "8")]
/// Bitwise-NAND RMW (!(old & val)); returns the previous value.
2356 unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
2358 Acquire => intrinsics::atomic_nand_acq(dst, val),
2359 Release => intrinsics::atomic_nand_rel(dst, val),
2360 AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
2361 Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
2362 SeqCst => intrinsics::atomic_nand(dst, val),
2367 #[cfg(target_has_atomic = "8")]
/// Bitwise-OR RMW; returns the previous value.
2368 unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
2370 Acquire => intrinsics::atomic_or_acq(dst, val),
2371 Release => intrinsics::atomic_or_rel(dst, val),
2372 AcqRel => intrinsics::atomic_or_acqrel(dst, val),
2373 Relaxed => intrinsics::atomic_or_relaxed(dst, val),
2374 SeqCst => intrinsics::atomic_or(dst, val),
2379 #[cfg(target_has_atomic = "8")]
/// Bitwise-XOR RMW; returns the previous value.
2380 unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
2382 Acquire => intrinsics::atomic_xor_acq(dst, val),
2383 Release => intrinsics::atomic_xor_rel(dst, val),
2384 AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
2385 Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
2386 SeqCst => intrinsics::atomic_xor(dst, val),
2390 /// returns the max value (signed comparison)
2392 #[cfg(target_has_atomic = "8")]
// Signed max RMW via the atomic_max_* intrinsics; the macro invocations
// above pass this only for the signed integer types.
2393 unsafe fn atomic_max<T>(dst: *mut T, val: T, order: Ordering) -> T {
2395 Acquire => intrinsics::atomic_max_acq(dst, val),
2396 Release => intrinsics::atomic_max_rel(dst, val),
2397 AcqRel => intrinsics::atomic_max_acqrel(dst, val),
2398 Relaxed => intrinsics::atomic_max_relaxed(dst, val),
2399 SeqCst => intrinsics::atomic_max(dst, val),
2403 /// returns the min value (signed comparison)
2405 #[cfg(target_has_atomic = "8")]
// Signed min RMW via the atomic_min_* intrinsics.
2406 unsafe fn atomic_min<T>(dst: *mut T, val: T, order: Ordering) -> T {
2408 Acquire => intrinsics::atomic_min_acq(dst, val),
2409 Release => intrinsics::atomic_min_rel(dst, val),
2410 AcqRel => intrinsics::atomic_min_acqrel(dst, val),
2411 Relaxed => intrinsics::atomic_min_relaxed(dst, val),
2412 SeqCst => intrinsics::atomic_min(dst, val),
2416 /// returns the max value (unsigned comparison)
2418 #[cfg(target_has_atomic = "8")]
// Unsigned max RMW via the atomic_umax_* intrinsics; passed by the macro
// invocations for the unsigned integer types. Returns the previous value.
2419 unsafe fn atomic_umax<T>(dst: *mut T, val: T, order: Ordering) -> T {
2421 Acquire => intrinsics::atomic_umax_acq(dst, val),
2422 Release => intrinsics::atomic_umax_rel(dst, val),
2423 AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
2424 Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
2425 SeqCst => intrinsics::atomic_umax(dst, val),
2429 /// returns the min value (unsigned comparison)
2431 #[cfg(target_has_atomic = "8")]
// Unsigned min RMW via the atomic_umin_* intrinsics. Returns the
// previous value.
2432 unsafe fn atomic_umin<T>(dst: *mut T, val: T, order: Ordering) -> T {
2434 Acquire => intrinsics::atomic_umin_acq(dst, val),
2435 Release => intrinsics::atomic_umin_rel(dst, val),
2436 AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
2437 Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
2438 SeqCst => intrinsics::atomic_umin(dst, val),
2442 /// An atomic fence.
2444 /// Depending on the specified order, a fence prevents the compiler and CPU from
2445 /// reordering certain types of memory operations around it.
2446 /// That creates synchronizes-with relationships between it and atomic operations
2447 /// or fences in other threads.
2449 /// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
2450 /// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
2451 /// exist operations X and Y, both operating on some atomic object 'M' such
2452 /// that A is sequenced before X, Y is sequenced before B and Y observes
2453 /// the change to M. This provides a happens-before dependence between A and B.
2456 /// Thread 1 Thread 2
2458 /// fence(Release); A --------------
2459 /// x.store(3, Relaxed); X --------- |
2462 /// -------------> Y if x.load(Relaxed) == 3 {
2463 /// |-------> B fence(Acquire);
2468 /// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
2471 /// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
2472 /// and [`Release`] semantics, participates in the global program order of the
2473 /// other [`SeqCst`] operations and/or fences.
2475 /// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
2479 /// Panics if `order` is [`Relaxed`].
2484 /// use std::sync::atomic::AtomicBool;
2485 /// use std::sync::atomic::fence;
2486 /// use std::sync::atomic::Ordering;
2488 /// // A mutual exclusion primitive based on spinlock.
2489 /// pub struct Mutex {
2490 /// flag: AtomicBool,
2494 /// pub fn new() -> Mutex {
2496 /// flag: AtomicBool::new(false),
2500 /// pub fn lock(&self) {
2501 /// while self.flag.compare_and_swap(false, true, Ordering::Relaxed) {}
2502 /// // This fence synchronizes-with store in `unlock`.
2503 /// fence(Ordering::Acquire);
2506 /// pub fn unlock(&self) {
2507 /// self.flag.store(false, Ordering::Release);
2512 /// [`Ordering`]: enum.Ordering.html
2513 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2514 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2515 /// [`Release`]: enum.Ordering.html#variant.Release
2516 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2517 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2519 #[stable(feature = "rust1", since = "1.0.0")]
2520 #[cfg_attr(target_arch = "wasm32", allow(unused_variables))]
2521 pub fn fence(order: Ordering) {
2522 // On wasm32 it looks like fences aren't implemented in LLVM yet in that
2523 // they will cause LLVM to abort. The wasm instruction set doesn't have
2524 // fences right now. There's discussion online about the best way for tools
2525 // to conventionally implement fences at
2526 // https://github.com/WebAssembly/tool-conventions/issues/59. We should
2527 // follow that discussion and implement a solution when one comes about!
2528 #[cfg(not(target_arch = "wasm32"))]
// Dispatch to the fence intrinsic for `order`; the fence intrinsics take
// no memory operands, so the (not-visible-here) unsafe block has no
// pointer-validity obligation.
2531 Acquire => intrinsics::atomic_fence_acq(),
2532 Release => intrinsics::atomic_fence_rel(),
2533 AcqRel => intrinsics::atomic_fence_acqrel(),
2534 SeqCst => intrinsics::atomic_fence(),
// A relaxed fence imposes no ordering at all, so it is rejected.
2535 Relaxed => panic!("there is no such thing as a relaxed fence"),
2540 /// A compiler memory fence.
2542 /// `compiler_fence` does not emit any machine code, but restricts the kinds
2543 /// of memory re-ordering the compiler is allowed to do. Specifically, depending on
2544 /// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
2545 /// or writes from before or after the call to the other side of the call to
2546 /// `compiler_fence`. Note that it does **not** prevent the *hardware*
2547 /// from doing such re-ordering. This is not a problem in a single-threaded
2548 /// execution context, but when other threads may modify memory at the same
2549 /// time, stronger synchronization primitives such as [`fence`] are required.
2551 /// The re-ordering prevented by the different ordering semantics are:
2553 /// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
2554 /// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
2555 /// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
2556 /// - with [`AcqRel`], both of the above rules are enforced.
2558 /// `compiler_fence` is generally only useful for preventing a thread from
2559 /// racing *with itself*. That is, if a given thread is executing one piece
2560 /// of code, and is then interrupted, and starts executing code elsewhere
2561 /// (while still in the same thread, and conceptually still on the same
2562 /// core). In traditional programs, this can only occur when a signal
2563 /// handler is registered. In more low-level code, such situations can also
2564 /// arise when handling interrupts, when implementing green threads with
2565 /// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
2566 /// discussion of [memory barriers].
2570 /// Panics if `order` is [`Relaxed`].
2574 /// Without `compiler_fence`, the `assert_eq!` in following code
2575 /// is *not* guaranteed to succeed, despite everything happening in a single thread.
2576 /// To see why, remember that the compiler is free to swap the stores to
2577 /// `IMPORTANT_VARIABLE` and `IS_READY` since they are both
2578 /// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
2579 /// after `IS_READY` is updated, then the signal handler will see
2580 /// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
2581 /// Using a `compiler_fence` remedies this situation.
2584 /// use std::sync::atomic::{AtomicBool, AtomicUsize};
2585 /// use std::sync::atomic::Ordering;
2586 /// use std::sync::atomic::compiler_fence;
2588 /// static IMPORTANT_VARIABLE: AtomicUsize = AtomicUsize::new(0);
2589 /// static IS_READY: AtomicBool = AtomicBool::new(false);
2592 /// IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
2593 /// // prevent earlier writes from being moved beyond this point
2594 /// compiler_fence(Ordering::Release);
2595 /// IS_READY.store(true, Ordering::Relaxed);
2598 /// fn signal_handler() {
2599 /// if IS_READY.load(Ordering::Relaxed) {
2600 /// assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
2605 /// [`fence`]: fn.fence.html
2606 /// [`Ordering`]: enum.Ordering.html
2607 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2608 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2609 /// [`Release`]: enum.Ordering.html#variant.Release
2610 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2611 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2612 /// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
2614 #[stable(feature = "compiler_fences", since = "1.21.0")]
2615 pub fn compiler_fence(order: Ordering) {
// Single-thread fence intrinsics constrain only compiler reordering —
// they emit no machine instructions (see doc comment above).
2618 Acquire => intrinsics::atomic_singlethreadfence_acq(),
2619 Release => intrinsics::atomic_singlethreadfence_rel(),
2620 AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
2621 SeqCst => intrinsics::atomic_singlethreadfence(),
2622 Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
2627 #[cfg(target_has_atomic_load_store = "8")]
2628 #[stable(feature = "atomic_debug", since = "1.3.0")]
2629 impl fmt::Debug for AtomicBool {
2630 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Debug-print a SeqCst snapshot of the current value; this is a racy
// observation, not a synchronization point for other accesses.
2631 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2635 #[cfg(target_has_atomic_load_store = "ptr")]
2636 #[stable(feature = "atomic_debug", since = "1.3.0")]
2637 impl<T> fmt::Debug for AtomicPtr<T> {
2638 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Formats the raw pointer value observed with SeqCst ordering.
2639 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2643 #[cfg(target_has_atomic_load_store = "ptr")]
2644 #[stable(feature = "atomic_pointer", since = "1.24.0")]
2645 impl<T> fmt::Pointer for AtomicPtr<T> {
2646 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// `{:p}` formatting of a SeqCst snapshot of the stored pointer.
2647 fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)