3 //! Atomic types provide primitive shared-memory communication between
4 //! threads, and are the building blocks of other concurrent
7 //! This module defines atomic versions of a select number of primitive
8 //! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
9 //! [`AtomicI8`], [`AtomicU16`], etc.
10 //! Atomic types present operations that, when used correctly, synchronize
11 //! updates between threads.
13 //! [`AtomicBool`]: struct.AtomicBool.html
14 //! [`AtomicIsize`]: struct.AtomicIsize.html
15 //! [`AtomicUsize`]: struct.AtomicUsize.html
16 //! [`AtomicI8`]: struct.AtomicI8.html
17 //! [`AtomicU16`]: struct.AtomicU16.html
19 //! Each method takes an [`Ordering`] which represents the strength of
20 //! the memory barrier for that operation. These orderings are the
21 //! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2].
23 //! [`Ordering`]: enum.Ordering.html
25 //! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order
26 //! [2]: ../../../nomicon/atomics.html
28 //! Atomic variables are safe to share between threads (they implement [`Sync`])
29 //! but they do not themselves provide the mechanism for sharing and follow the
30 //! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
31 //! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
32 //! atomically-reference-counted shared pointer).
34 //! [`Sync`]: ../../marker/trait.Sync.html
35 //! [arc]: ../../../std/sync/struct.Arc.html
37 //! Atomic types may be stored in static variables, initialized using
38 //! the constant initializers like [`AtomicBool::new`]. Atomic statics
39 //! are often used for lazy global initialization.
41 //! [`AtomicBool::new`]: struct.AtomicBool.html#method.new
45 //! All atomic types in this module are guaranteed to be [lock-free] if they're
46 //! available. This means they don't internally acquire a global mutex. Atomic
47 //! types and operations are not guaranteed to be wait-free. This means that
48 //! operations like `fetch_or` may be implemented with a compare-and-swap loop.
50 //! Atomic operations may be implemented at the instruction layer with
51 //! larger-size atomics. For example some platforms use 4-byte atomic
52 //! instructions to implement `AtomicI8`. Note that this emulation should not
53 //! have an impact on the correctness of code; it's just something to be aware of.
55 //! The atomic types in this module may not be available on all platforms. The
56 //! atomic types here are all widely available, however, and can generally be
57 //! relied upon to exist. Some notable exceptions are:
59 //! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or
60 //! `AtomicI64` types.
61 //! * ARM platforms like `armv5te` that aren't for Linux do not have any atomics
63 //! * ARM targets with `thumbv6m` do not have atomic operations at all.
65 //! Note that future platforms may be added that also do not have support for
66 //! some atomic operations. Maximally portable code will want to be careful
67 //! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are
68 //! generally the most portable, but even then they're not available everywhere.
69 //! For reference, the `std` library requires pointer-sized atomics, although
72 //! Currently you'll need to use `#[cfg(target_arch)]` primarily to
73 //! conditionally compile in code with atomics. There is an unstable
74 //! `#[cfg(target_has_atomic)]` as well which may be stabilized in the future.
76 //! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm
80 //! A simple spinlock:
83 //! use std::sync::Arc;
84 //! use std::sync::atomic::{AtomicUsize, Ordering};
88 //! let spinlock = Arc::new(AtomicUsize::new(1));
90 //! let spinlock_clone = spinlock.clone();
91 //! let thread = thread::spawn(move|| {
92 //! spinlock_clone.store(0, Ordering::SeqCst);
95 //! // Wait for the other thread to release the lock
96 //! while spinlock.load(Ordering::SeqCst) != 0 {}
98 //! if let Err(panic) = thread.join() {
99 //! println!("Thread had an error: {:?}", panic);
104 //! Keep a global count of live threads:
107 //! use std::sync::atomic::{AtomicUsize, Ordering};
109 //! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0);
111 //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
112 //! println!("live threads: {}", old_thread_count + 1);
115 // ignore-tidy-undocumented-unsafe
117 #![stable(feature = "rust1", since = "1.0.0")]
118 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
119 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))]
121 use self::Ordering::*;
123 use crate::intrinsics;
124 use crate::cell::UnsafeCell;
127 use crate::hint::spin_loop;
129 /// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
131 /// Upon receiving the spin-loop signal, the processor can optimize its behavior by, for example, saving
132 /// power or switching hyper-threads.
134 /// This function is different from [`std::thread::yield_now`] which directly yields to the
135 /// system's scheduler, whereas `spin_loop_hint` does not interact with the operating system.
137 /// Spin locks can be very efficient for short lock durations because they do not involve context
138 /// switches or interaction with the operating system. For long lock durations they become wasteful
139 /// however because they use CPU cycles for the entire lock duration, and using a
140 /// [`std::sync::Mutex`] is likely the better approach. If actively spinning for a long time is
141 /// required, e.g. because code polls a non-blocking API, calling [`std::thread::yield_now`]
142 /// or [`std::thread::sleep`] may be the best option.
144 /// **Note**: Spin locks are based on the underlying assumption that another thread will release
145 /// the lock 'soon'. In order for this to work, that other thread must run on a different CPU or
146 /// core (at least potentially). Spin locks do not work efficiently on single CPU / core platforms.
148 /// **Note**: On platforms that do not support receiving spin-loop hints this function does not
149 /// do anything at all.
151 /// [`std::thread::yield_now`]: ../../../std/thread/fn.yield_now.html
152 /// [`std::thread::sleep`]: ../../../std/thread/fn.sleep.html
153 /// [`std::sync::Mutex`]: ../../../std/sync/struct.Mutex.html
155 #[stable(feature = "spin_loop_hint", since = "1.24.0")]
156 pub fn spin_loop_hint() {
160 /// A boolean type which can be safely shared between threads.
162 /// This type has the same in-memory representation as a [`bool`].
164 /// [`bool`]: ../../../std/primitive.bool.html
165 #[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
166 #[stable(feature = "rust1", since = "1.0.0")]
168 pub struct AtomicBool {
172 #[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
173 #[stable(feature = "rust1", since = "1.0.0")]
174 impl Default for AtomicBool {
175 /// Creates an `AtomicBool` initialized to `false`.
176 fn default() -> Self {
181 // Send is implicitly implemented for AtomicBool.
182 #[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
183 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: all access to the contained value goes through atomic operations
// (the methods on `AtomicBool`), so sharing `&AtomicBool` across threads
// cannot produce a data race.
184 unsafe impl Sync for AtomicBool {}
186 /// A raw pointer type which can be safely shared between threads.
188 /// This type has the same in-memory representation as a `*mut T`.
189 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
190 #[stable(feature = "rust1", since = "1.0.0")]
191 #[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
192 #[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
193 #[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
194 pub struct AtomicPtr<T> {
195 p: UnsafeCell<*mut T>,
198 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
199 #[stable(feature = "rust1", since = "1.0.0")]
200 impl<T> Default for AtomicPtr<T> {
201 /// Creates a null `AtomicPtr<T>`.
202 fn default() -> AtomicPtr<T> {
203 AtomicPtr::new(crate::ptr::null_mut())
207 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
208 #[stable(feature = "rust1", since = "1.0.0")]
// NOTE(review): intentionally no `T: Send` bound — the atomic only stores and
// transfers the raw pointer value; dereferencing it is already `unsafe` and is
// the caller's responsibility. Presumably this mirrors `*mut T` being plain
// data; confirm against the module's safety docs.
209 unsafe impl<T> Send for AtomicPtr<T> {}
210 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
211 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: all access to the contained pointer goes through atomic operations,
// so concurrent use through shared references cannot cause data races on the
// pointer value itself (the pointee is never touched here).
212 unsafe impl<T> Sync for AtomicPtr<T> {}
214 /// Atomic memory orderings
216 /// Memory orderings specify the way atomic operations synchronize memory.
217 /// In its weakest [`Relaxed`][Ordering::Relaxed], only the memory directly touched by the
218 /// operation is synchronized. On the other hand, a store-load pair of [`SeqCst`][Ordering::SeqCst]
219 /// operations synchronize other memory while additionally preserving a total order of such
220 /// operations across all threads.
222 /// Rust's memory orderings are [the same as those of
223 /// C++20](https://en.cppreference.com/w/cpp/atomic/memory_order).
225 /// For more information see the [nomicon].
227 /// [nomicon]: ../../../nomicon/atomics.html
228 /// [Ordering::Relaxed]: #variant.Relaxed
229 /// [Ordering::SeqCst]: #variant.SeqCst
230 #[stable(feature = "rust1", since = "1.0.0")]
231 #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
234 /// No ordering constraints, only atomic operations.
236 /// Corresponds to [`memory_order_relaxed`] in C++20.
238 /// [`memory_order_relaxed`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering
239 #[stable(feature = "rust1", since = "1.0.0")]
241 /// When coupled with a store, all previous operations become ordered
242 /// before any load of this value with [`Acquire`] (or stronger) ordering.
243 /// In particular, all previous writes become visible to all threads
244 /// that perform an [`Acquire`] (or stronger) load of this value.
246 /// Notice that using this ordering for an operation that combines loads
247 /// and stores leads to a [`Relaxed`] load operation!
249 /// This ordering is only applicable for operations that can perform a store.
251 /// Corresponds to [`memory_order_release`] in C++20.
253 /// [`Release`]: #variant.Release
254 /// [`Acquire`]: #variant.Acquire
255 /// [`Relaxed`]: #variant.Relaxed
256 /// [`memory_order_release`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
257 #[stable(feature = "rust1", since = "1.0.0")]
259 /// When coupled with a load, if the loaded value was written by a store operation with
260 /// [`Release`] (or stronger) ordering, then all subsequent operations
261 /// become ordered after that store. In particular, all subsequent loads will see data
262 /// written before the store.
264 /// Notice that using this ordering for an operation that combines loads
265 /// and stores leads to a [`Relaxed`] store operation!
267 /// This ordering is only applicable for operations that can perform a load.
269 /// Corresponds to [`memory_order_acquire`] in C++20.
271 /// [`Acquire`]: #variant.Acquire
272 /// [`Release`]: #variant.Release
273 /// [`Relaxed`]: #variant.Relaxed
274 /// [`memory_order_acquire`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
275 #[stable(feature = "rust1", since = "1.0.0")]
277 /// Has the effects of both [`Acquire`] and [`Release`] together:
278 /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
280 /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
281 /// not performing any store and hence it has just [`Acquire`] ordering. However,
282 /// `AcqRel` will never perform [`Relaxed`] accesses.
284 /// This ordering is only applicable for operations that combine both loads and stores.
286 /// Corresponds to [`memory_order_acq_rel`] in C++20.
288 /// [`memory_order_acq_rel`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
289 /// [`Acquire`]: #variant.Acquire
290 /// [`Release`]: #variant.Release
291 /// [`Relaxed`]: #variant.Relaxed
292 #[stable(feature = "rust1", since = "1.0.0")]
294 /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
295 /// operations, respectively) with the additional guarantee that all threads see all
296 /// sequentially consistent operations in the same order.
298 /// Corresponds to [`memory_order_seq_cst`] in C++20.
300 /// [`memory_order_seq_cst`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering
301 /// [`Acquire`]: #variant.Acquire
302 /// [`Release`]: #variant.Release
303 /// [`AcqRel`]: #variant.AcqRel
304 #[stable(feature = "rust1", since = "1.0.0")]
308 /// An [`AtomicBool`] initialized to `false`.
310 /// [`AtomicBool`]: struct.AtomicBool.html
311 #[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
312 #[stable(feature = "rust1", since = "1.0.0")]
315 reason = "the `new` function is now preferred",
316 suggestion = "AtomicBool::new(false)",
318 pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
320 #[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
322 /// Creates a new `AtomicBool`.
327 /// use std::sync::atomic::AtomicBool;
329 /// let atomic_true = AtomicBool::new(true);
330 /// let atomic_false = AtomicBool::new(false);
333 #[stable(feature = "rust1", since = "1.0.0")]
334 pub const fn new(v: bool) -> AtomicBool {
335 AtomicBool { v: UnsafeCell::new(v as u8) }
338 /// Returns a mutable reference to the underlying [`bool`].
340 /// This is safe because the mutable reference guarantees that no other threads are
341 /// concurrently accessing the atomic data.
343 /// [`bool`]: ../../../std/primitive.bool.html
348 /// use std::sync::atomic::{AtomicBool, Ordering};
350 /// let mut some_bool = AtomicBool::new(true);
351 /// assert_eq!(*some_bool.get_mut(), true);
352 /// *some_bool.get_mut() = false;
353 /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
356 #[stable(feature = "atomic_access", since = "1.15.0")]
357 pub fn get_mut(&mut self) -> &mut bool {
358 unsafe { &mut *(self.v.get() as *mut bool) }
361 /// Consumes the atomic and returns the contained value.
363 /// This is safe because passing `self` by value guarantees that no other threads are
364 /// concurrently accessing the atomic data.
369 /// use std::sync::atomic::AtomicBool;
371 /// let some_bool = AtomicBool::new(true);
372 /// assert_eq!(some_bool.into_inner(), true);
375 #[stable(feature = "atomic_access", since = "1.15.0")]
376 pub fn into_inner(self) -> bool {
377 self.v.into_inner() != 0
380 /// Loads a value from the bool.
382 /// `load` takes an [`Ordering`] argument which describes the memory ordering
383 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
387 /// Panics if `order` is [`Release`] or [`AcqRel`].
389 /// [`Ordering`]: enum.Ordering.html
390 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
391 /// [`Release`]: enum.Ordering.html#variant.Release
392 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
393 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
394 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
399 /// use std::sync::atomic::{AtomicBool, Ordering};
401 /// let some_bool = AtomicBool::new(true);
403 /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
406 #[stable(feature = "rust1", since = "1.0.0")]
407 pub fn load(&self, order: Ordering) -> bool {
408 unsafe { atomic_load(self.v.get(), order) != 0 }
411 /// Stores a value into the bool.
413 /// `store` takes an [`Ordering`] argument which describes the memory ordering
414 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
418 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
420 /// [`Ordering`]: enum.Ordering.html
421 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
422 /// [`Release`]: enum.Ordering.html#variant.Release
423 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
424 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
425 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
430 /// use std::sync::atomic::{AtomicBool, Ordering};
432 /// let some_bool = AtomicBool::new(true);
434 /// some_bool.store(false, Ordering::Relaxed);
435 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
438 #[stable(feature = "rust1", since = "1.0.0")]
439 pub fn store(&self, val: bool, order: Ordering) {
441 atomic_store(self.v.get(), val as u8, order);
445 /// Stores a value into the bool, returning the previous value.
447 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
448 /// of this operation. All ordering modes are possible. Note that using
449 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
450 /// using [`Release`] makes the load part [`Relaxed`].
452 /// [`Ordering`]: enum.Ordering.html
453 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
454 /// [`Release`]: enum.Ordering.html#variant.Release
455 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
460 /// use std::sync::atomic::{AtomicBool, Ordering};
462 /// let some_bool = AtomicBool::new(true);
464 /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
465 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
468 #[stable(feature = "rust1", since = "1.0.0")]
469 #[cfg(target_has_atomic = "8")]
470 pub fn swap(&self, val: bool, order: Ordering) -> bool {
471 unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
474 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
476 /// The return value is always the previous value. If it is equal to `current`, then the value
479 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
480 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
481 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
482 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
483 /// happens, and using [`Release`] makes the load part [`Relaxed`].
485 /// [`Ordering`]: enum.Ordering.html
486 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
487 /// [`Release`]: enum.Ordering.html#variant.Release
488 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
489 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
490 /// [`bool`]: ../../../std/primitive.bool.html
495 /// use std::sync::atomic::{AtomicBool, Ordering};
497 /// let some_bool = AtomicBool::new(true);
499 /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
500 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
502 /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
503 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
506 #[stable(feature = "rust1", since = "1.0.0")]
507 #[cfg(target_has_atomic = "8")]
508 pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
509 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
515 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
517 /// The return value is a result indicating whether the new value was written and containing
518 /// the previous value. On success this value is guaranteed to be equal to `current`.
520 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
521 /// ordering of this operation. The first describes the required ordering if the
522 /// operation succeeds while the second describes the required ordering when the
523 /// operation fails. Using [`Acquire`] as success ordering makes the store part
524 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
525 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
526 /// and must be equivalent to or weaker than the success ordering.
529 /// [`bool`]: ../../../std/primitive.bool.html
530 /// [`Ordering`]: enum.Ordering.html
531 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
532 /// [`Release`]: enum.Ordering.html#variant.Release
533 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
534 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
539 /// use std::sync::atomic::{AtomicBool, Ordering};
541 /// let some_bool = AtomicBool::new(true);
543 /// assert_eq!(some_bool.compare_exchange(true,
545 /// Ordering::Acquire,
546 /// Ordering::Relaxed),
548 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
550 /// assert_eq!(some_bool.compare_exchange(true, true,
551 /// Ordering::SeqCst,
552 /// Ordering::Acquire),
554 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
557 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
558 #[cfg(target_has_atomic = "8")]
559 pub fn compare_exchange(&self,
564 -> Result<bool, bool> {
566 atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
569 Err(x) => Err(x != 0),
573 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
575 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
576 /// comparison succeeds, which can result in more efficient code on some platforms. The
577 /// return value is a result indicating whether the new value was written and containing the
580 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
581 /// ordering of this operation. The first describes the required ordering if the
582 /// operation succeeds while the second describes the required ordering when the
583 /// operation fails. Using [`Acquire`] as success ordering makes the store part
584 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
585 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
586 /// and must be equivalent to or weaker than the success ordering.
588 /// [`bool`]: ../../../std/primitive.bool.html
589 /// [`compare_exchange`]: #method.compare_exchange
590 /// [`Ordering`]: enum.Ordering.html
591 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
592 /// [`Release`]: enum.Ordering.html#variant.Release
593 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
594 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
599 /// use std::sync::atomic::{AtomicBool, Ordering};
601 /// let val = AtomicBool::new(false);
604 /// let mut old = val.load(Ordering::Relaxed);
606 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
608 /// Err(x) => old = x,
613 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
614 #[cfg(target_has_atomic = "8")]
615 pub fn compare_exchange_weak(&self,
620 -> Result<bool, bool> {
622 atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
625 Err(x) => Err(x != 0),
629 /// Logical "and" with a boolean value.
631 /// Performs a logical "and" operation on the current value and the argument `val`, and sets
632 /// the new value to the result.
634 /// Returns the previous value.
636 /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
637 /// of this operation. All ordering modes are possible. Note that using
638 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
639 /// using [`Release`] makes the load part [`Relaxed`].
641 /// [`Ordering`]: enum.Ordering.html
642 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
643 /// [`Release`]: enum.Ordering.html#variant.Release
644 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
649 /// use std::sync::atomic::{AtomicBool, Ordering};
651 /// let foo = AtomicBool::new(true);
652 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
653 /// assert_eq!(foo.load(Ordering::SeqCst), false);
655 /// let foo = AtomicBool::new(true);
656 /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
657 /// assert_eq!(foo.load(Ordering::SeqCst), true);
659 /// let foo = AtomicBool::new(false);
660 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
661 /// assert_eq!(foo.load(Ordering::SeqCst), false);
664 #[stable(feature = "rust1", since = "1.0.0")]
665 #[cfg(target_has_atomic = "8")]
666 pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
667 unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
670 /// Logical "nand" with a boolean value.
672 /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
673 /// the new value to the result.
675 /// Returns the previous value.
677 /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
678 /// of this operation. All ordering modes are possible. Note that using
679 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
680 /// using [`Release`] makes the load part [`Relaxed`].
682 /// [`Ordering`]: enum.Ordering.html
683 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
684 /// [`Release`]: enum.Ordering.html#variant.Release
685 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
690 /// use std::sync::atomic::{AtomicBool, Ordering};
692 /// let foo = AtomicBool::new(true);
693 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
694 /// assert_eq!(foo.load(Ordering::SeqCst), true);
696 /// let foo = AtomicBool::new(true);
697 /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
698 /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
699 /// assert_eq!(foo.load(Ordering::SeqCst), false);
701 /// let foo = AtomicBool::new(false);
702 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
703 /// assert_eq!(foo.load(Ordering::SeqCst), true);
706 #[stable(feature = "rust1", since = "1.0.0")]
707 #[cfg(target_has_atomic = "8")]
708 pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
709 // We can't use atomic_nand here because it can result in a bool with
710 // an invalid value. This happens because the atomic operation is done
711 // with an 8-bit integer internally, which would set the upper 7 bits.
712 // So we just use fetch_xor or swap instead.
715 // We must invert the bool.
716 self.fetch_xor(true, order)
718 // !(x & false) == true
719 // We must set the bool to true.
720 self.swap(true, order)
724 /// Logical "or" with a boolean value.
726 /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
727 /// new value to the result.
729 /// Returns the previous value.
731 /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
732 /// of this operation. All ordering modes are possible. Note that using
733 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
734 /// using [`Release`] makes the load part [`Relaxed`].
736 /// [`Ordering`]: enum.Ordering.html
737 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
738 /// [`Release`]: enum.Ordering.html#variant.Release
739 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
744 /// use std::sync::atomic::{AtomicBool, Ordering};
746 /// let foo = AtomicBool::new(true);
747 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
748 /// assert_eq!(foo.load(Ordering::SeqCst), true);
750 /// let foo = AtomicBool::new(true);
751 /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
752 /// assert_eq!(foo.load(Ordering::SeqCst), true);
754 /// let foo = AtomicBool::new(false);
755 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
756 /// assert_eq!(foo.load(Ordering::SeqCst), false);
759 #[stable(feature = "rust1", since = "1.0.0")]
760 #[cfg(target_has_atomic = "8")]
761 pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
762 unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
765 /// Logical "xor" with a boolean value.
767 /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
768 /// the new value to the result.
770 /// Returns the previous value.
772 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
773 /// of this operation. All ordering modes are possible. Note that using
774 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
775 /// using [`Release`] makes the load part [`Relaxed`].
777 /// [`Ordering`]: enum.Ordering.html
778 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
779 /// [`Release`]: enum.Ordering.html#variant.Release
780 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
785 /// use std::sync::atomic::{AtomicBool, Ordering};
787 /// let foo = AtomicBool::new(true);
788 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
789 /// assert_eq!(foo.load(Ordering::SeqCst), true);
791 /// let foo = AtomicBool::new(true);
792 /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
793 /// assert_eq!(foo.load(Ordering::SeqCst), false);
795 /// let foo = AtomicBool::new(false);
796 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
797 /// assert_eq!(foo.load(Ordering::SeqCst), false);
800 #[stable(feature = "rust1", since = "1.0.0")]
801 #[cfg(target_has_atomic = "8")]
802 pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
803 unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
806 /// Returns a mutable pointer to the underlying [`bool`].
808 /// Doing non-atomic reads and writes on the resulting integer can be a data race.
809 /// This method is mostly useful for FFI, where the function signature may use
810 /// `*mut bool` instead of `&AtomicBool`.
812 /// [`bool`]: ../../../std/primitive.bool.html
816 /// ```ignore (extern-declaration)
818 /// use std::sync::atomic::AtomicBool;
820 /// fn my_atomic_op(arg: *mut bool);
823 /// let mut atomic = AtomicBool::new(true);
825 /// my_atomic_op(atomic.as_mut_ptr());
830 #[unstable(feature = "atomic_mut_ptr",
831 reason = "recently added",
833 pub fn as_mut_ptr(&self) -> *mut bool {
834 self.v.get() as *mut bool
838 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
839 impl<T> AtomicPtr<T> {
840 /// Creates a new `AtomicPtr`.
845 /// use std::sync::atomic::AtomicPtr;
847 /// let ptr = &mut 5;
848 /// let atomic_ptr = AtomicPtr::new(ptr);
851 #[stable(feature = "rust1", since = "1.0.0")]
852 pub const fn new(p: *mut T) -> AtomicPtr<T> {
853 AtomicPtr { p: UnsafeCell::new(p) }
856 /// Returns a mutable reference to the underlying pointer.
858 /// This is safe because the mutable reference guarantees that no other threads are
859 /// concurrently accessing the atomic data.
864 /// use std::sync::atomic::{AtomicPtr, Ordering};
866 /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
867 /// *atomic_ptr.get_mut() = &mut 5;
868 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
871 #[stable(feature = "atomic_access", since = "1.15.0")]
872 pub fn get_mut(&mut self) -> &mut *mut T {
873 unsafe { &mut *self.p.get() }
876 /// Consumes the atomic and returns the contained value.
878 /// This is safe because passing `self` by value guarantees that no other threads are
879 /// concurrently accessing the atomic data.
884 /// use std::sync::atomic::AtomicPtr;
886 /// let atomic_ptr = AtomicPtr::new(&mut 5);
887 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
890 #[stable(feature = "atomic_access", since = "1.15.0")]
891 pub fn into_inner(self) -> *mut T {
895 /// Loads a value from the pointer.
897 /// `load` takes an [`Ordering`] argument which describes the memory ordering
898 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
902 /// Panics if `order` is [`Release`] or [`AcqRel`].
904 /// [`Ordering`]: enum.Ordering.html
905 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
906 /// [`Release`]: enum.Ordering.html#variant.Release
907 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
908 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
909 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
914 /// use std::sync::atomic::{AtomicPtr, Ordering};
916 /// let ptr = &mut 5;
917 /// let some_ptr = AtomicPtr::new(ptr);
919 /// let value = some_ptr.load(Ordering::Relaxed);
922 #[stable(feature = "rust1", since = "1.0.0")]
923 pub fn load(&self, order: Ordering) -> *mut T {
924 unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
927 /// Stores a value into the pointer.
929 /// `store` takes an [`Ordering`] argument which describes the memory ordering
930 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
934 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
936 /// [`Ordering`]: enum.Ordering.html
937 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
938 /// [`Release`]: enum.Ordering.html#variant.Release
939 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
940 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
941 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
946 /// use std::sync::atomic::{AtomicPtr, Ordering};
948 /// let ptr = &mut 5;
949 /// let some_ptr = AtomicPtr::new(ptr);
951 /// let other_ptr = &mut 10;
953 /// some_ptr.store(other_ptr, Ordering::Relaxed);
956 #[stable(feature = "rust1", since = "1.0.0")]
957 pub fn store(&self, ptr: *mut T, order: Ordering) {
959 atomic_store(self.p.get() as *mut usize, ptr as usize, order);
963 /// Stores a value into the pointer, returning the previous value.
965 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
966 /// of this operation. All ordering modes are possible. Note that using
967 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
968 /// using [`Release`] makes the load part [`Relaxed`].
970 /// [`Ordering`]: enum.Ordering.html
971 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
972 /// [`Release`]: enum.Ordering.html#variant.Release
973 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
978 /// use std::sync::atomic::{AtomicPtr, Ordering};
980 /// let ptr = &mut 5;
981 /// let some_ptr = AtomicPtr::new(ptr);
983 /// let other_ptr = &mut 10;
985 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
988 #[stable(feature = "rust1", since = "1.0.0")]
989 #[cfg(target_has_atomic = "ptr")]
990 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
991 unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
994 /// Stores a value into the pointer if the current value is the same as the `current` value.
996 /// The return value is always the previous value. If it is equal to `current`, then the value
999 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1000 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
1001 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1002 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1003 /// happens, and using [`Release`] makes the load part [`Relaxed`].
1005 /// [`Ordering`]: enum.Ordering.html
1006 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1007 /// [`Release`]: enum.Ordering.html#variant.Release
1008 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1009 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1014 /// use std::sync::atomic::{AtomicPtr, Ordering};
1016 /// let ptr = &mut 5;
1017 /// let some_ptr = AtomicPtr::new(ptr);
1019 /// let other_ptr = &mut 10;
1021 /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
1024 #[stable(feature = "rust1", since = "1.0.0")]
1025 #[cfg(target_has_atomic = "ptr")]
1026 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
1027 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
1033 /// Stores a value into the pointer if the current value is the same as the `current` value.
1035 /// The return value is a result indicating whether the new value was written and containing
1036 /// the previous value. On success this value is guaranteed to be equal to `current`.
1038 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1039 /// ordering of this operation. The first describes the required ordering if the
1040 /// operation succeeds while the second describes the required ordering when the
1041 /// operation fails. Using [`Acquire`] as success ordering makes the store part
1042 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1043 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1044 /// and must be equivalent to or weaker than the success ordering.
1046 /// [`Ordering`]: enum.Ordering.html
1047 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1048 /// [`Release`]: enum.Ordering.html#variant.Release
1049 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1050 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1055 /// use std::sync::atomic::{AtomicPtr, Ordering};
1057 /// let ptr = &mut 5;
1058 /// let some_ptr = AtomicPtr::new(ptr);
1060 /// let other_ptr = &mut 10;
1062 /// let value = some_ptr.compare_exchange(ptr, other_ptr,
1063 /// Ordering::SeqCst, Ordering::Relaxed);
1066 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1067 #[cfg(target_has_atomic = "ptr")]
1068 pub fn compare_exchange(&self,
1073 -> Result<*mut T, *mut T> {
1075 let res = atomic_compare_exchange(self.p.get() as *mut usize,
1081 Ok(x) => Ok(x as *mut T),
1082 Err(x) => Err(x as *mut T),
1087 /// Stores a value into the pointer if the current value is the same as the `current` value.
1089 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
1090 /// comparison succeeds, which can result in more efficient code on some platforms. The
1091 /// return value is a result indicating whether the new value was written and containing the
1094 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1095 /// ordering of this operation. The first describes the required ordering if the
1096 /// operation succeeds while the second describes the required ordering when the
1097 /// operation fails. Using [`Acquire`] as success ordering makes the store part
1098 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1099 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1100 /// and must be equivalent to or weaker than the success ordering.
1102 /// [`compare_exchange`]: #method.compare_exchange
1103 /// [`Ordering`]: enum.Ordering.html
1104 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1105 /// [`Release`]: enum.Ordering.html#variant.Release
1106 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1107 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1112 /// use std::sync::atomic::{AtomicPtr, Ordering};
1114 /// let some_ptr = AtomicPtr::new(&mut 5);
1116 /// let new = &mut 10;
1117 /// let mut old = some_ptr.load(Ordering::Relaxed);
1119 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1121 /// Err(x) => old = x,
1126 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1127 #[cfg(target_has_atomic = "ptr")]
1128 pub fn compare_exchange_weak(&self,
1133 -> Result<*mut T, *mut T> {
1135 let res = atomic_compare_exchange_weak(self.p.get() as *mut usize,
1141 Ok(x) => Ok(x as *mut T),
1142 Err(x) => Err(x as *mut T),
1148 #[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
1149 #[stable(feature = "atomic_bool_from", since = "1.24.0")]
1150 impl From<bool> for AtomicBool {
1151 /// Converts a `bool` into an `AtomicBool`.
1156 /// use std::sync::atomic::AtomicBool;
1157 /// let atomic_bool = AtomicBool::from(true);
1158 /// assert_eq!(format!("{:?}", atomic_bool), "true")
1161 fn from(b: bool) -> Self { Self::new(b) }
1164 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
1165 #[stable(feature = "atomic_from", since = "1.23.0")]
1166 impl<T> From<*mut T> for AtomicPtr<T> {
1168 fn from(p: *mut T) -> Self { Self::new(p) }
1171 #[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
1172 macro_rules! atomic_int {
1177 $stable_access:meta,
1180 $stable_init_const:meta,
1181 $s_int_type:expr, $int_ref:expr,
1182 $extra_feature:expr,
1183 $min_fn:ident, $max_fn:ident,
1186 $int_type:ident $atomic_type:ident $atomic_init:ident) => {
1187 /// An integer type which can be safely shared between threads.
1189 /// This type has the same in-memory representation as the underlying
1190 /// integer type, [`
1191 #[doc = $s_int_type]
1194 /// ). For more about the differences between atomic types and
1195 /// non-atomic types as well as information about the portability of
1196 /// this type, please see the [module-level documentation].
1198 /// [module-level documentation]: index.html
1200 #[repr(C, align($align))]
1201 pub struct $atomic_type {
1202 v: UnsafeCell<$int_type>,
1205 /// An atomic integer initialized to `0`.
1206 #[$stable_init_const]
1209 reason = "the `new` function is now preferred",
1210 suggestion = $atomic_new,
1212 pub const $atomic_init: $atomic_type = $atomic_type::new(0);
1215 impl Default for $atomic_type {
1216 fn default() -> Self {
1217 Self::new(Default::default())
1222 impl From<$int_type> for $atomic_type {
1225 "Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`."),
1227 fn from(v: $int_type) -> Self { Self::new(v) }
1232 impl fmt::Debug for $atomic_type {
1233 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1234 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
1238 // Send is implicitly implemented.
1240 unsafe impl Sync for $atomic_type {}
1244 concat!("Creates a new atomic integer.
1249 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1251 let atomic_forty_two = ", stringify!($atomic_type), "::new(42);
1255 pub const fn new(v: $int_type) -> Self {
1256 $atomic_type {v: UnsafeCell::new(v)}
1261 concat!("Returns a mutable reference to the underlying integer.
1263 This is safe because the mutable reference guarantees that no other threads are
1264 concurrently accessing the atomic data.
1269 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1271 let mut some_var = ", stringify!($atomic_type), "::new(10);
1272 assert_eq!(*some_var.get_mut(), 10);
1273 *some_var.get_mut() = 5;
1274 assert_eq!(some_var.load(Ordering::SeqCst), 5);
1278 pub fn get_mut(&mut self) -> &mut $int_type {
1279 unsafe { &mut *self.v.get() }
1284 concat!("Consumes the atomic and returns the contained value.
1286 This is safe because passing `self` by value guarantees that no other threads are
1287 concurrently accessing the atomic data.
1292 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1294 let some_var = ", stringify!($atomic_type), "::new(5);
1295 assert_eq!(some_var.into_inner(), 5);
1299 pub fn into_inner(self) -> $int_type {
1305 concat!("Loads a value from the atomic integer.
1307 `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1308 Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1312 Panics if `order` is [`Release`] or [`AcqRel`].
1314 [`Ordering`]: enum.Ordering.html
1315 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1316 [`Release`]: enum.Ordering.html#variant.Release
1317 [`Acquire`]: enum.Ordering.html#variant.Acquire
1318 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1319 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1324 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1326 let some_var = ", stringify!($atomic_type), "::new(5);
1328 assert_eq!(some_var.load(Ordering::Relaxed), 5);
1332 pub fn load(&self, order: Ordering) -> $int_type {
1333 unsafe { atomic_load(self.v.get(), order) }
1338 concat!("Stores a value into the atomic integer.
1340 `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1341 Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
1345 Panics if `order` is [`Acquire`] or [`AcqRel`].
1347 [`Ordering`]: enum.Ordering.html
1348 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1349 [`Release`]: enum.Ordering.html#variant.Release
1350 [`Acquire`]: enum.Ordering.html#variant.Acquire
1351 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1352 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1357 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1359 let some_var = ", stringify!($atomic_type), "::new(5);
1361 some_var.store(10, Ordering::Relaxed);
1362 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1366 pub fn store(&self, val: $int_type, order: Ordering) {
1367 unsafe { atomic_store(self.v.get(), val, order); }
1372 concat!("Stores a value into the atomic integer, returning the previous value.
1374 `swap` takes an [`Ordering`] argument which describes the memory ordering
1375 of this operation. All ordering modes are possible. Note that using
1376 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1377 using [`Release`] makes the load part [`Relaxed`].
1379 [`Ordering`]: enum.Ordering.html
1380 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1381 [`Release`]: enum.Ordering.html#variant.Release
1382 [`Acquire`]: enum.Ordering.html#variant.Acquire
1387 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1389 let some_var = ", stringify!($atomic_type), "::new(5);
1391 assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
1396 pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
1397 unsafe { atomic_swap(self.v.get(), val, order) }
1402 concat!("Stores a value into the atomic integer if the current value is the same as
1403 the `current` value.
1405 The return value is always the previous value. If it is equal to `current`, then the
1408 `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1409 ordering of this operation. Notice that even when using [`AcqRel`], the operation
1410 might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1411 Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1412 happens, and using [`Release`] makes the load part [`Relaxed`].
1414 [`Ordering`]: enum.Ordering.html
1415 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1416 [`Release`]: enum.Ordering.html#variant.Release
1417 [`Acquire`]: enum.Ordering.html#variant.Acquire
1418 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1423 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1425 let some_var = ", stringify!($atomic_type), "::new(5);
1427 assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
1428 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1430 assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
1431 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1436 pub fn compare_and_swap(&self,
1439 order: Ordering) -> $int_type {
1440 match self.compare_exchange(current,
1443 strongest_failure_ordering(order)) {
1451 concat!("Stores a value into the atomic integer if the current value is the same as
1452 the `current` value.
1454 The return value is a result indicating whether the new value was written and
1455 containing the previous value. On success this value is guaranteed to be equal to
1458 `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1459 ordering of this operation. The first describes the required ordering if the
1460 operation succeeds while the second describes the required ordering when the
1461 operation fails. Using [`Acquire`] as success ordering makes the store part
1462 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1463 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1464 and must be equivalent to or weaker than the success ordering.
1466 [`Ordering`]: enum.Ordering.html
1467 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1468 [`Release`]: enum.Ordering.html#variant.Release
1469 [`Acquire`]: enum.Ordering.html#variant.Acquire
1470 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1475 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1477 let some_var = ", stringify!($atomic_type), "::new(5);
1479 assert_eq!(some_var.compare_exchange(5, 10,
1483 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1485 assert_eq!(some_var.compare_exchange(6, 12,
1489 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1494 pub fn compare_exchange(&self,
1498 failure: Ordering) -> Result<$int_type, $int_type> {
1499 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
1504 concat!("Stores a value into the atomic integer if the current value is the same as
1505 the `current` value.
1507 Unlike [`compare_exchange`], this function is allowed to spuriously fail even
1508 when the comparison succeeds, which can result in more efficient code on some
1509 platforms. The return value is a result indicating whether the new value was
1510 written and containing the previous value.
1512 `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1513 ordering of this operation. The first describes the required ordering if the
1514 operation succeeds while the second describes the required ordering when the
1515 operation fails. Using [`Acquire`] as success ordering makes the store part
1516 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1517 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1518 and must be equivalent to or weaker than the success ordering.
1520 [`compare_exchange`]: #method.compare_exchange
1521 [`Ordering`]: enum.Ordering.html
1522 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1523 [`Release`]: enum.Ordering.html#variant.Release
1524 [`Acquire`]: enum.Ordering.html#variant.Acquire
1525 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1530 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1532 let val = ", stringify!($atomic_type), "::new(4);
1534 let mut old = val.load(Ordering::Relaxed);
1537 match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1546 pub fn compare_exchange_weak(&self,
1550 failure: Ordering) -> Result<$int_type, $int_type> {
1552 atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
1558 concat!("Adds to the current value, returning the previous value.
1560 This operation wraps around on overflow.
1562 `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
1563 of this operation. All ordering modes are possible. Note that using
1564 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1565 using [`Release`] makes the load part [`Relaxed`].
1567 [`Ordering`]: enum.Ordering.html
1568 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1569 [`Release`]: enum.Ordering.html#variant.Release
1570 [`Acquire`]: enum.Ordering.html#variant.Acquire
1575 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1577 let foo = ", stringify!($atomic_type), "::new(0);
1578 assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
1579 assert_eq!(foo.load(Ordering::SeqCst), 10);
1584 pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
1585 unsafe { atomic_add(self.v.get(), val, order) }
1590 concat!("Subtracts from the current value, returning the previous value.
1592 This operation wraps around on overflow.
1594 `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
1595 of this operation. All ordering modes are possible. Note that using
1596 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1597 using [`Release`] makes the load part [`Relaxed`].
1599 [`Ordering`]: enum.Ordering.html
1600 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1601 [`Release`]: enum.Ordering.html#variant.Release
1602 [`Acquire`]: enum.Ordering.html#variant.Acquire
1607 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1609 let foo = ", stringify!($atomic_type), "::new(20);
1610 assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
1611 assert_eq!(foo.load(Ordering::SeqCst), 10);
1616 pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
1617 unsafe { atomic_sub(self.v.get(), val, order) }
1622 concat!("Bitwise \"and\" with the current value.
1624 Performs a bitwise \"and\" operation on the current value and the argument `val`, and
1625 sets the new value to the result.
1627 Returns the previous value.
1629 `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
1630 of this operation. All ordering modes are possible. Note that using
1631 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1632 using [`Release`] makes the load part [`Relaxed`].
1634 [`Ordering`]: enum.Ordering.html
1635 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1636 [`Release`]: enum.Ordering.html#variant.Release
1637 [`Acquire`]: enum.Ordering.html#variant.Acquire
1642 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1644 let foo = ", stringify!($atomic_type), "::new(0b101101);
1645 assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
1646 assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
1651 pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
1652 unsafe { atomic_and(self.v.get(), val, order) }
1657 concat!("Bitwise \"nand\" with the current value.
1659 Performs a bitwise \"nand\" operation on the current value and the argument `val`, and
1660 sets the new value to the result.
1662 Returns the previous value.
1664 `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
1665 of this operation. All ordering modes are possible. Note that using
1666 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1667 using [`Release`] makes the load part [`Relaxed`].
1669 [`Ordering`]: enum.Ordering.html
1670 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1671 [`Release`]: enum.Ordering.html#variant.Release
1672 [`Acquire`]: enum.Ordering.html#variant.Acquire
1677 ", $extra_feature, "
1678 use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1680 let foo = ", stringify!($atomic_type), "::new(0x13);
1681 assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
1682 assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
1687 pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
1688 unsafe { atomic_nand(self.v.get(), val, order) }
1693 concat!("Bitwise \"or\" with the current value.
1695 Performs a bitwise \"or\" operation on the current value and the argument `val`, and
1696 sets the new value to the result.
1698 Returns the previous value.
1700 `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
1701 of this operation. All ordering modes are possible. Note that using
1702 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1703 using [`Release`] makes the load part [`Relaxed`].
1705 [`Ordering`]: enum.Ordering.html
1706 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1707 [`Release`]: enum.Ordering.html#variant.Release
1708 [`Acquire`]: enum.Ordering.html#variant.Acquire
1713 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1715 let foo = ", stringify!($atomic_type), "::new(0b101101);
1716 assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
1717 assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
1722 pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
1723 unsafe { atomic_or(self.v.get(), val, order) }
1728 concat!("Bitwise \"xor\" with the current value.
1730 Performs a bitwise \"xor\" operation on the current value and the argument `val`, and
1731 sets the new value to the result.
1733 Returns the previous value.
1735 `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
1736 of this operation. All ordering modes are possible. Note that using
1737 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1738 using [`Release`] makes the load part [`Relaxed`].
1740 [`Ordering`]: enum.Ordering.html
1741 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1742 [`Release`]: enum.Ordering.html#variant.Release
1743 [`Acquire`]: enum.Ordering.html#variant.Acquire
1748 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1750 let foo = ", stringify!($atomic_type), "::new(0b101101);
1751 assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
1752 assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
1757 pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
1758 unsafe { atomic_xor(self.v.get(), val, order) }
1763 concat!("Fetches the value, and applies a function to it that returns an optional
1764 new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
1765 `Err(previous_value)`.
1767 Note: This may call the function multiple times if the value has been changed by other threads in
1768 the meantime, as long as the function returns `Some(_)`, but the function will have been applied
1769 only once to the stored value.
1771 `fetch_update` takes two [`Ordering`] arguments to describe the memory
1772 ordering of this operation. The first describes the required ordering for loads
1773 and failed updates while the second describes the required ordering when the
1774 operation finally succeeds. Beware that this is different from the two
1775 modes in [`compare_exchange`]!
1777 Using [`Acquire`] as success ordering makes the store part
1778 of this operation [`Relaxed`], and using [`Release`] makes the final successful load
1779 [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1780 and must be equivalent to or weaker than the success ordering.
1782 [`bool`]: ../../../std/primitive.bool.html
1783 [`compare_exchange`]: #method.compare_exchange
1784 [`Ordering`]: enum.Ordering.html
1785 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1786 [`Release`]: enum.Ordering.html#variant.Release
1787 [`Acquire`]: enum.Ordering.html#variant.Acquire
1788 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1793 #![feature(no_more_cas)]
1794 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1796 let x = ", stringify!($atomic_type), "::new(7);
1797 assert_eq!(x.fetch_update(|_| None, Ordering::SeqCst, Ordering::SeqCst), Err(7));
1798 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(7));
1799 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(8));
1800 assert_eq!(x.load(Ordering::SeqCst), 9);
1803 #[unstable(feature = "no_more_cas",
1804 reason = "no more CAS loops in user code",
1807 pub fn fetch_update<F>(&self,
1809 fetch_order: Ordering,
1810 set_order: Ordering) -> Result<$int_type, $int_type>
1811 where F: FnMut($int_type) -> Option<$int_type> {
1812 let mut prev = self.load(fetch_order);
1813 while let Some(next) = f(prev) {
1814 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
1815 x @ Ok(_) => return x,
1816 Err(next_prev) => prev = next_prev
1824 concat!("Maximum with the current value.
1826 Finds the maximum of the current value and the argument `val`, and
1827 sets the new value to the result.
1829 Returns the previous value.
1831 `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
1832 of this operation. All ordering modes are possible. Note that using
1833 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1834 using [`Release`] makes the load part [`Relaxed`].
1836 [`Ordering`]: enum.Ordering.html
1837 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1838 [`Release`]: enum.Ordering.html#variant.Release
1839 [`Acquire`]: enum.Ordering.html#variant.Acquire
1844 #![feature(atomic_min_max)]
1845 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1847 let foo = ", stringify!($atomic_type), "::new(23);
1848 assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
1849 assert_eq!(foo.load(Ordering::SeqCst), 42);
1852 If you want to obtain the maximum value in one step, you can use the following:
1855 #![feature(atomic_min_max)]
1856 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1858 let foo = ", stringify!($atomic_type), "::new(23);
1860 let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
1861 assert!(max_foo == 42);
1864 #[unstable(feature = "atomic_min_max",
1865 reason = "easier and faster min/max than writing manual CAS loop",
1868 pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
1869 unsafe { $max_fn(self.v.get(), val, order) }
1874 concat!("Minimum with the current value.
1876 Finds the minimum of the current value and the argument `val`, and
1877 sets the new value to the result.
1879 Returns the previous value.
1881 `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
1882 of this operation. All ordering modes are possible. Note that using
1883 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1884 using [`Release`] makes the load part [`Relaxed`].
1886 [`Ordering`]: enum.Ordering.html
1887 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1888 [`Release`]: enum.Ordering.html#variant.Release
1889 [`Acquire`]: enum.Ordering.html#variant.Acquire
1894 #![feature(atomic_min_max)]
1895 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1897 let foo = ", stringify!($atomic_type), "::new(23);
1898 assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
1899 assert_eq!(foo.load(Ordering::Relaxed), 23);
1900 assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
1901 assert_eq!(foo.load(Ordering::Relaxed), 22);
1904 If you want to obtain the minimum value in one step, you can use the following:
1907 #![feature(atomic_min_max)]
1908 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1910 let foo = ", stringify!($atomic_type), "::new(23);
1912 let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
1913 assert_eq!(min_foo, 12);
1916 #[unstable(feature = "atomic_min_max",
1917 reason = "easier and faster min/max than writing manual CAS loop",
1920 pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
1921 unsafe { $min_fn(self.v.get(), val, order) }
1926 concat!("Returns a mutable pointer to the underlying integer.
1928 Doing non-atomic reads and writes on the resulting integer can be a data race.
1929 This method is mostly useful for FFI, where the function signature may use
1930 `*mut ", stringify!($int_type), "` instead of `&", stringify!($atomic_type), "`.
1934 ```ignore (extern-declaration)
1936 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1939 fn my_atomic_op(arg: *mut ", stringify!($int_type), ");
1942 let mut atomic = ", stringify!($atomic_type), "::new(1);
1944 my_atomic_op(atomic.as_mut_ptr());
1949 #[unstable(feature = "atomic_mut_ptr",
1950 reason = "recently added",
1952 pub fn as_mut_ptr(&self) -> *mut $int_type {
// Instantiations of the `atomic_int!` macro for each fixed-width integer.
// Per the `stable` attributes below, the 8/16/32/64-bit types were stabilized
// in 1.34.0 ("integer_atomics_stable"); the 128-bit types remain unstable.
// AtomicI8 — 8-bit signed; uses the signed min/max intrinsics.
1960 #[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
1962 cfg(target_has_atomic = "8"),
1963 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1964 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1965 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1966 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1967 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1968 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1969 unstable(feature = "integer_atomics", issue = "32976"),
1970 "i8", "../../../std/primitive.i8.html",
1972 atomic_min, atomic_max,
1975 i8 AtomicI8 ATOMIC_I8_INIT
// AtomicU8 — 8-bit unsigned; uses the unsigned min/max intrinsics.
1977 #[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
1979 cfg(target_has_atomic = "8"),
1980 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1981 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1982 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1983 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1984 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1985 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1986 unstable(feature = "integer_atomics", issue = "32976"),
1987 "u8", "../../../std/primitive.u8.html",
1989 atomic_umin, atomic_umax,
1992 u8 AtomicU8 ATOMIC_U8_INIT
// AtomicI16 — 16-bit signed.
1994 #[cfg(any(bootstrap, target_has_atomic_load_store = "16"))]
1996 cfg(target_has_atomic = "16"),
1997 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1998 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1999 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2000 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2001 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2002 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2003 unstable(feature = "integer_atomics", issue = "32976"),
2004 "i16", "../../../std/primitive.i16.html",
2006 atomic_min, atomic_max,
2008 "AtomicI16::new(0)",
2009 i16 AtomicI16 ATOMIC_I16_INIT
// AtomicU16 — 16-bit unsigned.
2011 #[cfg(any(bootstrap, target_has_atomic_load_store = "16"))]
2013 cfg(target_has_atomic = "16"),
2014 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2015 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2016 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2017 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2018 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2019 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2020 unstable(feature = "integer_atomics", issue = "32976"),
2021 "u16", "../../../std/primitive.u16.html",
2023 atomic_umin, atomic_umax,
2025 "AtomicU16::new(0)",
2026 u16 AtomicU16 ATOMIC_U16_INIT
// AtomicI32 — 32-bit signed.
2028 #[cfg(any(bootstrap, target_has_atomic_load_store = "32"))]
2030 cfg(target_has_atomic = "32"),
2031 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2032 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2033 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2034 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2035 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2036 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2037 unstable(feature = "integer_atomics", issue = "32976"),
2038 "i32", "../../../std/primitive.i32.html",
2040 atomic_min, atomic_max,
2042 "AtomicI32::new(0)",
2043 i32 AtomicI32 ATOMIC_I32_INIT
// AtomicU32 — 32-bit unsigned.
2045 #[cfg(any(bootstrap, target_has_atomic_load_store = "32"))]
2047 cfg(target_has_atomic = "32"),
2048 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2049 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2050 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2051 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2052 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2053 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2054 unstable(feature = "integer_atomics", issue = "32976"),
2055 "u32", "../../../std/primitive.u32.html",
2057 atomic_umin, atomic_umax,
2059 "AtomicU32::new(0)",
2060 u32 AtomicU32 ATOMIC_U32_INIT
// AtomicI64 — 64-bit signed; note the different cfg shape for 64-bit support
// under the `bootstrap` compiler.
2063 all(bootstrap, target_has_atomic = "64"),
2064 target_has_atomic_load_store = "64"
2067 cfg(target_has_atomic = "64"),
2068 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2069 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2070 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2071 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2072 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2073 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2074 unstable(feature = "integer_atomics", issue = "32976"),
2075 "i64", "../../../std/primitive.i64.html",
2077 atomic_min, atomic_max,
2079 "AtomicI64::new(0)",
2080 i64 AtomicI64 ATOMIC_I64_INIT
// AtomicU64 — 64-bit unsigned.
2083 all(bootstrap, target_has_atomic = "64"),
2084 target_has_atomic_load_store = "64"
2087 cfg(target_has_atomic = "64"),
2088 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2089 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2090 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2091 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2092 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2093 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2094 unstable(feature = "integer_atomics", issue = "32976"),
2095 "u64", "../../../std/primitive.u64.html",
2097 atomic_umin, atomic_umax,
2099 "AtomicU64::new(0)",
2100 u64 AtomicU64 ATOMIC_U64_INIT
// AtomicI128 — 128-bit signed; fully unstable (issue #32976), hence the
// `#![feature(integer_atomics)]` line injected into its doc examples.
2102 #[cfg(target_has_atomic_load_store = "128")]
2104 cfg(target_has_atomic = "128"),
2105 unstable(feature = "integer_atomics", issue = "32976"),
2106 unstable(feature = "integer_atomics", issue = "32976"),
2107 unstable(feature = "integer_atomics", issue = "32976"),
2108 unstable(feature = "integer_atomics", issue = "32976"),
2109 unstable(feature = "integer_atomics", issue = "32976"),
2110 unstable(feature = "integer_atomics", issue = "32976"),
2111 unstable(feature = "integer_atomics", issue = "32976"),
2112 "i128", "../../../std/primitive.i128.html",
2113 "#![feature(integer_atomics)]\n\n",
2114 atomic_min, atomic_max,
2116 "AtomicI128::new(0)",
2117 i128 AtomicI128 ATOMIC_I128_INIT
// AtomicU128 — 128-bit unsigned; fully unstable like AtomicI128.
2119 #[cfg(target_has_atomic_load_store = "128")]
2121 cfg(target_has_atomic = "128"),
2122 unstable(feature = "integer_atomics", issue = "32976"),
2123 unstable(feature = "integer_atomics", issue = "32976"),
2124 unstable(feature = "integer_atomics", issue = "32976"),
2125 unstable(feature = "integer_atomics", issue = "32976"),
2126 unstable(feature = "integer_atomics", issue = "32976"),
2127 unstable(feature = "integer_atomics", issue = "32976"),
2128 unstable(feature = "integer_atomics", issue = "32976"),
2129 "u128", "../../../std/primitive.u128.html",
2130 "#![feature(integer_atomics)]\n\n",
2131 atomic_umin, atomic_umax,
2133 "AtomicU128::new(0)",
2134 u128 AtomicU128 ATOMIC_U128_INIT
// Helper macro selecting the pointer width (in bytes) for the current target;
// exactly one definition is compiled in, keyed on `target_pointer_width`.
// It is used below when instantiating AtomicIsize/AtomicUsize.
2136 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
2137 #[cfg(target_pointer_width = "16")]
2138 macro_rules! ptr_width {
// 32-bit targets.
2141 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
2142 #[cfg(target_pointer_width = "32")]
2143 macro_rules! ptr_width {
// 64-bit targets.
2146 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
2147 #[cfg(target_pointer_width = "64")]
2148 macro_rules! ptr_width {
// AtomicIsize — pointer-sized signed; stable since 1.0 (per the `rust1`
// attributes below), with later features stabilized at the versions shown.
2151 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
2153 cfg(target_has_atomic = "ptr"),
2154 stable(feature = "rust1", since = "1.0.0"),
2155 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2156 stable(feature = "atomic_debug", since = "1.3.0"),
2157 stable(feature = "atomic_access", since = "1.15.0"),
2158 stable(feature = "atomic_from", since = "1.23.0"),
2159 stable(feature = "atomic_nand", since = "1.27.0"),
2160 stable(feature = "rust1", since = "1.0.0"),
2161 "isize", "../../../std/primitive.isize.html",
2163 atomic_min, atomic_max,
2165 "AtomicIsize::new(0)",
2166 isize AtomicIsize ATOMIC_ISIZE_INIT
// AtomicUsize — pointer-sized unsigned; same stability history as AtomicIsize
// but with the unsigned min/max intrinsics.
2168 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
2170 cfg(target_has_atomic = "ptr"),
2171 stable(feature = "rust1", since = "1.0.0"),
2172 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2173 stable(feature = "atomic_debug", since = "1.3.0"),
2174 stable(feature = "atomic_access", since = "1.15.0"),
2175 stable(feature = "atomic_from", since = "1.23.0"),
2176 stable(feature = "atomic_nand", since = "1.27.0"),
2177 stable(feature = "rust1", since = "1.0.0"),
2178 "usize", "../../../std/primitive.usize.html",
2180 atomic_umin, atomic_umax,
2182 "AtomicUsize::new(0)",
2183 usize AtomicUsize ATOMIC_USIZE_INIT
// Maps a success ordering to the strongest failure ordering that is legal for
// a compare-exchange with that success ordering (body not visible here).
2187 #[cfg(target_has_atomic = "8")]
2188 fn strongest_failure_ordering(order: Ordering) -> Ordering {
// Dispatches an atomic store to the matching compiler intrinsic.
// Acquire and AcqRel panic: a store has no read half, so those orderings
// are meaningless for it.
2199 unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
2201 Release => intrinsics::atomic_store_rel(dst, val),
2202 Relaxed => intrinsics::atomic_store_relaxed(dst, val),
2203 SeqCst => intrinsics::atomic_store(dst, val),
2204 Acquire => panic!("there is no such thing as an acquire store"),
2205 AcqRel => panic!("there is no such thing as an acquire/release store"),
// Dispatches an atomic load to the matching compiler intrinsic.
// Release and AcqRel panic: a load has no write half, so those orderings
// are meaningless for it.
2210 unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
2212 Acquire => intrinsics::atomic_load_acq(dst),
2213 Relaxed => intrinsics::atomic_load_relaxed(dst),
2214 SeqCst => intrinsics::atomic_load(dst),
2215 Release => panic!("there is no such thing as a release load"),
2216 AcqRel => panic!("there is no such thing as an acquire/release load"),
// Atomic exchange: stores `val` and returns the previous value, dispatching
// on `order`. All five orderings are valid for a read-modify-write.
2221 #[cfg(target_has_atomic = "8")]
2222 unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
2224 Acquire => intrinsics::atomic_xchg_acq(dst, val),
2225 Release => intrinsics::atomic_xchg_rel(dst, val),
2226 AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
2227 Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
2228 SeqCst => intrinsics::atomic_xchg(dst, val),
2232 /// Returns the previous value (like __sync_fetch_and_add).
// Atomic fetch-add via the `xadd` intrinsic family; wrapping on overflow is
// handled by the intrinsic, not here.
2234 #[cfg(target_has_atomic = "8")]
2235 unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
2237 Acquire => intrinsics::atomic_xadd_acq(dst, val),
2238 Release => intrinsics::atomic_xadd_rel(dst, val),
2239 AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
2240 Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
2241 SeqCst => intrinsics::atomic_xadd(dst, val),
2245 /// Returns the previous value (like __sync_fetch_and_sub).
// Atomic fetch-subtract via the `xsub` intrinsic family.
2247 #[cfg(target_has_atomic = "8")]
2248 unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
2250 Acquire => intrinsics::atomic_xsub_acq(dst, val),
2251 Release => intrinsics::atomic_xsub_rel(dst, val),
2252 AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
2253 Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
2254 SeqCst => intrinsics::atomic_xsub(dst, val),
// Strong compare-and-exchange: selects the intrinsic variant matching the
// (success, failure) ordering pair. Failure orderings of Release/AcqRel, or
// any failure ordering stronger than the success ordering, panic.
2259 #[cfg(target_has_atomic = "8")]
2260 unsafe fn atomic_compare_exchange<T>(dst: *mut T,
2266 let (val, ok) = match (success, failure) {
2267 (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
2268 (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
2269 (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
2270 (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
2271 (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
2272 (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
2273 (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
2274 (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
2275 (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
2276 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
2277 (_, Release) => panic!("there is no such thing as a release failure ordering"),
2278 _ => panic!("a failure ordering can't be stronger than a success ordering"),
// The intrinsic returns (previous value, success flag); map that onto Result.
2280 if ok { Ok(val) } else { Err(val) }
// Weak compare-and-exchange: identical ordering dispatch to the strong form
// above, but the `cxchgweak` intrinsics may fail spuriously, so callers loop.
2284 #[cfg(target_has_atomic = "8")]
2285 unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
2291 let (val, ok) = match (success, failure) {
2292 (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
2293 (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
2294 (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
2295 (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
2296 (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
2297 (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
2298 (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
2299 (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
2300 (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
2301 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
2302 (_, Release) => panic!("there is no such thing as a release failure ordering"),
2303 _ => panic!("a failure ordering can't be stronger than a success ordering"),
// (previous value, success flag) -> Result, as in the strong variant.
2305 if ok { Ok(val) } else { Err(val) }
// Atomic fetch-AND: returns the previous value.
2309 #[cfg(target_has_atomic = "8")]
2310 unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
2312 Acquire => intrinsics::atomic_and_acq(dst, val),
2313 Release => intrinsics::atomic_and_rel(dst, val),
2314 AcqRel => intrinsics::atomic_and_acqrel(dst, val),
2315 Relaxed => intrinsics::atomic_and_relaxed(dst, val),
2316 SeqCst => intrinsics::atomic_and(dst, val),
// Atomic fetch-NAND: returns the previous value.
2321 #[cfg(target_has_atomic = "8")]
2322 unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
2324 Acquire => intrinsics::atomic_nand_acq(dst, val),
2325 Release => intrinsics::atomic_nand_rel(dst, val),
2326 AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
2327 Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
2328 SeqCst => intrinsics::atomic_nand(dst, val),
// Atomic fetch-OR: returns the previous value.
2333 #[cfg(target_has_atomic = "8")]
2334 unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
2336 Acquire => intrinsics::atomic_or_acq(dst, val),
2337 Release => intrinsics::atomic_or_rel(dst, val),
2338 AcqRel => intrinsics::atomic_or_acqrel(dst, val),
2339 Relaxed => intrinsics::atomic_or_relaxed(dst, val),
2340 SeqCst => intrinsics::atomic_or(dst, val),
// Atomic fetch-XOR: returns the previous value.
2345 #[cfg(target_has_atomic = "8")]
2346 unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
2348 Acquire => intrinsics::atomic_xor_acq(dst, val),
2349 Release => intrinsics::atomic_xor_rel(dst, val),
2350 AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
2351 Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
2352 SeqCst => intrinsics::atomic_xor(dst, val),
2356 /// returns the max value (signed comparison)
// Atomic fetch-max using the signed `max` intrinsics; returns the previous value.
2358 #[cfg(target_has_atomic = "8")]
2359 unsafe fn atomic_max<T>(dst: *mut T, val: T, order: Ordering) -> T {
2361 Acquire => intrinsics::atomic_max_acq(dst, val),
2362 Release => intrinsics::atomic_max_rel(dst, val),
2363 AcqRel => intrinsics::atomic_max_acqrel(dst, val),
2364 Relaxed => intrinsics::atomic_max_relaxed(dst, val),
2365 SeqCst => intrinsics::atomic_max(dst, val),
2369 /// returns the min value (signed comparison)
// Atomic fetch-min using the signed `min` intrinsics; returns the previous value.
2371 #[cfg(target_has_atomic = "8")]
2372 unsafe fn atomic_min<T>(dst: *mut T, val: T, order: Ordering) -> T {
2374 Acquire => intrinsics::atomic_min_acq(dst, val),
2375 Release => intrinsics::atomic_min_rel(dst, val),
2376 AcqRel => intrinsics::atomic_min_acqrel(dst, val),
2377 Relaxed => intrinsics::atomic_min_relaxed(dst, val),
2378 SeqCst => intrinsics::atomic_min(dst, val),
2382 /// returns the max value (unsigned comparison)
// FIX(review): doc said "signed comparison", but this dispatches to the
// `umax` intrinsic family, which performs an unsigned comparison.
2384 #[cfg(target_has_atomic = "8")]
2385 unsafe fn atomic_umax<T>(dst: *mut T, val: T, order: Ordering) -> T {
2387 Acquire => intrinsics::atomic_umax_acq(dst, val),
2388 Release => intrinsics::atomic_umax_rel(dst, val),
2389 AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
2390 Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
2391 SeqCst => intrinsics::atomic_umax(dst, val),
2395 /// returns the min value (unsigned comparison)
// FIX(review): doc said "signed comparison", but this dispatches to the
// `umin` intrinsic family, which performs an unsigned comparison.
2397 #[cfg(target_has_atomic = "8")]
2398 unsafe fn atomic_umin<T>(dst: *mut T, val: T, order: Ordering) -> T {
2400 Acquire => intrinsics::atomic_umin_acq(dst, val),
2401 Release => intrinsics::atomic_umin_rel(dst, val),
2402 AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
2403 Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
2404 SeqCst => intrinsics::atomic_umin(dst, val),
2408 /// An atomic fence.
2410 /// Depending on the specified order, a fence prevents the compiler and CPU from
2411 /// reordering certain types of memory operations around it.
2412 /// That creates synchronizes-with relationships between it and atomic operations
2413 /// or fences in other threads.
2415 /// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
2416 /// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
2417 /// exist operations X and Y, both operating on some atomic object 'M' such
2418 /// that A is sequenced before X, Y is sequenced before B and Y observes
2419 /// the change to M. This provides a happens-before dependence between A and B.
2422 /// Thread 1 Thread 2
2424 /// fence(Release); A --------------
2425 /// x.store(3, Relaxed); X --------- |
2428 /// -------------> Y if x.load(Relaxed) == 3 {
2429 /// |-------> B fence(Acquire);
2434 /// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
2437 /// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
2438 /// and [`Release`] semantics, participates in the global program order of the
2439 /// other [`SeqCst`] operations and/or fences.
2441 /// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
2445 /// Panics if `order` is [`Relaxed`].
2450 /// use std::sync::atomic::AtomicBool;
2451 /// use std::sync::atomic::fence;
2452 /// use std::sync::atomic::Ordering;
2454 /// // A mutual exclusion primitive based on spinlock.
2455 /// pub struct Mutex {
2456 /// flag: AtomicBool,
2460 /// pub fn new() -> Mutex {
2462 /// flag: AtomicBool::new(false),
2466 /// pub fn lock(&self) {
2467 /// while !self.flag.compare_and_swap(false, true, Ordering::Relaxed) {}
2468 /// // This fence synchronizes-with store in `unlock`.
2469 /// fence(Ordering::Acquire);
2472 /// pub fn unlock(&self) {
2473 /// self.flag.store(false, Ordering::Release);
2478 /// [`Ordering`]: enum.Ordering.html
2479 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2480 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2481 /// [`Release`]: enum.Ordering.html#variant.Release
2482 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2483 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
// FIX(review): the doc above said "Y is synchronized before B"; the C++
// fence-synchronization rule (and the rest of this paragraph) uses
// "sequenced before" — corrected accordingly.
2485 #[stable(feature = "rust1", since = "1.0.0")]
2486 #[cfg_attr(target_arch = "wasm32", allow(unused_variables))]
2487 pub fn fence(order: Ordering) {
2488 // On wasm32 it looks like fences aren't implemented in LLVM yet in that
2489 // they will cause LLVM to abort. The wasm instruction set doesn't have
2490 // fences right now. There's discussion online about the best way for tools
2491 // to conventionally implement fences at
2492 // https://github.com/WebAssembly/tool-conventions/issues/59. We should
2493 // follow that discussion and implement a solution when one comes about!
// Dispatch to the hardware fence intrinsic; Relaxed is rejected because a
// relaxed fence would be a no-op and is disallowed by the API contract above.
2494 #[cfg(not(target_arch = "wasm32"))]
2497 Acquire => intrinsics::atomic_fence_acq(),
2498 Release => intrinsics::atomic_fence_rel(),
2499 AcqRel => intrinsics::atomic_fence_acqrel(),
2500 SeqCst => intrinsics::atomic_fence(),
2501 Relaxed => panic!("there is no such thing as a relaxed fence"),
2507 /// A compiler memory fence.
2509 /// `compiler_fence` does not emit any machine code, but restricts the kinds
2510 /// of memory re-ordering the compiler is allowed to do. Specifically, depending on
2511 /// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
2512 /// or writes from before or after the call to the other side of the call to
2513 /// `compiler_fence`. Note that it does **not** prevent the *hardware*
2514 /// from doing such re-ordering. This is not a problem in a single-threaded,
2515 /// execution context, but when other threads may modify memory at the same
2516 /// time, stronger synchronization primitives such as [`fence`] are required.
2518 /// The re-ordering prevented by the different ordering semantics are:
2520 /// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
2521 /// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
2522 /// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
2523 /// - with [`AcqRel`], both of the above rules are enforced.
2525 /// `compiler_fence` is generally only useful for preventing a thread from
2526 /// racing *with itself*. That is, if a given thread is executing one piece
2527 /// of code, and is then interrupted, and starts executing code elsewhere
2528 /// (while still in the same thread, and conceptually still on the same
2529 /// core). In traditional programs, this can only occur when a signal
2530 /// handler is registered. In more low-level code, such situations can also
2531 /// arise when handling interrupts, when implementing green threads with
2532 /// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
2533 /// discussion of [memory barriers].
2537 /// Panics if `order` is [`Relaxed`].
2541 /// Without `compiler_fence`, the `assert_eq!` in following code
2542 /// is *not* guaranteed to succeed, despite everything happening in a single thread.
2543 /// To see why, remember that the compiler is free to swap the stores to
2544 /// `IMPORTANT_VARIABLE` and `IS_READY` since they are both
2545 /// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
2546 /// after `IS_READY` is updated, then the signal handler will see
2547 /// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
2548 /// Using a `compiler_fence` remedies this situation.
2551 /// use std::sync::atomic::{AtomicBool, AtomicUsize};
2552 /// use std::sync::atomic::Ordering;
2553 /// use std::sync::atomic::compiler_fence;
2555 /// static IMPORTANT_VARIABLE: AtomicUsize = AtomicUsize::new(0);
2556 /// static IS_READY: AtomicBool = AtomicBool::new(false);
2559 /// IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
2560 /// // prevent earlier writes from being moved beyond this point
2561 /// compiler_fence(Ordering::Release);
2562 /// IS_READY.store(true, Ordering::Relaxed);
2565 /// fn signal_handler() {
2566 /// if IS_READY.load(Ordering::Relaxed) {
2567 /// assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
2572 /// [`fence`]: fn.fence.html
2573 /// [`Ordering`]: enum.Ordering.html
2574 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2575 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2576 /// [`Release`]: enum.Ordering.html#variant.Release
2577 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2578 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2579 /// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
// FIX(review): the doc example narrative referred to "`IS_READ`", but the
// static in the example is named `IS_READY` — typo corrected.
2581 #[stable(feature = "compiler_fences", since = "1.21.0")]
2582 pub fn compiler_fence(order: Ordering) {
// Compiler-only barrier: dispatches to the single-thread fence intrinsics,
// which emit no machine code. Relaxed panics, matching `fence` above.
2585 Acquire => intrinsics::atomic_singlethreadfence_acq(),
2586 Release => intrinsics::atomic_singlethreadfence_rel(),
2587 AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
2588 SeqCst => intrinsics::atomic_singlethreadfence(),
2589 Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
// Debug formats the current value via a SeqCst load, delegating to the
// inner bool's Debug impl.
2595 #[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
2596 #[stable(feature = "atomic_debug", since = "1.3.0")]
2597 impl fmt::Debug for AtomicBool {
2598 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2599 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
// Debug formats the current pointer value via a SeqCst load.
2603 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
2604 #[stable(feature = "atomic_debug", since = "1.3.0")]
2605 impl<T> fmt::Debug for AtomicPtr<T> {
2606 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2607 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
// Pointer-formats ({:p}) the current pointer value via a SeqCst load.
2611 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
2612 #[stable(feature = "atomic_pointer", since = "1.24.0")]
2613 impl<T> fmt::Pointer for AtomicPtr<T> {
2614 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2615 fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)