3 //! Atomic types provide primitive shared-memory communication between
4 //! threads, and are the building blocks of other concurrent
7 //! This module defines atomic versions of a select number of primitive
8 //! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
9 //! [`AtomicI8`], [`AtomicU16`], etc.
10 //! Atomic types present operations that, when used correctly, synchronize
11 //! updates between threads.
13 //! [`AtomicBool`]: struct.AtomicBool.html
14 //! [`AtomicIsize`]: struct.AtomicIsize.html
15 //! [`AtomicUsize`]: struct.AtomicUsize.html
16 //! [`AtomicI8`]: struct.AtomicI8.html
17 //! [`AtomicU16`]: struct.AtomicU16.html
19 //! Each method takes an [`Ordering`] which represents the strength of
20 //! the memory barrier for that operation. These orderings are the
21 //! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2].
23 //! [`Ordering`]: enum.Ordering.html
25 //! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order
26 //! [2]: ../../../nomicon/atomics.html
28 //! Atomic variables are safe to share between threads (they implement [`Sync`])
29 //! but they do not themselves provide the mechanism for sharing and follow the
//! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
31 //! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
32 //! atomically-reference-counted shared pointer).
34 //! [`Sync`]: ../../marker/trait.Sync.html
35 //! [arc]: ../../../std/sync/struct.Arc.html
37 //! Atomic types may be stored in static variables, initialized using
38 //! the constant initializers like [`AtomicBool::new`]. Atomic statics
39 //! are often used for lazy global initialization.
41 //! [`AtomicBool::new`]: struct.AtomicBool.html#method.new
45 //! All atomic types in this module are guaranteed to be [lock-free] if they're
46 //! available. This means they don't internally acquire a global mutex. Atomic
47 //! types and operations are not guaranteed to be wait-free. This means that
48 //! operations like `fetch_or` may be implemented with a compare-and-swap loop.
50 //! Atomic operations may be implemented at the instruction layer with
51 //! larger-size atomics. For example some platforms use 4-byte atomic
52 //! instructions to implement `AtomicI8`. Note that this emulation should not
53 //! have an impact on correctness of code, it's just something to be aware of.
55 //! The atomic types in this module may not be available on all platforms. The
56 //! atomic types here are all widely available, however, and can generally be
//! relied upon to exist. Some notable exceptions are:
59 //! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or
60 //! `AtomicI64` types.
61 //! * ARM platforms like `armv5te` that aren't for Linux do not have any atomics
63 //! * ARM targets with `thumbv6m` do not have atomic operations at all.
65 //! Note that future platforms may be added that also do not have support for
66 //! some atomic operations. Maximally portable code will want to be careful
67 //! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are
68 //! generally the most portable, but even then they're not available everywhere.
//! For reference, the `std` library requires pointer-sized atomics, although `core` does not.
72 //! Currently you'll need to use `#[cfg(target_arch)]` primarily to
73 //! conditionally compile in code with atomics. There is an unstable
74 //! `#[cfg(target_has_atomic)]` as well which may be stabilized in the future.
76 //! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm
80 //! A simple spinlock:
83 //! use std::sync::Arc;
84 //! use std::sync::atomic::{AtomicUsize, Ordering};
88 //! let spinlock = Arc::new(AtomicUsize::new(1));
90 //! let spinlock_clone = spinlock.clone();
91 //! let thread = thread::spawn(move|| {
92 //! spinlock_clone.store(0, Ordering::SeqCst);
95 //! // Wait for the other thread to release the lock
96 //! while spinlock.load(Ordering::SeqCst) != 0 {}
98 //! if let Err(panic) = thread.join() {
99 //! println!("Thread had an error: {:?}", panic);
104 //! Keep a global count of live threads:
107 //! use std::sync::atomic::{AtomicUsize, Ordering};
109 //! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0);
111 //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
112 //! println!("live threads: {}", old_thread_count + 1);
115 // ignore-tidy-undocumented-unsafe
117 #![stable(feature = "rust1", since = "1.0.0")]
118 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
119 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))]
121 use self::Ordering::*;
123 use crate::intrinsics;
124 use crate::cell::UnsafeCell;
127 use crate::hint::spin_loop;
129 /// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
131 /// Upon receiving spin-loop signal the processor can optimize its behavior by, for example, saving
132 /// power or switching hyper-threads.
134 /// This function is different from [`std::thread::yield_now`] which directly yields to the
135 /// system's scheduler, whereas `spin_loop_hint` does not interact with the operating system.
137 /// Spin locks can be very efficient for short lock durations because they do not involve context
138 /// switches or interaction with the operating system. For long lock durations they become wasteful
139 /// however because they use CPU cycles for the entire lock duration, and using a
140 /// [`std::sync::Mutex`] is likely the better approach. If actively spinning for a long time is
141 /// required, e.g. because code polls a non-blocking API, calling [`std::thread::yield_now`]
142 /// or [`std::thread::sleep`] may be the best option.
144 /// **Note**: Spin locks are based on the underlying assumption that another thread will release
145 /// the lock 'soon'. In order for this to work, that other thread must run on a different CPU or
146 /// core (at least potentially). Spin locks do not work efficiently on single CPU / core platforms.
148 /// **Note**: On platforms that do not support receiving spin-loop hints this function does not
149 /// do anything at all.
151 /// [`std::thread::yield_now`]: ../../../std/thread/fn.yield_now.html
152 /// [`std::thread::sleep`]: ../../../std/thread/fn.sleep.html
153 /// [`std::sync::Mutex`]: ../../../std/sync/struct.Mutex.html
155 #[stable(feature = "spin_loop_hint", since = "1.24.0")]
156 pub fn spin_loop_hint() {
160 /// A boolean type which can be safely shared between threads.
162 /// This type has the same in-memory representation as a [`bool`].
164 /// [`bool`]: ../../../std/primitive.bool.html
165 #[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
166 #[stable(feature = "rust1", since = "1.0.0")]
168 pub struct AtomicBool {
172 #[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
173 #[stable(feature = "rust1", since = "1.0.0")]
174 impl Default for AtomicBool {
175 /// Creates an `AtomicBool` initialized to `false`.
176 fn default() -> Self {
// Send is implicitly implemented for AtomicBool.
#[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
#[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: `AtomicBool` is only mutated through atomic operations on its inner
// `UnsafeCell<u8>`, so sharing `&AtomicBool` across threads is sound.
unsafe impl Sync for AtomicBool {}
186 /// A raw pointer type which can be safely shared between threads.
188 /// This type has the same in-memory representation as a `*mut T`.
189 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
190 #[stable(feature = "rust1", since = "1.0.0")]
191 #[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
192 #[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
193 #[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
194 pub struct AtomicPtr<T> {
195 p: UnsafeCell<*mut T>,
198 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
199 #[stable(feature = "rust1", since = "1.0.0")]
200 impl<T> Default for AtomicPtr<T> {
201 /// Creates a null `AtomicPtr<T>`.
202 fn default() -> AtomicPtr<T> {
203 AtomicPtr::new(crate::ptr::null_mut())
#[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
#[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: `AtomicPtr<T>` only holds a raw pointer value, never dereferences it,
// so moving it to another thread is sound regardless of `T`.
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
#[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: all mutation of the inner `UnsafeCell<*mut T>` goes through atomic
// operations, so concurrent access through `&AtomicPtr<T>` is sound.
unsafe impl<T> Sync for AtomicPtr<T> {}
214 /// Atomic memory orderings
216 /// Memory orderings specify the way atomic operations synchronize memory.
217 /// In its weakest [`Relaxed`][Ordering::Relaxed], only the memory directly touched by the
218 /// operation is synchronized. On the other hand, a store-load pair of [`SeqCst`][Ordering::SeqCst]
219 /// operations synchronize other memory while additionally preserving a total order of such
220 /// operations across all threads.
222 /// Rust's memory orderings are [the same as those of
223 /// C++20](https://en.cppreference.com/w/cpp/atomic/memory_order).
225 /// For more information see the [nomicon].
227 /// [nomicon]: ../../../nomicon/atomics.html
228 /// [Ordering::Relaxed]: #variant.Relaxed
229 /// [Ordering::SeqCst]: #variant.SeqCst
230 #[stable(feature = "rust1", since = "1.0.0")]
231 #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
234 /// No ordering constraints, only atomic operations.
236 /// Corresponds to [`memory_order_relaxed`] in C++20.
238 /// [`memory_order_relaxed`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering
239 #[stable(feature = "rust1", since = "1.0.0")]
241 /// When coupled with a store, all previous operations become ordered
242 /// before any load of this value with [`Acquire`] (or stronger) ordering.
243 /// In particular, all previous writes become visible to all threads
244 /// that perform an [`Acquire`] (or stronger) load of this value.
246 /// Notice that using this ordering for an operation that combines loads
247 /// and stores leads to a [`Relaxed`] load operation!
249 /// This ordering is only applicable for operations that can perform a store.
251 /// Corresponds to [`memory_order_release`] in C++20.
253 /// [`Release`]: #variant.Release
254 /// [`Acquire`]: #variant.Acquire
255 /// [`Relaxed`]: #variant.Relaxed
256 /// [`memory_order_release`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
257 #[stable(feature = "rust1", since = "1.0.0")]
259 /// When coupled with a load, if the loaded value was written by a store operation with
260 /// [`Release`] (or stronger) ordering, then all subsequent operations
261 /// become ordered after that store. In particular, all subsequent loads will see data
262 /// written before the store.
264 /// Notice that using this ordering for an operation that combines loads
265 /// and stores leads to a [`Relaxed`] store operation!
267 /// This ordering is only applicable for operations that can perform a load.
269 /// Corresponds to [`memory_order_acquire`] in C++20.
271 /// [`Acquire`]: #variant.Acquire
272 /// [`Release`]: #variant.Release
273 /// [`Relaxed`]: #variant.Relaxed
274 /// [`memory_order_acquire`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
275 #[stable(feature = "rust1", since = "1.0.0")]
277 /// Has the effects of both [`Acquire`] and [`Release`] together:
278 /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
280 /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
281 /// not performing any store and hence it has just [`Acquire`] ordering. However,
282 /// `AcqRel` will never perform [`Relaxed`] accesses.
284 /// This ordering is only applicable for operations that combine both loads and stores.
286 /// Corresponds to [`memory_order_acq_rel`] in C++20.
288 /// [`memory_order_acq_rel`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
289 /// [`Acquire`]: #variant.Acquire
290 /// [`Release`]: #variant.Release
291 /// [`Relaxed`]: #variant.Relaxed
292 #[stable(feature = "rust1", since = "1.0.0")]
294 /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
295 /// operations, respectively) with the additional guarantee that all threads see all
296 /// sequentially consistent operations in the same order.
298 /// Corresponds to [`memory_order_seq_cst`] in C++20.
300 /// [`memory_order_seq_cst`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering
301 /// [`Acquire`]: #variant.Acquire
302 /// [`Release`]: #variant.Release
303 /// [`AcqRel`]: #variant.AcqRel
304 #[stable(feature = "rust1", since = "1.0.0")]
308 /// An [`AtomicBool`] initialized to `false`.
310 /// [`AtomicBool`]: struct.AtomicBool.html
311 #[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
312 #[stable(feature = "rust1", since = "1.0.0")]
315 reason = "the `new` function is now preferred",
316 suggestion = "AtomicBool::new(false)",
318 pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
320 #[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
322 /// Creates a new `AtomicBool`.
327 /// use std::sync::atomic::AtomicBool;
329 /// let atomic_true = AtomicBool::new(true);
330 /// let atomic_false = AtomicBool::new(false);
333 #[stable(feature = "rust1", since = "1.0.0")]
334 pub const fn new(v: bool) -> AtomicBool {
335 AtomicBool { v: UnsafeCell::new(v as u8) }
338 /// Returns a mutable reference to the underlying [`bool`].
340 /// This is safe because the mutable reference guarantees that no other threads are
341 /// concurrently accessing the atomic data.
343 /// [`bool`]: ../../../std/primitive.bool.html
348 /// use std::sync::atomic::{AtomicBool, Ordering};
350 /// let mut some_bool = AtomicBool::new(true);
351 /// assert_eq!(*some_bool.get_mut(), true);
352 /// *some_bool.get_mut() = false;
353 /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
356 #[stable(feature = "atomic_access", since = "1.15.0")]
357 pub fn get_mut(&mut self) -> &mut bool {
358 unsafe { &mut *(self.v.get() as *mut bool) }
361 /// Consumes the atomic and returns the contained value.
363 /// This is safe because passing `self` by value guarantees that no other threads are
364 /// concurrently accessing the atomic data.
369 /// use std::sync::atomic::AtomicBool;
371 /// let some_bool = AtomicBool::new(true);
372 /// assert_eq!(some_bool.into_inner(), true);
375 #[stable(feature = "atomic_access", since = "1.15.0")]
376 pub fn into_inner(self) -> bool {
377 self.v.into_inner() != 0
380 /// Loads a value from the bool.
382 /// `load` takes an [`Ordering`] argument which describes the memory ordering
383 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
387 /// Panics if `order` is [`Release`] or [`AcqRel`].
389 /// [`Ordering`]: enum.Ordering.html
390 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
391 /// [`Release`]: enum.Ordering.html#variant.Release
392 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
393 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
394 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
399 /// use std::sync::atomic::{AtomicBool, Ordering};
401 /// let some_bool = AtomicBool::new(true);
403 /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
406 #[stable(feature = "rust1", since = "1.0.0")]
407 pub fn load(&self, order: Ordering) -> bool {
408 unsafe { atomic_load(self.v.get(), order) != 0 }
411 /// Stores a value into the bool.
413 /// `store` takes an [`Ordering`] argument which describes the memory ordering
414 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
418 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
420 /// [`Ordering`]: enum.Ordering.html
421 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
422 /// [`Release`]: enum.Ordering.html#variant.Release
423 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
424 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
425 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
430 /// use std::sync::atomic::{AtomicBool, Ordering};
432 /// let some_bool = AtomicBool::new(true);
434 /// some_bool.store(false, Ordering::Relaxed);
435 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
438 #[stable(feature = "rust1", since = "1.0.0")]
439 pub fn store(&self, val: bool, order: Ordering) {
441 atomic_store(self.v.get(), val as u8, order);
445 /// Stores a value into the bool, returning the previous value.
447 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
448 /// of this operation. All ordering modes are possible. Note that using
449 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
450 /// using [`Release`] makes the load part [`Relaxed`].
452 /// [`Ordering`]: enum.Ordering.html
453 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
454 /// [`Release`]: enum.Ordering.html#variant.Release
455 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
460 /// use std::sync::atomic::{AtomicBool, Ordering};
462 /// let some_bool = AtomicBool::new(true);
464 /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
465 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
468 #[stable(feature = "rust1", since = "1.0.0")]
469 #[cfg(target_has_atomic = "8")]
470 pub fn swap(&self, val: bool, order: Ordering) -> bool {
471 unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
474 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
476 /// The return value is always the previous value. If it is equal to `current`, then the value
479 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
480 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
481 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
482 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
483 /// happens, and using [`Release`] makes the load part [`Relaxed`].
485 /// [`Ordering`]: enum.Ordering.html
486 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
487 /// [`Release`]: enum.Ordering.html#variant.Release
488 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
489 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
490 /// [`bool`]: ../../../std/primitive.bool.html
495 /// use std::sync::atomic::{AtomicBool, Ordering};
497 /// let some_bool = AtomicBool::new(true);
499 /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
500 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
502 /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
503 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
506 #[stable(feature = "rust1", since = "1.0.0")]
507 #[cfg(target_has_atomic = "8")]
508 pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
509 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
515 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
517 /// The return value is a result indicating whether the new value was written and containing
518 /// the previous value. On success this value is guaranteed to be equal to `current`.
520 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
521 /// ordering of this operation. The first describes the required ordering if the
522 /// operation succeeds while the second describes the required ordering when the
523 /// operation fails. Using [`Acquire`] as success ordering makes the store part
524 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
525 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
526 /// and must be equivalent to or weaker than the success ordering.
529 /// [`bool`]: ../../../std/primitive.bool.html
530 /// [`Ordering`]: enum.Ordering.html
531 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
532 /// [`Release`]: enum.Ordering.html#variant.Release
533 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
534 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
539 /// use std::sync::atomic::{AtomicBool, Ordering};
541 /// let some_bool = AtomicBool::new(true);
543 /// assert_eq!(some_bool.compare_exchange(true,
545 /// Ordering::Acquire,
546 /// Ordering::Relaxed),
548 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
550 /// assert_eq!(some_bool.compare_exchange(true, true,
551 /// Ordering::SeqCst,
552 /// Ordering::Acquire),
554 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
557 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
558 #[cfg(target_has_atomic = "8")]
559 pub fn compare_exchange(&self,
564 -> Result<bool, bool> {
566 atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
569 Err(x) => Err(x != 0),
573 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
575 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
576 /// comparison succeeds, which can result in more efficient code on some platforms. The
577 /// return value is a result indicating whether the new value was written and containing the
580 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
581 /// ordering of this operation. The first describes the required ordering if the
582 /// operation succeeds while the second describes the required ordering when the
583 /// operation fails. Using [`Acquire`] as success ordering makes the store part
584 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
585 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
586 /// and must be equivalent to or weaker than the success ordering.
588 /// [`bool`]: ../../../std/primitive.bool.html
589 /// [`compare_exchange`]: #method.compare_exchange
590 /// [`Ordering`]: enum.Ordering.html
591 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
592 /// [`Release`]: enum.Ordering.html#variant.Release
593 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
594 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
599 /// use std::sync::atomic::{AtomicBool, Ordering};
601 /// let val = AtomicBool::new(false);
604 /// let mut old = val.load(Ordering::Relaxed);
606 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
608 /// Err(x) => old = x,
613 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
614 #[cfg(target_has_atomic = "8")]
615 pub fn compare_exchange_weak(&self,
620 -> Result<bool, bool> {
622 atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
625 Err(x) => Err(x != 0),
629 /// Logical "and" with a boolean value.
631 /// Performs a logical "and" operation on the current value and the argument `val`, and sets
632 /// the new value to the result.
634 /// Returns the previous value.
636 /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
637 /// of this operation. All ordering modes are possible. Note that using
638 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
639 /// using [`Release`] makes the load part [`Relaxed`].
641 /// [`Ordering`]: enum.Ordering.html
642 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
643 /// [`Release`]: enum.Ordering.html#variant.Release
644 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
649 /// use std::sync::atomic::{AtomicBool, Ordering};
651 /// let foo = AtomicBool::new(true);
652 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
653 /// assert_eq!(foo.load(Ordering::SeqCst), false);
655 /// let foo = AtomicBool::new(true);
656 /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
657 /// assert_eq!(foo.load(Ordering::SeqCst), true);
659 /// let foo = AtomicBool::new(false);
660 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
661 /// assert_eq!(foo.load(Ordering::SeqCst), false);
664 #[stable(feature = "rust1", since = "1.0.0")]
665 #[cfg(target_has_atomic = "8")]
666 pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
667 unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
670 /// Logical "nand" with a boolean value.
672 /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
673 /// the new value to the result.
675 /// Returns the previous value.
677 /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
678 /// of this operation. All ordering modes are possible. Note that using
679 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
680 /// using [`Release`] makes the load part [`Relaxed`].
682 /// [`Ordering`]: enum.Ordering.html
683 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
684 /// [`Release`]: enum.Ordering.html#variant.Release
685 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
690 /// use std::sync::atomic::{AtomicBool, Ordering};
692 /// let foo = AtomicBool::new(true);
693 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
694 /// assert_eq!(foo.load(Ordering::SeqCst), true);
696 /// let foo = AtomicBool::new(true);
697 /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
698 /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
699 /// assert_eq!(foo.load(Ordering::SeqCst), false);
701 /// let foo = AtomicBool::new(false);
702 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
703 /// assert_eq!(foo.load(Ordering::SeqCst), true);
706 #[stable(feature = "rust1", since = "1.0.0")]
707 #[cfg(target_has_atomic = "8")]
708 pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
709 // We can't use atomic_nand here because it can result in a bool with
710 // an invalid value. This happens because the atomic operation is done
711 // with an 8-bit integer internally, which would set the upper 7 bits.
712 // So we just use fetch_xor or swap instead.
715 // We must invert the bool.
716 self.fetch_xor(true, order)
718 // !(x & false) == true
719 // We must set the bool to true.
720 self.swap(true, order)
724 /// Logical "or" with a boolean value.
726 /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
727 /// new value to the result.
729 /// Returns the previous value.
731 /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
732 /// of this operation. All ordering modes are possible. Note that using
733 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
734 /// using [`Release`] makes the load part [`Relaxed`].
736 /// [`Ordering`]: enum.Ordering.html
737 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
738 /// [`Release`]: enum.Ordering.html#variant.Release
739 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
744 /// use std::sync::atomic::{AtomicBool, Ordering};
746 /// let foo = AtomicBool::new(true);
747 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
748 /// assert_eq!(foo.load(Ordering::SeqCst), true);
750 /// let foo = AtomicBool::new(true);
751 /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
752 /// assert_eq!(foo.load(Ordering::SeqCst), true);
754 /// let foo = AtomicBool::new(false);
755 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
756 /// assert_eq!(foo.load(Ordering::SeqCst), false);
759 #[stable(feature = "rust1", since = "1.0.0")]
760 #[cfg(target_has_atomic = "8")]
761 pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
762 unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
765 /// Logical "xor" with a boolean value.
767 /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
768 /// the new value to the result.
770 /// Returns the previous value.
772 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
773 /// of this operation. All ordering modes are possible. Note that using
774 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
775 /// using [`Release`] makes the load part [`Relaxed`].
777 /// [`Ordering`]: enum.Ordering.html
778 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
779 /// [`Release`]: enum.Ordering.html#variant.Release
780 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
785 /// use std::sync::atomic::{AtomicBool, Ordering};
787 /// let foo = AtomicBool::new(true);
788 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
789 /// assert_eq!(foo.load(Ordering::SeqCst), true);
791 /// let foo = AtomicBool::new(true);
792 /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
793 /// assert_eq!(foo.load(Ordering::SeqCst), false);
795 /// let foo = AtomicBool::new(false);
796 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
797 /// assert_eq!(foo.load(Ordering::SeqCst), false);
800 #[stable(feature = "rust1", since = "1.0.0")]
801 #[cfg(target_has_atomic = "8")]
802 pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
803 unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
806 /// Returns a mutable pointer to the underlying [`bool`].
808 /// Doing non-atomic reads and writes on the resulting integer can be a data race.
809 /// This method is mostly useful for FFI, where the function signature may use
810 /// `*mut bool` instead of `&AtomicBool`.
812 /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
813 /// atomic types work with interior mutability. All modifications of an atomic change the value
814 /// through a shared reference, and can do so safely as long as they use atomic operations. Any
815 /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
816 /// restriction: operations on it must be atomic.
818 /// [`bool`]: ../../../std/primitive.bool.html
822 /// ```ignore (extern-declaration)
824 /// use std::sync::atomic::AtomicBool;
826 /// fn my_atomic_op(arg: *mut bool);
829 /// let mut atomic = AtomicBool::new(true);
831 /// my_atomic_op(atomic.as_mut_ptr());
836 #[unstable(feature = "atomic_mut_ptr",
837 reason = "recently added",
839 pub fn as_mut_ptr(&self) -> *mut bool {
840 self.v.get() as *mut bool
844 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
845 impl<T> AtomicPtr<T> {
846 /// Creates a new `AtomicPtr`.
851 /// use std::sync::atomic::AtomicPtr;
853 /// let ptr = &mut 5;
854 /// let atomic_ptr = AtomicPtr::new(ptr);
857 #[stable(feature = "rust1", since = "1.0.0")]
858 pub const fn new(p: *mut T) -> AtomicPtr<T> {
859 AtomicPtr { p: UnsafeCell::new(p) }
862 /// Returns a mutable reference to the underlying pointer.
864 /// This is safe because the mutable reference guarantees that no other threads are
865 /// concurrently accessing the atomic data.
870 /// use std::sync::atomic::{AtomicPtr, Ordering};
872 /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
873 /// *atomic_ptr.get_mut() = &mut 5;
874 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
877 #[stable(feature = "atomic_access", since = "1.15.0")]
878 pub fn get_mut(&mut self) -> &mut *mut T {
879 unsafe { &mut *self.p.get() }
882 /// Consumes the atomic and returns the contained value.
884 /// This is safe because passing `self` by value guarantees that no other threads are
885 /// concurrently accessing the atomic data.
890 /// use std::sync::atomic::AtomicPtr;
892 /// let atomic_ptr = AtomicPtr::new(&mut 5);
893 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
896 #[stable(feature = "atomic_access", since = "1.15.0")]
897 pub fn into_inner(self) -> *mut T {
901 /// Loads a value from the pointer.
903 /// `load` takes an [`Ordering`] argument which describes the memory ordering
904 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
908 /// Panics if `order` is [`Release`] or [`AcqRel`].
910 /// [`Ordering`]: enum.Ordering.html
911 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
912 /// [`Release`]: enum.Ordering.html#variant.Release
913 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
914 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
915 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
920 /// use std::sync::atomic::{AtomicPtr, Ordering};
922 /// let ptr = &mut 5;
923 /// let some_ptr = AtomicPtr::new(ptr);
925 /// let value = some_ptr.load(Ordering::Relaxed);
928 #[stable(feature = "rust1", since = "1.0.0")]
929 pub fn load(&self, order: Ordering) -> *mut T {
930 unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
933 /// Stores a value into the pointer.
935 /// `store` takes an [`Ordering`] argument which describes the memory ordering
936 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
940 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
942 /// [`Ordering`]: enum.Ordering.html
943 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
944 /// [`Release`]: enum.Ordering.html#variant.Release
945 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
946 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
947 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
952 /// use std::sync::atomic::{AtomicPtr, Ordering};
954 /// let ptr = &mut 5;
955 /// let some_ptr = AtomicPtr::new(ptr);
957 /// let other_ptr = &mut 10;
959 /// some_ptr.store(other_ptr, Ordering::Relaxed);
962 #[stable(feature = "rust1", since = "1.0.0")]
963 pub fn store(&self, ptr: *mut T, order: Ordering) {
965 atomic_store(self.p.get() as *mut usize, ptr as usize, order);
969 /// Stores a value into the pointer, returning the previous value.
971 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
972 /// of this operation. All ordering modes are possible. Note that using
973 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
974 /// using [`Release`] makes the load part [`Relaxed`].
976 /// [`Ordering`]: enum.Ordering.html
977 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
978 /// [`Release`]: enum.Ordering.html#variant.Release
979 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
984 /// use std::sync::atomic::{AtomicPtr, Ordering};
986 /// let ptr = &mut 5;
987 /// let some_ptr = AtomicPtr::new(ptr);
989 /// let other_ptr = &mut 10;
991 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
994 #[stable(feature = "rust1", since = "1.0.0")]
995 #[cfg(target_has_atomic = "ptr")]
996 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
997 unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
1000 /// Stores a value into the pointer if the current value is the same as the `current` value.
1002 /// The return value is always the previous value. If it is equal to `current`, then the value
1005 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1006 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
1007 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1008 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1009 /// happens, and using [`Release`] makes the load part [`Relaxed`].
1011 /// [`Ordering`]: enum.Ordering.html
1012 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1013 /// [`Release`]: enum.Ordering.html#variant.Release
1014 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1015 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1020 /// use std::sync::atomic::{AtomicPtr, Ordering};
1022 /// let ptr = &mut 5;
1023 /// let some_ptr = AtomicPtr::new(ptr);
1025 /// let other_ptr = &mut 10;
1027 /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
1030 #[stable(feature = "rust1", since = "1.0.0")]
1031 #[cfg(target_has_atomic = "ptr")]
1032 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
1033 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
1039 /// Stores a value into the pointer if the current value is the same as the `current` value.
1041 /// The return value is a result indicating whether the new value was written and containing
1042 /// the previous value. On success this value is guaranteed to be equal to `current`.
1044 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1045 /// ordering of this operation. The first describes the required ordering if the
1046 /// operation succeeds while the second describes the required ordering when the
1047 /// operation fails. Using [`Acquire`] as success ordering makes the store part
1048 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1049 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1050 /// and must be equivalent to or weaker than the success ordering.
1052 /// [`Ordering`]: enum.Ordering.html
1053 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1054 /// [`Release`]: enum.Ordering.html#variant.Release
1055 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1056 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1061 /// use std::sync::atomic::{AtomicPtr, Ordering};
1063 /// let ptr = &mut 5;
1064 /// let some_ptr = AtomicPtr::new(ptr);
1066 /// let other_ptr = &mut 10;
1068 /// let value = some_ptr.compare_exchange(ptr, other_ptr,
1069 /// Ordering::SeqCst, Ordering::Relaxed);
1072 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1073 #[cfg(target_has_atomic = "ptr")]
1074 pub fn compare_exchange(&self,
1079 -> Result<*mut T, *mut T> {
1081 let res = atomic_compare_exchange(self.p.get() as *mut usize,
1087 Ok(x) => Ok(x as *mut T),
1088 Err(x) => Err(x as *mut T),
1093 /// Stores a value into the pointer if the current value is the same as the `current` value.
1095 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
1096 /// comparison succeeds, which can result in more efficient code on some platforms. The
1097 /// return value is a result indicating whether the new value was written and containing the
1100 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1101 /// ordering of this operation. The first describes the required ordering if the
1102 /// operation succeeds while the second describes the required ordering when the
1103 /// operation fails. Using [`Acquire`] as success ordering makes the store part
1104 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1105 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1106 /// and must be equivalent to or weaker than the success ordering.
1108 /// [`compare_exchange`]: #method.compare_exchange
1109 /// [`Ordering`]: enum.Ordering.html
1110 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1111 /// [`Release`]: enum.Ordering.html#variant.Release
1112 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1113 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1118 /// use std::sync::atomic::{AtomicPtr, Ordering};
1120 /// let some_ptr = AtomicPtr::new(&mut 5);
1122 /// let new = &mut 10;
1123 /// let mut old = some_ptr.load(Ordering::Relaxed);
1125 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1127 /// Err(x) => old = x,
1132 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1133 #[cfg(target_has_atomic = "ptr")]
1134 pub fn compare_exchange_weak(&self,
1139 -> Result<*mut T, *mut T> {
1141 let res = atomic_compare_exchange_weak(self.p.get() as *mut usize,
1147 Ok(x) => Ok(x as *mut T),
1148 Err(x) => Err(x as *mut T),
1154 #[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
1155 #[stable(feature = "atomic_bool_from", since = "1.24.0")]
1156 impl From<bool> for AtomicBool {
1157 /// Converts a `bool` into an `AtomicBool`.
1162 /// use std::sync::atomic::AtomicBool;
1163 /// let atomic_bool = AtomicBool::from(true);
1164 /// assert_eq!(format!("{:?}", atomic_bool), "true")
1167 fn from(b: bool) -> Self { Self::new(b) }
1170 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
1171 #[stable(feature = "atomic_from", since = "1.23.0")]
1172 impl<T> From<*mut T> for AtomicPtr<T> {
1174 fn from(p: *mut T) -> Self { Self::new(p) }
1177 #[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
1178 macro_rules! atomic_int {
1183 $stable_access:meta,
1186 $stable_init_const:meta,
1187 $s_int_type:expr, $int_ref:expr,
1188 $extra_feature:expr,
1189 $min_fn:ident, $max_fn:ident,
1192 $int_type:ident $atomic_type:ident $atomic_init:ident) => {
1193 /// An integer type which can be safely shared between threads.
1195 /// This type has the same in-memory representation as the underlying
1196 /// integer type, [`
1197 #[doc = $s_int_type]
1198 /// `](
1199 #[doc = $int_ref]
1200 /// ). For more about the differences between atomic types and
1201 /// non-atomic types as well as information about the portability of
1202 /// this type, please see the [module-level documentation].
1204 /// [module-level documentation]: index.html
1206 #[repr(C, align($align))]
1207 pub struct $atomic_type {
1208 v: UnsafeCell<$int_type>,
1211 /// An atomic integer initialized to `0`.
1212 #[$stable_init_const]
1215 reason = "the `new` function is now preferred",
1216 suggestion = $atomic_new,
1218 pub const $atomic_init: $atomic_type = $atomic_type::new(0);
1221 impl Default for $atomic_type {
1222 fn default() -> Self {
1223 Self::new(Default::default())
1228 impl From<$int_type> for $atomic_type {
1231 "Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`."),
1233 fn from(v: $int_type) -> Self { Self::new(v) }
1238 impl fmt::Debug for $atomic_type {
1239 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1240 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
1244 // Send is implicitly implemented.
1246 unsafe impl Sync for $atomic_type {}
1250 concat!("Creates a new atomic integer.
1255 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1257 let atomic_forty_two = ", stringify!($atomic_type), "::new(42);
1261 pub const fn new(v: $int_type) -> Self {
1262 $atomic_type {v: UnsafeCell::new(v)}
1267 concat!("Returns a mutable reference to the underlying integer.
1269 This is safe because the mutable reference guarantees that no other threads are
1270 concurrently accessing the atomic data.
1275 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1277 let mut some_var = ", stringify!($atomic_type), "::new(10);
1278 assert_eq!(*some_var.get_mut(), 10);
1279 *some_var.get_mut() = 5;
1280 assert_eq!(some_var.load(Ordering::SeqCst), 5);
1284 pub fn get_mut(&mut self) -> &mut $int_type {
1285 unsafe { &mut *self.v.get() }
1290 concat!("Consumes the atomic and returns the contained value.
1292 This is safe because passing `self` by value guarantees that no other threads are
1293 concurrently accessing the atomic data.
1298 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1300 let some_var = ", stringify!($atomic_type), "::new(5);
1301 assert_eq!(some_var.into_inner(), 5);
1305 pub fn into_inner(self) -> $int_type {
1311 concat!("Loads a value from the atomic integer.
1313 `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1314 Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1318 Panics if `order` is [`Release`] or [`AcqRel`].
1320 [`Ordering`]: enum.Ordering.html
1321 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1322 [`Release`]: enum.Ordering.html#variant.Release
1323 [`Acquire`]: enum.Ordering.html#variant.Acquire
1324 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1325 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1330 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1332 let some_var = ", stringify!($atomic_type), "::new(5);
1334 assert_eq!(some_var.load(Ordering::Relaxed), 5);
1338 pub fn load(&self, order: Ordering) -> $int_type {
1339 unsafe { atomic_load(self.v.get(), order) }
1344 concat!("Stores a value into the atomic integer.
1346 `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1347 Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
1351 Panics if `order` is [`Acquire`] or [`AcqRel`].
1353 [`Ordering`]: enum.Ordering.html
1354 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1355 [`Release`]: enum.Ordering.html#variant.Release
1356 [`Acquire`]: enum.Ordering.html#variant.Acquire
1357 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1358 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1363 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1365 let some_var = ", stringify!($atomic_type), "::new(5);
1367 some_var.store(10, Ordering::Relaxed);
1368 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1372 pub fn store(&self, val: $int_type, order: Ordering) {
1373 unsafe { atomic_store(self.v.get(), val, order); }
1378 concat!("Stores a value into the atomic integer, returning the previous value.
1380 `swap` takes an [`Ordering`] argument which describes the memory ordering
1381 of this operation. All ordering modes are possible. Note that using
1382 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1383 using [`Release`] makes the load part [`Relaxed`].
1385 [`Ordering`]: enum.Ordering.html
1386 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1387 [`Release`]: enum.Ordering.html#variant.Release
1388 [`Acquire`]: enum.Ordering.html#variant.Acquire
1393 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1395 let some_var = ", stringify!($atomic_type), "::new(5);
1397 assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
1402 pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
1403 unsafe { atomic_swap(self.v.get(), val, order) }
1408 concat!("Stores a value into the atomic integer if the current value is the same as
1409 the `current` value.
1411 The return value is always the previous value. If it is equal to `current`, then the
1414 `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1415 ordering of this operation. Notice that even when using [`AcqRel`], the operation
1416 might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1417 Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1418 happens, and using [`Release`] makes the load part [`Relaxed`].
1420 [`Ordering`]: enum.Ordering.html
1421 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1422 [`Release`]: enum.Ordering.html#variant.Release
1423 [`Acquire`]: enum.Ordering.html#variant.Acquire
1424 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1429 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1431 let some_var = ", stringify!($atomic_type), "::new(5);
1433 assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
1434 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1436 assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
1437 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1442 pub fn compare_and_swap(&self,
1445 order: Ordering) -> $int_type {
1446 match self.compare_exchange(current,
1449 strongest_failure_ordering(order)) {
1457 concat!("Stores a value into the atomic integer if the current value is the same as
1458 the `current` value.
1460 The return value is a result indicating whether the new value was written and
1461 containing the previous value. On success this value is guaranteed to be equal to
1464 `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1465 ordering of this operation. The first describes the required ordering if the
1466 operation succeeds while the second describes the required ordering when the
1467 operation fails. Using [`Acquire`] as success ordering makes the store part
1468 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1469 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1470 and must be equivalent to or weaker than the success ordering.
1472 [`Ordering`]: enum.Ordering.html
1473 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1474 [`Release`]: enum.Ordering.html#variant.Release
1475 [`Acquire`]: enum.Ordering.html#variant.Acquire
1476 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1481 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1483 let some_var = ", stringify!($atomic_type), "::new(5);
1485 assert_eq!(some_var.compare_exchange(5, 10,
1489 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1491 assert_eq!(some_var.compare_exchange(6, 12,
1495 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1500 pub fn compare_exchange(&self,
1504 failure: Ordering) -> Result<$int_type, $int_type> {
1505 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
1510 concat!("Stores a value into the atomic integer if the current value is the same as
1511 the `current` value.
1513 Unlike [`compare_exchange`], this function is allowed to spuriously fail even
1514 when the comparison succeeds, which can result in more efficient code on some
1515 platforms. The return value is a result indicating whether the new value was
1516 written and containing the previous value.
1518 `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1519 ordering of this operation. The first describes the required ordering if the
1520 operation succeeds while the second describes the required ordering when the
1521 operation fails. Using [`Acquire`] as success ordering makes the store part
1522 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1523 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1524 and must be equivalent to or weaker than the success ordering.
1526 [`compare_exchange`]: #method.compare_exchange
1527 [`Ordering`]: enum.Ordering.html
1528 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1529 [`Release`]: enum.Ordering.html#variant.Release
1530 [`Acquire`]: enum.Ordering.html#variant.Acquire
1531 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1536 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1538 let val = ", stringify!($atomic_type), "::new(4);
1540 let mut old = val.load(Ordering::Relaxed);
1543 match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1552 pub fn compare_exchange_weak(&self,
1556 failure: Ordering) -> Result<$int_type, $int_type> {
1558 atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
1564 concat!("Adds to the current value, returning the previous value.
1566 This operation wraps around on overflow.
1568 `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
1569 of this operation. All ordering modes are possible. Note that using
1570 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1571 using [`Release`] makes the load part [`Relaxed`].
1573 [`Ordering`]: enum.Ordering.html
1574 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1575 [`Release`]: enum.Ordering.html#variant.Release
1576 [`Acquire`]: enum.Ordering.html#variant.Acquire
1581 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1583 let foo = ", stringify!($atomic_type), "::new(0);
1584 assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
1585 assert_eq!(foo.load(Ordering::SeqCst), 10);
1590 pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
1591 unsafe { atomic_add(self.v.get(), val, order) }
1596 concat!("Subtracts from the current value, returning the previous value.
1598 This operation wraps around on overflow.
1600 `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
1601 of this operation. All ordering modes are possible. Note that using
1602 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1603 using [`Release`] makes the load part [`Relaxed`].
1605 [`Ordering`]: enum.Ordering.html
1606 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1607 [`Release`]: enum.Ordering.html#variant.Release
1608 [`Acquire`]: enum.Ordering.html#variant.Acquire
1613 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1615 let foo = ", stringify!($atomic_type), "::new(20);
1616 assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
1617 assert_eq!(foo.load(Ordering::SeqCst), 10);
1622 pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
1623 unsafe { atomic_sub(self.v.get(), val, order) }
1628 concat!("Bitwise \"and\" with the current value.
1630 Performs a bitwise \"and\" operation on the current value and the argument `val`, and
1631 sets the new value to the result.
1633 Returns the previous value.
1635 `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
1636 of this operation. All ordering modes are possible. Note that using
1637 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1638 using [`Release`] makes the load part [`Relaxed`].
1640 [`Ordering`]: enum.Ordering.html
1641 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1642 [`Release`]: enum.Ordering.html#variant.Release
1643 [`Acquire`]: enum.Ordering.html#variant.Acquire
1648 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1650 let foo = ", stringify!($atomic_type), "::new(0b101101);
1651 assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
1652 assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
1657 pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
1658 unsafe { atomic_and(self.v.get(), val, order) }
1663 concat!("Bitwise \"nand\" with the current value.
1665 Performs a bitwise \"nand\" operation on the current value and the argument `val`, and
1666 sets the new value to the result.
1668 Returns the previous value.
1670 `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
1671 of this operation. All ordering modes are possible. Note that using
1672 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1673 using [`Release`] makes the load part [`Relaxed`].
1675 [`Ordering`]: enum.Ordering.html
1676 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1677 [`Release`]: enum.Ordering.html#variant.Release
1678 [`Acquire`]: enum.Ordering.html#variant.Acquire
1683 ", $extra_feature, "
1684 use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1686 let foo = ", stringify!($atomic_type), "::new(0x13);
1687 assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
1688 assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
1693 pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
1694 unsafe { atomic_nand(self.v.get(), val, order) }
1699 concat!("Bitwise \"or\" with the current value.
1701 Performs a bitwise \"or\" operation on the current value and the argument `val`, and
1702 sets the new value to the result.
1704 Returns the previous value.
1706 `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
1707 of this operation. All ordering modes are possible. Note that using
1708 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1709 using [`Release`] makes the load part [`Relaxed`].
1711 [`Ordering`]: enum.Ordering.html
1712 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1713 [`Release`]: enum.Ordering.html#variant.Release
1714 [`Acquire`]: enum.Ordering.html#variant.Acquire
1719 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1721 let foo = ", stringify!($atomic_type), "::new(0b101101);
1722 assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
1723 assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
1728 pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
1729 unsafe { atomic_or(self.v.get(), val, order) }
1734 concat!("Bitwise \"xor\" with the current value.
1736 Performs a bitwise \"xor\" operation on the current value and the argument `val`, and
1737 sets the new value to the result.
1739 Returns the previous value.
1741 `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
1742 of this operation. All ordering modes are possible. Note that using
1743 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1744 using [`Release`] makes the load part [`Relaxed`].
1746 [`Ordering`]: enum.Ordering.html
1747 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1748 [`Release`]: enum.Ordering.html#variant.Release
1749 [`Acquire`]: enum.Ordering.html#variant.Acquire
1754 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1756 let foo = ", stringify!($atomic_type), "::new(0b101101);
1757 assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
1758 assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
1763 pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
1764 unsafe { atomic_xor(self.v.get(), val, order) }
1769 concat!("Fetches the value, and applies a function to it that returns an optional
1770 new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
1771 `Err(previous_value)`.
1773 Note: This may call the function multiple times if the value has been changed by other threads in
1774 the meantime, as long as the function returns `Some(_)`, but the function will have been applied
1775 only once to the stored value.
1777 `fetch_update` takes two [`Ordering`] arguments to describe the memory
1778 ordering of this operation. The first describes the required ordering for loads
1779 and failed updates while the second describes the required ordering when the
1780 operation finally succeeds. Beware that this is different from the two
1781 modes in [`compare_exchange`]!
1783 Using [`Acquire`] as success ordering makes the store part
1784 of this operation [`Relaxed`], and using [`Release`] makes the final successful load
1785 [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1786 and must be equivalent to or weaker than the success ordering.
1788 [`bool`]: ../../../std/primitive.bool.html
1789 [`compare_exchange`]: #method.compare_exchange
1790 [`Ordering`]: enum.Ordering.html
1791 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1792 [`Release`]: enum.Ordering.html#variant.Release
1793 [`Acquire`]: enum.Ordering.html#variant.Acquire
1794 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1799 #![feature(no_more_cas)]
1800 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1802 let x = ", stringify!($atomic_type), "::new(7);
1803 assert_eq!(x.fetch_update(|_| None, Ordering::SeqCst, Ordering::SeqCst), Err(7));
1804 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(7));
1805 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(8));
1806 assert_eq!(x.load(Ordering::SeqCst), 9);
1809 #[unstable(feature = "no_more_cas",
1810 reason = "no more CAS loops in user code",
1813 pub fn fetch_update<F>(&self,
1815 fetch_order: Ordering,
1816 set_order: Ordering) -> Result<$int_type, $int_type>
1817 where F: FnMut($int_type) -> Option<$int_type> {
1818 let mut prev = self.load(fetch_order);
1819 while let Some(next) = f(prev) {
1820 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
1821 x @ Ok(_) => return x,
1822 Err(next_prev) => prev = next_prev
1830 concat!("Maximum with the current value.
1832 Finds the maximum of the current value and the argument `val`, and
1833 sets the new value to the result.
1835 Returns the previous value.
1837 `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
1838 of this operation. All ordering modes are possible. Note that using
1839 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1840 using [`Release`] makes the load part [`Relaxed`].
1842 [`Ordering`]: enum.Ordering.html
1843 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1844 [`Release`]: enum.Ordering.html#variant.Release
1845 [`Acquire`]: enum.Ordering.html#variant.Acquire
1850 #![feature(atomic_min_max)]
1851 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1853 let foo = ", stringify!($atomic_type), "::new(23);
1854 assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
1855 assert_eq!(foo.load(Ordering::SeqCst), 42);
1858 If you want to obtain the maximum value in one step, you can use the following:
1861 #![feature(atomic_min_max)]
1862 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1864 let foo = ", stringify!($atomic_type), "::new(23);
1866 let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
1867 assert!(max_foo == 42);
1870 #[unstable(feature = "atomic_min_max",
1871 reason = "easier and faster min/max than writing manual CAS loop",
1874 pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
1875 unsafe { $max_fn(self.v.get(), val, order) }
1880 concat!("Minimum with the current value.
1882 Finds the minimum of the current value and the argument `val`, and
1883 sets the new value to the result.
1885 Returns the previous value.
1887 `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
1888 of this operation. All ordering modes are possible. Note that using
1889 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1890 using [`Release`] makes the load part [`Relaxed`].
1892 [`Ordering`]: enum.Ordering.html
1893 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1894 [`Release`]: enum.Ordering.html#variant.Release
1895 [`Acquire`]: enum.Ordering.html#variant.Acquire
1900 #![feature(atomic_min_max)]
1901 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1903 let foo = ", stringify!($atomic_type), "::new(23);
1904 assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
1905 assert_eq!(foo.load(Ordering::Relaxed), 23);
1906 assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
1907 assert_eq!(foo.load(Ordering::Relaxed), 22);
1910 If you want to obtain the minimum value in one step, you can use the following:
1913 #![feature(atomic_min_max)]
1914 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1916 let foo = ", stringify!($atomic_type), "::new(23);
1918 let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
1919 assert_eq!(min_foo, 12);
1922 #[unstable(feature = "atomic_min_max",
1923 reason = "easier and faster min/max than writing manual CAS loop",
1926 pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
1927 unsafe { $min_fn(self.v.get(), val, order) }
1932 concat!("Returns a mutable pointer to the underlying integer.
1934 Doing non-atomic reads and writes on the resulting integer can be a data race.
1935 This method is mostly useful for FFI, where the function signature may use
1936 `*mut ", stringify!($int_type), "` instead of `&", stringify!($atomic_type), "`.
1938 Returning a `*mut` pointer from a shared reference to this atomic is safe because the
1939 atomic types work with interior mutability. All modifications of an atomic change the value
1940 through a shared reference, and can do so safely as long as they use atomic operations. Any
1941 use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
1942 restriction: operations on it must be atomic.
1946 ```ignore (extern-declaration)
1948 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1951 fn my_atomic_op(arg: *mut ", stringify!($int_type), ");
1954 let mut atomic = ", stringify!($atomic_type), "::new(1);
1956 my_atomic_op(atomic.as_mut_ptr());
1961 #[unstable(feature = "atomic_mut_ptr",
1962 reason = "recently added",
1964 pub fn as_mut_ptr(&self) -> *mut $int_type {
1972 #[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
1974 cfg(target_has_atomic = "8"),
1975 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1976 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1977 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1978 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1979 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1980 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1981 unstable(feature = "integer_atomics", issue = "32976"),
1982 "i8", "../../../std/primitive.i8.html",
1984 atomic_min, atomic_max,
1987 i8 AtomicI8 ATOMIC_I8_INIT
1989 #[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
1991 cfg(target_has_atomic = "8"),
1992 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1993 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1994 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1995 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1996 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1997 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1998 unstable(feature = "integer_atomics", issue = "32976"),
1999 "u8", "../../../std/primitive.u8.html",
2001 atomic_umin, atomic_umax,
2004 u8 AtomicU8 ATOMIC_U8_INIT
2006 #[cfg(any(bootstrap, target_has_atomic_load_store = "16"))]
2008 cfg(target_has_atomic = "16"),
2009 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2010 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2011 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2012 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2013 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2014 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2015 unstable(feature = "integer_atomics", issue = "32976"),
2016 "i16", "../../../std/primitive.i16.html",
2018 atomic_min, atomic_max,
2020 "AtomicI16::new(0)",
2021 i16 AtomicI16 ATOMIC_I16_INIT
2023 #[cfg(any(bootstrap, target_has_atomic_load_store = "16"))]
2025 cfg(target_has_atomic = "16"),
2026 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2027 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2028 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2029 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2030 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2031 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2032 unstable(feature = "integer_atomics", issue = "32976"),
2033 "u16", "../../../std/primitive.u16.html",
2035 atomic_umin, atomic_umax,
2037 "AtomicU16::new(0)",
2038 u16 AtomicU16 ATOMIC_U16_INIT
2040 #[cfg(any(bootstrap, target_has_atomic_load_store = "32"))]
2042 cfg(target_has_atomic = "32"),
2043 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2044 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2045 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2046 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2047 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2048 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2049 unstable(feature = "integer_atomics", issue = "32976"),
2050 "i32", "../../../std/primitive.i32.html",
2052 atomic_min, atomic_max,
2054 "AtomicI32::new(0)",
2055 i32 AtomicI32 ATOMIC_I32_INIT
2057 #[cfg(any(bootstrap, target_has_atomic_load_store = "32"))]
2059 cfg(target_has_atomic = "32"),
2060 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2061 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2062 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2063 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2064 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2065 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2066 unstable(feature = "integer_atomics", issue = "32976"),
2067 "u32", "../../../std/primitive.u32.html",
2069 atomic_umin, atomic_umax,
2071 "AtomicU32::new(0)",
2072 u32 AtomicU32 ATOMIC_U32_INIT
2075 all(bootstrap, target_has_atomic = "64"),
2076 target_has_atomic_load_store = "64"
2079 cfg(target_has_atomic = "64"),
2080 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2081 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2082 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2083 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2084 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2085 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2086 unstable(feature = "integer_atomics", issue = "32976"),
2087 "i64", "../../../std/primitive.i64.html",
2089 atomic_min, atomic_max,
2091 "AtomicI64::new(0)",
2092 i64 AtomicI64 ATOMIC_I64_INIT
2095 all(bootstrap, target_has_atomic = "64"),
2096 target_has_atomic_load_store = "64"
2099 cfg(target_has_atomic = "64"),
2100 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2101 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2102 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2103 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2104 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2105 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2106 unstable(feature = "integer_atomics", issue = "32976"),
2107 "u64", "../../../std/primitive.u64.html",
2109 atomic_umin, atomic_umax,
2111 "AtomicU64::new(0)",
2112 u64 AtomicU64 ATOMIC_U64_INIT
2114 #[cfg(target_has_atomic_load_store = "128")]
2116 cfg(target_has_atomic = "128"),
2117 unstable(feature = "integer_atomics", issue = "32976"),
2118 unstable(feature = "integer_atomics", issue = "32976"),
2119 unstable(feature = "integer_atomics", issue = "32976"),
2120 unstable(feature = "integer_atomics", issue = "32976"),
2121 unstable(feature = "integer_atomics", issue = "32976"),
2122 unstable(feature = "integer_atomics", issue = "32976"),
2123 unstable(feature = "integer_atomics", issue = "32976"),
2124 "i128", "../../../std/primitive.i128.html",
2125 "#![feature(integer_atomics)]\n\n",
2126 atomic_min, atomic_max,
2128 "AtomicI128::new(0)",
2129 i128 AtomicI128 ATOMIC_I128_INIT
2131 #[cfg(target_has_atomic_load_store = "128")]
2133 cfg(target_has_atomic = "128"),
2134 unstable(feature = "integer_atomics", issue = "32976"),
2135 unstable(feature = "integer_atomics", issue = "32976"),
2136 unstable(feature = "integer_atomics", issue = "32976"),
2137 unstable(feature = "integer_atomics", issue = "32976"),
2138 unstable(feature = "integer_atomics", issue = "32976"),
2139 unstable(feature = "integer_atomics", issue = "32976"),
2140 unstable(feature = "integer_atomics", issue = "32976"),
2141 "u128", "../../../std/primitive.u128.html",
2142 "#![feature(integer_atomics)]\n\n",
2143 atomic_umin, atomic_umax,
2145 "AtomicU128::new(0)",
2146 u128 AtomicU128 ATOMIC_U128_INIT
2148 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
2149 #[cfg(target_pointer_width = "16")]
2150 macro_rules! ptr_width {
2153 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
2154 #[cfg(target_pointer_width = "32")]
2155 macro_rules! ptr_width {
2158 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
2159 #[cfg(target_pointer_width = "64")]
2160 macro_rules! ptr_width {
2163 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
2165 cfg(target_has_atomic = "ptr"),
2166 stable(feature = "rust1", since = "1.0.0"),
2167 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2168 stable(feature = "atomic_debug", since = "1.3.0"),
2169 stable(feature = "atomic_access", since = "1.15.0"),
2170 stable(feature = "atomic_from", since = "1.23.0"),
2171 stable(feature = "atomic_nand", since = "1.27.0"),
2172 stable(feature = "rust1", since = "1.0.0"),
2173 "isize", "../../../std/primitive.isize.html",
2175 atomic_min, atomic_max,
2177 "AtomicIsize::new(0)",
2178 isize AtomicIsize ATOMIC_ISIZE_INIT
2180 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
2182 cfg(target_has_atomic = "ptr"),
2183 stable(feature = "rust1", since = "1.0.0"),
2184 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2185 stable(feature = "atomic_debug", since = "1.3.0"),
2186 stable(feature = "atomic_access", since = "1.15.0"),
2187 stable(feature = "atomic_from", since = "1.23.0"),
2188 stable(feature = "atomic_nand", since = "1.27.0"),
2189 stable(feature = "rust1", since = "1.0.0"),
2190 "usize", "../../../std/primitive.usize.html",
2192 atomic_umin, atomic_umax,
2194 "AtomicUsize::new(0)",
2195 usize AtomicUsize ATOMIC_USIZE_INIT
2199 #[cfg(target_has_atomic = "8")]
2200 fn strongest_failure_ordering(order: Ordering) -> Ordering {
2211 unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
2213 Release => intrinsics::atomic_store_rel(dst, val),
2214 Relaxed => intrinsics::atomic_store_relaxed(dst, val),
2215 SeqCst => intrinsics::atomic_store(dst, val),
2216 Acquire => panic!("there is no such thing as an acquire store"),
2217 AcqRel => panic!("there is no such thing as an acquire/release store"),
2222 unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
2224 Acquire => intrinsics::atomic_load_acq(dst),
2225 Relaxed => intrinsics::atomic_load_relaxed(dst),
2226 SeqCst => intrinsics::atomic_load(dst),
2227 Release => panic!("there is no such thing as a release load"),
2228 AcqRel => panic!("there is no such thing as an acquire/release load"),
2233 #[cfg(target_has_atomic = "8")]
2234 unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
2236 Acquire => intrinsics::atomic_xchg_acq(dst, val),
2237 Release => intrinsics::atomic_xchg_rel(dst, val),
2238 AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
2239 Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
2240 SeqCst => intrinsics::atomic_xchg(dst, val),
2244 /// Returns the previous value (like __sync_fetch_and_add).
2246 #[cfg(target_has_atomic = "8")]
2247 unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
2249 Acquire => intrinsics::atomic_xadd_acq(dst, val),
2250 Release => intrinsics::atomic_xadd_rel(dst, val),
2251 AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
2252 Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
2253 SeqCst => intrinsics::atomic_xadd(dst, val),
2257 /// Returns the previous value (like __sync_fetch_and_sub).
2259 #[cfg(target_has_atomic = "8")]
2260 unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
2262 Acquire => intrinsics::atomic_xsub_acq(dst, val),
2263 Release => intrinsics::atomic_xsub_rel(dst, val),
2264 AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
2265 Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
2266 SeqCst => intrinsics::atomic_xsub(dst, val),
2271 #[cfg(target_has_atomic = "8")]
2272 unsafe fn atomic_compare_exchange<T>(dst: *mut T,
2278 let (val, ok) = match (success, failure) {
2279 (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
2280 (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
2281 (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
2282 (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
2283 (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
2284 (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
2285 (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
2286 (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
2287 (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
2288 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
2289 (_, Release) => panic!("there is no such thing as a release failure ordering"),
2290 _ => panic!("a failure ordering can't be stronger than a success ordering"),
2292 if ok { Ok(val) } else { Err(val) }
2296 #[cfg(target_has_atomic = "8")]
2297 unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
2303 let (val, ok) = match (success, failure) {
2304 (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
2305 (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
2306 (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
2307 (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
2308 (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
2309 (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
2310 (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
2311 (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
2312 (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
2313 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
2314 (_, Release) => panic!("there is no such thing as a release failure ordering"),
2315 _ => panic!("a failure ordering can't be stronger than a success ordering"),
2317 if ok { Ok(val) } else { Err(val) }
2321 #[cfg(target_has_atomic = "8")]
2322 unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
2324 Acquire => intrinsics::atomic_and_acq(dst, val),
2325 Release => intrinsics::atomic_and_rel(dst, val),
2326 AcqRel => intrinsics::atomic_and_acqrel(dst, val),
2327 Relaxed => intrinsics::atomic_and_relaxed(dst, val),
2328 SeqCst => intrinsics::atomic_and(dst, val),
2333 #[cfg(target_has_atomic = "8")]
2334 unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
2336 Acquire => intrinsics::atomic_nand_acq(dst, val),
2337 Release => intrinsics::atomic_nand_rel(dst, val),
2338 AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
2339 Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
2340 SeqCst => intrinsics::atomic_nand(dst, val),
2345 #[cfg(target_has_atomic = "8")]
2346 unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
2348 Acquire => intrinsics::atomic_or_acq(dst, val),
2349 Release => intrinsics::atomic_or_rel(dst, val),
2350 AcqRel => intrinsics::atomic_or_acqrel(dst, val),
2351 Relaxed => intrinsics::atomic_or_relaxed(dst, val),
2352 SeqCst => intrinsics::atomic_or(dst, val),
2357 #[cfg(target_has_atomic = "8")]
2358 unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
2360 Acquire => intrinsics::atomic_xor_acq(dst, val),
2361 Release => intrinsics::atomic_xor_rel(dst, val),
2362 AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
2363 Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
2364 SeqCst => intrinsics::atomic_xor(dst, val),
2368 /// returns the max value (signed comparison)
2370 #[cfg(target_has_atomic = "8")]
2371 unsafe fn atomic_max<T>(dst: *mut T, val: T, order: Ordering) -> T {
2373 Acquire => intrinsics::atomic_max_acq(dst, val),
2374 Release => intrinsics::atomic_max_rel(dst, val),
2375 AcqRel => intrinsics::atomic_max_acqrel(dst, val),
2376 Relaxed => intrinsics::atomic_max_relaxed(dst, val),
2377 SeqCst => intrinsics::atomic_max(dst, val),
2381 /// returns the min value (signed comparison)
2383 #[cfg(target_has_atomic = "8")]
2384 unsafe fn atomic_min<T>(dst: *mut T, val: T, order: Ordering) -> T {
2386 Acquire => intrinsics::atomic_min_acq(dst, val),
2387 Release => intrinsics::atomic_min_rel(dst, val),
2388 AcqRel => intrinsics::atomic_min_acqrel(dst, val),
2389 Relaxed => intrinsics::atomic_min_relaxed(dst, val),
2390 SeqCst => intrinsics::atomic_min(dst, val),
2394 /// returns the max value (signed comparison)
2396 #[cfg(target_has_atomic = "8")]
2397 unsafe fn atomic_umax<T>(dst: *mut T, val: T, order: Ordering) -> T {
2399 Acquire => intrinsics::atomic_umax_acq(dst, val),
2400 Release => intrinsics::atomic_umax_rel(dst, val),
2401 AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
2402 Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
2403 SeqCst => intrinsics::atomic_umax(dst, val),
2407 /// returns the min value (signed comparison)
2409 #[cfg(target_has_atomic = "8")]
2410 unsafe fn atomic_umin<T>(dst: *mut T, val: T, order: Ordering) -> T {
2412 Acquire => intrinsics::atomic_umin_acq(dst, val),
2413 Release => intrinsics::atomic_umin_rel(dst, val),
2414 AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
2415 Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
2416 SeqCst => intrinsics::atomic_umin(dst, val),
2420 /// An atomic fence.
2422 /// Depending on the specified order, a fence prevents the compiler and CPU from
2423 /// reordering certain types of memory operations around it.
2424 /// That creates synchronizes-with relationships between it and atomic operations
2425 /// or fences in other threads.
2427 /// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
2428 /// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
2429 /// exist operations X and Y, both operating on some atomic object 'M' such
2430 /// that A is sequenced before X, Y is synchronized before B and Y observes
2431 /// the change to M. This provides a happens-before dependence between A and B.
2434 /// Thread 1 Thread 2
2436 /// fence(Release); A --------------
2437 /// x.store(3, Relaxed); X --------- |
2440 /// -------------> Y if x.load(Relaxed) == 3 {
2441 /// |-------> B fence(Acquire);
2446 /// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
2449 /// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
2450 /// and [`Release`] semantics, participates in the global program order of the
2451 /// other [`SeqCst`] operations and/or fences.
2453 /// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
2457 /// Panics if `order` is [`Relaxed`].
2462 /// use std::sync::atomic::AtomicBool;
2463 /// use std::sync::atomic::fence;
2464 /// use std::sync::atomic::Ordering;
2466 /// // A mutual exclusion primitive based on spinlock.
2467 /// pub struct Mutex {
2468 /// flag: AtomicBool,
2472 /// pub fn new() -> Mutex {
2474 /// flag: AtomicBool::new(false),
2478 /// pub fn lock(&self) {
2479 /// while !self.flag.compare_and_swap(false, true, Ordering::Relaxed) {}
2480 /// // This fence synchronizes-with store in `unlock`.
2481 /// fence(Ordering::Acquire);
2484 /// pub fn unlock(&self) {
2485 /// self.flag.store(false, Ordering::Release);
2490 /// [`Ordering`]: enum.Ordering.html
2491 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2492 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2493 /// [`Release`]: enum.Ordering.html#variant.Release
2494 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2495 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2497 #[stable(feature = "rust1", since = "1.0.0")]
2498 #[cfg_attr(target_arch = "wasm32", allow(unused_variables))]
2499 pub fn fence(order: Ordering) {
2500 // On wasm32 it looks like fences aren't implemented in LLVM yet in that
2501 // they will cause LLVM to abort. The wasm instruction set doesn't have
2502 // fences right now. There's discussion online about the best way for tools
2503 // to conventionally implement fences at
2504 // https://github.com/WebAssembly/tool-conventions/issues/59. We should
2505 // follow that discussion and implement a solution when one comes about!
2506 #[cfg(not(target_arch = "wasm32"))]
2509 Acquire => intrinsics::atomic_fence_acq(),
2510 Release => intrinsics::atomic_fence_rel(),
2511 AcqRel => intrinsics::atomic_fence_acqrel(),
2512 SeqCst => intrinsics::atomic_fence(),
2513 Relaxed => panic!("there is no such thing as a relaxed fence"),
2519 /// A compiler memory fence.
2521 /// `compiler_fence` does not emit any machine code, but restricts the kinds
2522 /// of memory re-ordering the compiler is allowed to do. Specifically, depending on
2523 /// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
2524 /// or writes from before or after the call to the other side of the call to
2525 /// `compiler_fence`. Note that it does **not** prevent the *hardware*
2526 /// from doing such re-ordering. This is not a problem in a single-threaded,
2527 /// execution context, but when other threads may modify memory at the same
2528 /// time, stronger synchronization primitives such as [`fence`] are required.
2530 /// The re-ordering prevented by the different ordering semantics are:
2532 /// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
2533 /// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
2534 /// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
2535 /// - with [`AcqRel`], both of the above rules are enforced.
2537 /// `compiler_fence` is generally only useful for preventing a thread from
2538 /// racing *with itself*. That is, if a given thread is executing one piece
2539 /// of code, and is then interrupted, and starts executing code elsewhere
2540 /// (while still in the same thread, and conceptually still on the same
2541 /// core). In traditional programs, this can only occur when a signal
2542 /// handler is registered. In more low-level code, such situations can also
2543 /// arise when handling interrupts, when implementing green threads with
2544 /// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
2545 /// discussion of [memory barriers].
2549 /// Panics if `order` is [`Relaxed`].
2553 /// Without `compiler_fence`, the `assert_eq!` in following code
2554 /// is *not* guaranteed to succeed, despite everything happening in a single thread.
2555 /// To see why, remember that the compiler is free to swap the stores to
2556 /// `IMPORTANT_VARIABLE` and `IS_READ` since they are both
2557 /// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
2558 /// after `IS_READY` is updated, then the signal handler will see
2559 /// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
2560 /// Using a `compiler_fence` remedies this situation.
2563 /// use std::sync::atomic::{AtomicBool, AtomicUsize};
2564 /// use std::sync::atomic::Ordering;
2565 /// use std::sync::atomic::compiler_fence;
2567 /// static IMPORTANT_VARIABLE: AtomicUsize = AtomicUsize::new(0);
2568 /// static IS_READY: AtomicBool = AtomicBool::new(false);
2571 /// IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
2572 /// // prevent earlier writes from being moved beyond this point
2573 /// compiler_fence(Ordering::Release);
2574 /// IS_READY.store(true, Ordering::Relaxed);
2577 /// fn signal_handler() {
2578 /// if IS_READY.load(Ordering::Relaxed) {
2579 /// assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
2584 /// [`fence`]: fn.fence.html
2585 /// [`Ordering`]: enum.Ordering.html
2586 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2587 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2588 /// [`Release`]: enum.Ordering.html#variant.Release
2589 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2590 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2591 /// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
2593 #[stable(feature = "compiler_fences", since = "1.21.0")]
2594 pub fn compiler_fence(order: Ordering) {
2597 Acquire => intrinsics::atomic_singlethreadfence_acq(),
2598 Release => intrinsics::atomic_singlethreadfence_rel(),
2599 AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
2600 SeqCst => intrinsics::atomic_singlethreadfence(),
2601 Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
2607 #[cfg(any(bootstrap, target_has_atomic_load_store = "8"))]
2608 #[stable(feature = "atomic_debug", since = "1.3.0")]
2609 impl fmt::Debug for AtomicBool {
2610 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2611 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2615 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
2616 #[stable(feature = "atomic_debug", since = "1.3.0")]
2617 impl<T> fmt::Debug for AtomicPtr<T> {
2618 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2619 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2623 #[cfg(any(bootstrap, target_has_atomic_load_store = "ptr"))]
2624 #[stable(feature = "atomic_pointer", since = "1.24.0")]
2625 impl<T> fmt::Pointer for AtomicPtr<T> {
2626 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2627 fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)