//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent types.
//!
//! Rust atomics currently follow the same rules as [C++20 atomics][cpp], specifically `atomic_ref`.
//! Basically, creating a *shared reference* to one of the Rust atomic types corresponds to creating
//! an `atomic_ref` in C++; the `atomic_ref` is destroyed when the lifetime of the shared reference
//! ends. (A Rust atomic type that is exclusively owned or behind a mutable reference does *not*
//! correspond to an "atomic object" in C++, since it can be accessed via non-atomic operations.)
//!
//! This module defines atomic versions of a select number of primitive
//! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
//! [`AtomicI8`], [`AtomicU16`], etc.
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
//! Each method takes an [`Ordering`] which represents the strength of
//! the memory barrier for that operation. These orderings are the
//! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2].
//!
//! [cpp]: https://en.cppreference.com/w/cpp/atomic
//! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order
//! [2]: ../../../nomicon/atomics.html
//!
//! Atomic variables are safe to share between threads (they implement [`Sync`])
//! but they do not themselves provide the mechanism for sharing and follow the
//! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
//! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
//! atomically-reference-counted shared pointer).
//!
//! [arc]: ../../../std/sync/struct.Arc.html
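//!
//! For example, a counter behind an `Arc` can be bumped from several threads
//! (a minimal sketch; the thread count and orderings are arbitrary choices here):
//!
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::thread;
//!
//! let counter = Arc::new(AtomicUsize::new(0));
//! let handles: Vec<_> = (0..4)
//!     .map(|_| {
//!         // Each thread gets its own handle to the same atomic.
//!         let counter = Arc::clone(&counter);
//!         thread::spawn(move || {
//!             counter.fetch_add(1, Ordering::Relaxed);
//!         })
//!     })
//!     .collect();
//! for h in handles {
//!     h.join().unwrap();
//! }
//! assert_eq!(counter.load(Ordering::Relaxed), 4);
//! ```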
//!
//! Atomic types may be stored in static variables, initialized using
//! the constant initializers like [`AtomicBool::new`]. Atomic statics
//! are often used for lazy global initialization.
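//!
//! A sketch of that pattern (the `INITIALIZED` flag and `init_once` function
//! are hypothetical names used only for illustration):
//!
//! ```
//! use std::sync::atomic::{AtomicBool, Ordering};
//!
//! static INITIALIZED: AtomicBool = AtomicBool::new(false);
//!
//! fn init_once() {
//!     // Only the first caller to flip the flag runs the one-time setup.
//!     if !INITIALIZED.swap(true, Ordering::SeqCst) {
//!         // ... perform one-time setup here ...
//!     }
//! }
//! init_once();
//! ```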
//!
//! # Portability
//!
//! All atomic types in this module are guaranteed to be [lock-free] if they're
//! available. This means they don't internally acquire a global mutex. Atomic
//! types and operations are not guaranteed to be wait-free. This means that
//! operations like `fetch_or` may be implemented with a compare-and-swap loop.
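//!
//! A sketch of such a compare-and-swap loop, written against the public API
//! (this is illustrative only, not how the standard library implements
//! `fetch_or` on any particular target):
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering};
//!
//! fn fetch_or_with_cas_loop(a: &AtomicUsize, val: usize) -> usize {
//!     let mut old = a.load(Ordering::Relaxed);
//!     loop {
//!         // Retry until no other thread raced in between our load and store.
//!         match a.compare_exchange_weak(old, old | val, Ordering::SeqCst, Ordering::Relaxed) {
//!             Ok(prev) => return prev,
//!             Err(actual) => old = actual,
//!         }
//!     }
//! }
//!
//! let a = AtomicUsize::new(0b0101);
//! assert_eq!(fetch_or_with_cas_loop(&a, 0b0011), 0b0101);
//! assert_eq!(a.load(Ordering::Relaxed), 0b0111);
//! ```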
//!
//! Atomic operations may be implemented at the instruction layer with
//! larger-size atomics. For example, some platforms use 4-byte atomic
//! instructions to implement `AtomicI8`. Note that this emulation should not
//! have an impact on the correctness of code; it's just something to be aware of.
//!
//! The atomic types in this module might not be available on all platforms. The
//! atomic types here are all widely available, however, and can generally be
//! relied upon to exist. Some notable exceptions are:
//!
//! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or
//!   `AtomicI64` types.
//! * ARM platforms like `armv5te` that aren't for Linux only provide `load`
//!   and `store` operations, and do not support Compare and Swap (CAS)
//!   operations, such as `swap`, `fetch_add`, etc. Additionally on Linux,
//!   these CAS operations are implemented via [operating system support], which
//!   may come with a performance penalty.
//! * ARM targets with `thumbv6m` only provide `load` and `store` operations,
//!   and do not support Compare and Swap (CAS) operations, such as `swap`,
//!   `fetch_add`, etc.
//!
//! [operating system support]: https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt
//!
//! Note that future platforms may be added that also do not have support for
//! some atomic operations. Maximally portable code will want to be careful
//! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are
//! generally the most portable, but even then they're not available everywhere.
//! For reference, the `std` library requires `AtomicBool`s and pointer-sized atomics, although
//! `core` does not.
//!
//! The `#[cfg(target_has_atomic)]` attribute can be used to conditionally
//! compile based on the target's supported bit widths. It is a key-value
//! option set for each supported size, with values "8", "16", "32", "64",
//! "128", and "ptr" for pointer-sized atomics.
//!
//! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm
//!
//! # Examples
//!
//! A simple spinlock:
//!
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::{hint, thread};
//!
//! fn main() {
//!     let spinlock = Arc::new(AtomicUsize::new(1));
//!
//!     let spinlock_clone = Arc::clone(&spinlock);
//!     let thread = thread::spawn(move || {
//!         spinlock_clone.store(0, Ordering::SeqCst);
//!     });
//!
//!     // Wait for the other thread to release the lock
//!     while spinlock.load(Ordering::SeqCst) != 0 {
//!         hint::spin_loop();
//!     }
//!
//!     if let Err(panic) = thread.join() {
//!         println!("Thread had an error: {panic:?}");
//!     }
//! }
//! ```
//!
//! Keep a global count of live threads:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering};
//!
//! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0);
//!
//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
//! println!("live threads: {}", old_thread_count + 1);
//! ```

#![stable(feature = "rust1", since = "1.0.0")]
#![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
#![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))]
#![rustc_diagnostic_item = "atomic_mod"]

use self::Ordering::*;

use crate::cell::UnsafeCell;
use crate::intrinsics;

use crate::hint::spin_loop;

/// A boolean type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a [`bool`].
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of `u8`.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "AtomicBool"]
#[repr(C, align(1))]
pub struct AtomicBool {
    v: UnsafeCell<u8>,
}

#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
impl const Default for AtomicBool {
    /// Creates an `AtomicBool` initialized to `false`.
    #[inline]
    fn default() -> Self {
        Self::new(false)
    }
}

// Send is implicitly implemented for AtomicBool.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicBool {}

/// A raw pointer type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a `*mut T`.
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of pointers. Its size depends on the target pointer's size.
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "AtomicPtr")]
#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
pub struct AtomicPtr<T> {
    p: UnsafeCell<*mut T>,
}

#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
impl<T> const Default for AtomicPtr<T> {
    /// Creates a null `AtomicPtr<T>`.
    fn default() -> AtomicPtr<T> {
        AtomicPtr::new(crate::ptr::null_mut())
    }
}

#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Sync for AtomicPtr<T> {}

/// Atomic memory orderings
///
/// Memory orderings specify the way atomic operations synchronize memory.
/// In its weakest [`Ordering::Relaxed`], only the memory directly touched by the
/// operation is synchronized. On the other hand, a store-load pair of [`Ordering::SeqCst`]
/// operations synchronize other memory while additionally preserving a total order of such
/// operations across all threads.
///
/// Rust's memory orderings are [the same as those of
/// C++20](https://en.cppreference.com/w/cpp/atomic/memory_order).
///
/// For more information see the [nomicon].
///
/// [nomicon]: ../../../nomicon/atomics.html
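///
/// For example, a classic message-passing handoff pairs a `Release` store with
/// an `Acquire` load (a minimal sketch; the `DATA`/`READY` names are
/// illustrative only):
///
/// ```
/// use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
/// use std::thread;
///
/// static DATA: AtomicUsize = AtomicUsize::new(0);
/// static READY: AtomicBool = AtomicBool::new(false);
///
/// let t = thread::spawn(|| {
///     DATA.store(42, Ordering::Relaxed);
///     READY.store(true, Ordering::Release); // publish
/// });
/// while !READY.load(Ordering::Acquire) {} // wait to observe the publication
/// // The Release/Acquire pair guarantees the write to DATA is visible here.
/// assert_eq!(DATA.load(Ordering::Relaxed), 42);
/// # t.join().unwrap();
/// ```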
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
#[non_exhaustive]
#[rustc_diagnostic_item = "Ordering"]
pub enum Ordering {
    /// No ordering constraints, only atomic operations.
    ///
    /// Corresponds to [`memory_order_relaxed`] in C++20.
    ///
    /// [`memory_order_relaxed`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous operations become ordered
    /// before any load of this value with [`Acquire`] (or stronger) ordering.
    /// In particular, all previous writes become visible to all threads
    /// that perform an [`Acquire`] (or stronger) load of this value.
    ///
    /// Notice that using this ordering for an operation that combines loads
    /// and stores leads to a [`Relaxed`] load operation!
    ///
    /// This ordering is only applicable for operations that can perform a store.
    ///
    /// Corresponds to [`memory_order_release`] in C++20.
    ///
    /// [`memory_order_release`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, if the loaded value was written by a store operation with
    /// [`Release`] (or stronger) ordering, then all subsequent operations
    /// become ordered after that store. In particular, all subsequent loads will see data
    /// written before the store.
    ///
    /// Notice that using this ordering for an operation that combines loads
    /// and stores leads to a [`Relaxed`] store operation!
    ///
    /// This ordering is only applicable for operations that can perform a load.
    ///
    /// Corresponds to [`memory_order_acquire`] in C++20.
    ///
    /// [`memory_order_acquire`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// Has the effects of both [`Acquire`] and [`Release`] together:
    /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
    ///
    /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
    /// not performing any store and hence it has just [`Acquire`] ordering. However,
    /// `AcqRel` will never perform [`Relaxed`] accesses.
    ///
    /// This ordering is only applicable for operations that combine both loads and stores.
    ///
    /// Corresponds to [`memory_order_acq_rel`] in C++20.
    ///
    /// [`memory_order_acq_rel`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
    /// operations, respectively) with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    ///
    /// Corresponds to [`memory_order_seq_cst`] in C++20.
    ///
    /// [`memory_order_seq_cst`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
}

/// An [`AtomicBool`] initialized to `false`.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(
    since = "1.34.0",
    note = "the `new` function is now preferred",
    suggestion = "AtomicBool::new(false)"
)]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);

#[cfg(target_has_atomic_load_store = "8")]
impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
    #[must_use]
    pub const fn new(v: bool) -> AtomicBool {
        AtomicBool { v: UnsafeCell::new(v as u8) }
    }

    /// Returns a mutable reference to the underlying [`bool`].
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bool = AtomicBool::new(true);
    /// assert_eq!(*some_bool.get_mut(), true);
    /// *some_bool.get_mut() = false;
    /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn get_mut(&mut self) -> &mut bool {
        // SAFETY: the mutable reference guarantees unique ownership.
        unsafe { &mut *(self.v.get() as *mut bool) }
    }

    /// Get atomic access to a `&mut bool`.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(atomic_from_mut)]
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bool = true;
    /// let a = AtomicBool::from_mut(&mut some_bool);
    /// a.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool, false);
    /// ```
    #[inline]
    #[cfg(target_has_atomic_equal_alignment = "8")]
    #[unstable(feature = "atomic_from_mut", issue = "76314")]
    pub fn from_mut(v: &mut bool) -> &mut Self {
        // SAFETY: the mutable reference guarantees unique ownership, and
        // alignment of both `bool` and `Self` is 1.
        unsafe { &mut *(v as *mut bool as *mut Self) }
    }

    /// Get non-atomic access to a `&mut [AtomicBool]` slice.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(atomic_from_mut, inline_const)]
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bools = [const { AtomicBool::new(false) }; 10];
    ///
    /// let view: &mut [bool] = AtomicBool::get_mut_slice(&mut some_bools);
    /// assert_eq!(view, [false; 10]);
    /// view[..5].copy_from_slice(&[true; 5]);
    ///
    /// std::thread::scope(|s| {
    ///     for t in &some_bools[..5] {
    ///         s.spawn(move || assert_eq!(t.load(Ordering::Relaxed), true));
    ///     }
    ///
    ///     for f in &some_bools[5..] {
    ///         s.spawn(move || assert_eq!(f.load(Ordering::Relaxed), false));
    ///     }
    /// });
    /// ```
    #[inline]
    #[unstable(feature = "atomic_from_mut", issue = "76314")]
    pub fn get_mut_slice(this: &mut [Self]) -> &mut [bool] {
        // SAFETY: the mutable reference guarantees unique ownership.
        unsafe { &mut *(this as *mut [Self] as *mut [bool]) }
    }

    /// Get atomic access to a `&mut [bool]` slice.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(atomic_from_mut)]
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bools = [false; 10];
    /// let a = &*AtomicBool::from_mut_slice(&mut some_bools);
    /// std::thread::scope(|s| {
    ///     for i in 0..a.len() {
    ///         s.spawn(move || a[i].store(true, Ordering::Relaxed));
    ///     }
    /// });
    /// assert_eq!(some_bools, [true; 10]);
    /// ```
    #[inline]
    #[cfg(target_has_atomic_equal_alignment = "8")]
    #[unstable(feature = "atomic_from_mut", issue = "76314")]
    pub fn from_mut_slice(v: &mut [bool]) -> &mut [Self] {
        // SAFETY: the mutable reference guarantees unique ownership, and
        // alignment of both `bool` and `Self` is 1.
        unsafe { &mut *(v as *mut [bool] as *mut [Self]) }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let some_bool = AtomicBool::new(true);
    /// assert_eq!(some_bool.into_inner(), true);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
    pub const fn into_inner(self) -> bool {
        self.v.into_inner() != 0
    }

    /// Loads a value from the bool.
    ///
    /// `load` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Release`] or [`AcqRel`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn load(&self, order: Ordering) -> bool {
        // SAFETY: any data races are prevented by atomic intrinsics and the raw
        // pointer passed in is valid because we got it from a reference.
        unsafe { atomic_load(self.v.get(), order) != 0 }
    }

    /// Stores a value into the bool.
    ///
    /// `store` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Acquire`] or [`AcqRel`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// some_bool.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn store(&self, val: bool, order: Ordering) {
        // SAFETY: any data races are prevented by atomic intrinsics and the raw
        // pointer passed in is valid because we got it from a reference.
        unsafe {
            atomic_store(self.v.get(), val as u8, order);
        }
    }

    /// Stores a value into the bool, returning the previous value.
    ///
    /// `swap` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
    }

    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
    /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
    /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
    /// happens, and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Migrating to `compare_exchange` and `compare_exchange_weak`
    ///
    /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
    /// memory orderings:
    ///
    /// Original | Success | Failure
    /// -------- | ------- | -------
    /// Relaxed  | Relaxed | Relaxed
    /// Acquire  | Acquire | Acquire
    /// Release  | Release | Relaxed
    /// AcqRel   | AcqRel  | Acquire
    /// SeqCst   | SeqCst  | SeqCst
    ///
    /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
    /// which allows the compiler to generate better assembly code when the compare and swap
    /// is used in a loop.
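    ///
    /// A sketch of the migration (illustrative only, assuming the original
    /// call used `SeqCst`):
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let a = AtomicBool::new(false);
    /// // Before: `let old = a.compare_and_swap(false, true, Ordering::SeqCst);`
    /// // After (SeqCst maps to SeqCst/SeqCst per the table above):
    /// let old = a.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
    ///     .unwrap_or_else(|x| x);
    /// assert_eq!(old, false);
    /// ```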
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[deprecated(
        since = "1.50.0",
        note = "Use `compare_exchange` or `compare_exchange_weak` instead"
    )]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
    /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
    /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_exchange(true,
    ///                                       false,
    ///                                       Ordering::Acquire,
    ///                                       Ordering::Relaxed),
    ///            Ok(true));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_exchange(true, true,
    ///                                       Ordering::SeqCst,
    ///                                       Ordering::Acquire),
    ///            Err(false));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    #[doc(alias = "compare_and_swap")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn compare_exchange(
        &self,
        current: bool,
        new: bool,
        success: Ordering,
        failure: Ordering,
    ) -> Result<bool, bool> {
        // SAFETY: data races are prevented by atomic intrinsics.
        match unsafe {
            atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
        } {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }

    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// Unlike [`AtomicBool::compare_exchange`], this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
    /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
    /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let val = AtomicBool::new(false);
    ///
    /// let new = true;
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    #[doc(alias = "compare_and_swap")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn compare_exchange_weak(
        &self,
        current: bool,
        new: bool,
        success: Ordering,
        failure: Ordering,
    ) -> Result<bool, bool> {
        // SAFETY: data races are prevented by atomic intrinsics.
        match unsafe {
            atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
        } {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }

    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "nand" with a boolean value.
    ///
    /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        // We can't use atomic_nand here because it can result in a bool with
        // an invalid value. This happens because the atomic operation is done
        // with an 8-bit integer internally, which would set the upper 7 bits.
        // So we just use fetch_xor or swap instead.
        if val {
            // !(x & true) == !x
            // We must invert the bool.
            self.fetch_xor(true, order)
        } else {
            // !(x & false) == true
            // We must set the bool to true.
            self.swap(true, order)
        }
    }

    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
    /// new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "not" with a boolean value.
    ///
    /// Performs a logical "not" operation on the current value, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// `fetch_not` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(atomic_bool_fetch_not)]
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_not(Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_not(Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```
    #[inline]
    #[unstable(feature = "atomic_bool_fetch_not", issue = "98485")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_not(&self, order: Ordering) -> bool {
        self.fetch_xor(true, order)
    }

    /// Returns a mutable pointer to the underlying [`bool`].
    ///
    /// Doing non-atomic reads and writes on the resulting integer can be a data race.
    /// This method is mostly useful for FFI, where the function signature may use
    /// `*mut bool` instead of `&AtomicBool`.
    ///
    /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
    /// atomic types work with interior mutability. All modifications of an atomic change the value
    /// through a shared reference, and can do so safely as long as they use atomic operations. Any
    /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
    /// restriction: operations on it must be atomic.
    ///
    /// # Examples
    ///
    /// ```ignore (extern-declaration)
    /// use std::sync::atomic::AtomicBool;
    ///
    /// extern "C" {
    ///     fn my_atomic_op(arg: *mut bool);
    /// }
    ///
    /// let mut atomic = AtomicBool::new(true);
    /// unsafe {
    ///     my_atomic_op(atomic.as_mut_ptr());
    /// }
    /// ```
    #[inline]
    #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")]
    pub fn as_mut_ptr(&self) -> *mut bool {
        self.v.get() as *mut bool
    }

    /// Fetches the value, and applies a function to it that returns an optional
    /// new value. Returns a `Result` of `Ok(previous_value)` if the function
    /// returned `Some(_)`, else `Err(previous_value)`.
    ///
    /// Note: This may call the function multiple times if the value has been
    /// changed from other threads in the meantime, as long as the function
    /// returns `Some(_)`, but the function will have been applied only once to
    /// the stored value.
    ///
    /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering for
    /// when the operation finally succeeds while the second describes the
    /// required ordering for loads. These correspond to the success and failure
    /// orderings of [`AtomicBool::compare_exchange`] respectively.
    ///
    /// Using [`Acquire`] as success ordering makes the store part of this
    /// operation [`Relaxed`], and using [`Release`] makes the final successful
    /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
    /// [`Acquire`] or [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Considerations
    ///
    /// This method is not magic; it is not provided by the hardware.
    /// It is implemented in terms of [`AtomicBool::compare_exchange_weak`], and suffers from the same drawbacks.
    /// In particular, this method will not circumvent the [ABA Problem].
    ///
    /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let x = AtomicBool::new(false);
    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(false));
    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(false));
    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(true));
    /// assert_eq!(x.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "atomic_fetch_update", since = "1.53.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_update<F>(
        &self,
        set_order: Ordering,
        fetch_order: Ordering,
        mut f: F,
    ) -> Result<bool, bool>
    where
        F: FnMut(bool) -> Option<bool>,
    {
        let mut prev = self.load(fetch_order);
        while let Some(next) = f(prev) {
            match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
                x @ Ok(_) => return x,
                Err(next_prev) => prev = next_prev,
            }
        }
        Err(prev)
    }
}

#[cfg(target_has_atomic_load_store = "ptr")]
impl<T> AtomicPtr<T> {
    /// Creates a new `AtomicPtr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let ptr = &mut 5;
    /// let atomic_ptr = AtomicPtr::new(ptr);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
    pub const fn new(p: *mut T) -> AtomicPtr<T> {
        AtomicPtr { p: UnsafeCell::new(p) }
    }

    /// Returns a mutable reference to the underlying pointer.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let mut data = 10;
    /// let mut atomic_ptr = AtomicPtr::new(&mut data);
    /// let mut other_data = 5;
    /// *atomic_ptr.get_mut() = &mut other_data;
    /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn get_mut(&mut self) -> &mut *mut T {
        self.p.get_mut()
    }

    /// Get atomic access to a pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(atomic_from_mut)]
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let mut data = 123;
    /// let mut some_ptr = &mut data as *mut i32;
    /// let a = AtomicPtr::from_mut(&mut some_ptr);
    /// let mut other_data = 456;
    /// a.store(&mut other_data, Ordering::Relaxed);
    /// assert_eq!(unsafe { *some_ptr }, 456);
    /// ```
    #[inline]
    #[cfg(target_has_atomic_equal_alignment = "ptr")]
    #[unstable(feature = "atomic_from_mut", issue = "76314")]
    pub fn from_mut(v: &mut *mut T) -> &mut Self {
        use crate::mem::align_of;
        let [] = [(); align_of::<AtomicPtr<()>>() - align_of::<*mut ()>()];
        // SAFETY:
        //  - the mutable reference guarantees unique ownership.
        //  - the alignment of `*mut T` and `Self` is the same on all platforms
        //    supported by rust, as verified above.
        unsafe { &mut *(v as *mut *mut T as *mut Self) }
    }

    /// Get non-atomic access to a `&mut [AtomicPtr]` slice.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(atomic_from_mut, inline_const)]
    /// use std::ptr::null_mut;
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let mut some_ptrs = [const { AtomicPtr::new(null_mut::<String>()) }; 10];
    ///
    /// let view: &mut [*mut String] = AtomicPtr::get_mut_slice(&mut some_ptrs);
    /// assert_eq!(view, [null_mut::<String>(); 10]);
    ///
    /// view.iter_mut()
    ///     .enumerate()
    ///     .for_each(|(i, ptr)| *ptr = Box::into_raw(Box::new(format!("iteration#{i}"))));
    ///
    /// std::thread::scope(|s| {
    ///     for ptr in &some_ptrs {
    ///         s.spawn(move || {
    ///             let ptr = ptr.load(Ordering::Relaxed);
    ///             assert!(!ptr.is_null());
    ///
    ///             let name = unsafe { Box::from_raw(ptr) };
    ///             println!("Hello, {name}!");
    ///         });
    ///     }
    /// });
    /// ```
    #[inline]
    #[unstable(feature = "atomic_from_mut", issue = "76314")]
    pub fn get_mut_slice(this: &mut [Self]) -> &mut [*mut T] {
        // SAFETY: the mutable reference guarantees unique ownership.
        unsafe { &mut *(this as *mut [Self] as *mut [*mut T]) }
    }

    /// Get atomic access to a slice of pointers.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(atomic_from_mut)]
    /// use std::ptr::null_mut;
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let mut some_ptrs = [null_mut::<String>(); 10];
    /// let a = &*AtomicPtr::from_mut_slice(&mut some_ptrs);
    /// std::thread::scope(|s| {
    ///     for i in 0..a.len() {
    ///         s.spawn(move || {
    ///             let name = Box::new(format!("thread{i}"));
    ///             a[i].store(Box::into_raw(name), Ordering::Relaxed);
    ///         });
    ///     }
    /// });
    /// for p in some_ptrs {
    ///     assert!(!p.is_null());
    ///     let name = unsafe { Box::from_raw(p) };
    ///     println!("Hello, {name}!");
    /// }
    /// ```
    #[inline]
    #[cfg(target_has_atomic_equal_alignment = "ptr")]
    #[unstable(feature = "atomic_from_mut", issue = "76314")]
    pub fn from_mut_slice(v: &mut [*mut T]) -> &mut [Self] {
        // SAFETY:
        //  - the mutable reference guarantees unique ownership.
        //  - the alignment of `*mut T` and `Self` is the same on all platforms
        //    supported by rust, as verified above.
        unsafe { &mut *(v as *mut [*mut T] as *mut [Self]) }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let mut data = 5;
    /// let atomic_ptr = AtomicPtr::new(&mut data);
    /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
    pub const fn into_inner(self) -> *mut T {
        self.p.into_inner()
    }

    /// Loads a value from the pointer.
    ///
    /// `load` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Release`] or [`AcqRel`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let value = some_ptr.load(Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn load(&self, order: Ordering) -> *mut T {
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe { atomic_load(self.p.get(), order) }
    }

    /// Stores a value into the pointer.
    ///
    /// `store` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Acquire`] or [`AcqRel`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// some_ptr.store(other_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn store(&self, ptr: *mut T, order: Ordering) {
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_store(self.p.get(), ptr, order);
        }
    }

    /// Stores a value into the pointer, returning the previous value.
    ///
    /// `swap` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on pointers.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "ptr")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe { atomic_swap(self.p.get(), ptr, order) }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
    /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
    /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
    /// happens, and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on pointers.
    ///
    /// # Migrating to `compare_exchange` and `compare_exchange_weak`
    ///
    /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
    /// memory orderings:
    ///
    /// Original | Success | Failure
    /// -------- | ------- | -------
    /// Relaxed  | Relaxed | Relaxed
    /// Acquire  | Acquire | Acquire
    /// Release  | Release | Relaxed
    /// AcqRel   | AcqRel  | Acquire
    /// SeqCst   | SeqCst  | SeqCst
    ///
    /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
    /// which allows the compiler to generate better assembly code when the compare and swap
    /// is used in a loop.
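    ///
    /// A sketch of the migration for the pointer case (illustrative only,
    /// assuming the original call used `SeqCst`):
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let mut v = 5;
    /// let p: *mut i32 = &mut v;
    /// let a = AtomicPtr::new(p);
    /// // Before: `a.compare_and_swap(p, p, Ordering::SeqCst)`
    /// // After (SeqCst maps to SeqCst/SeqCst per the table above):
    /// let old = a.compare_exchange(p, p, Ordering::SeqCst, Ordering::SeqCst)
    ///     .unwrap_or_else(|x| x);
    /// assert_eq!(old, p);
    /// ```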
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[deprecated(
        since = "1.50.0",
        note = "Use `compare_exchange` or `compare_exchange_weak` instead"
    )]
    #[cfg(target_has_atomic = "ptr")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
    /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
    /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on pointers.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_exchange(ptr, other_ptr,
    ///                                       Ordering::SeqCst, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    #[cfg(target_has_atomic = "ptr")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn compare_exchange(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe { atomic_compare_exchange(self.p.get(), current, new, success, failure) }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// Unlike [`AtomicPtr::compare_exchange`], this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
    /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
    /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on pointers.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let some_ptr = AtomicPtr::new(&mut 5);
    ///
    /// let new = &mut 10;
    /// let mut old = some_ptr.load(Ordering::Relaxed);
    /// loop {
    ///     match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    #[cfg(target_has_atomic = "ptr")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn compare_exchange_weak(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        // SAFETY: This intrinsic is unsafe because it operates on a raw pointer
        // but we know for sure that the pointer is valid (we just got it from
        // an `UnsafeCell` that we have by reference) and the atomic operation
        // itself allows us to safely mutate the `UnsafeCell` contents.
        unsafe { atomic_compare_exchange_weak(self.p.get(), current, new, success, failure) }
    }

    /// Fetches the value, and applies a function to it that returns an optional
    /// new value. Returns a `Result` of `Ok(previous_value)` if the function
    /// returned `Some(_)`, else `Err(previous_value)`.
    ///
    /// Note: This may call the function multiple times if the value has been
    /// changed from other threads in the meantime, as long as the function
    /// returns `Some(_)`, but the function will have been applied only once to
    /// the stored value.
    ///
    /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering for
    /// when the operation finally succeeds while the second describes the
    /// required ordering for loads. These correspond to the success and failure
    /// orderings of [`AtomicPtr::compare_exchange`] respectively.
    ///
    /// Using [`Acquire`] as success ordering makes the store part of this
    /// operation [`Relaxed`], and using [`Release`] makes the final successful
    /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
    /// [`Acquire`] or [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on pointers.
    ///
    /// # Considerations
    ///
    /// This method is not magic; it is not provided by the hardware.
    /// It is implemented in terms of [`AtomicPtr::compare_exchange_weak`], and suffers from the same drawbacks.
    /// In particular, this method will not circumvent the [ABA Problem].
    ///
    /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr: *mut _ = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let new: *mut _ = &mut 10;
    /// assert_eq!(some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(ptr));
    /// let result = some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
    ///     if x == ptr {
    ///         Some(new)
    ///     } else {
    ///         None
    ///     }
    /// });
    /// assert_eq!(result, Ok(ptr));
    /// assert_eq!(some_ptr.load(Ordering::SeqCst), new);
    /// ```
    #[inline]
    #[stable(feature = "atomic_fetch_update", since = "1.53.0")]
    #[cfg(target_has_atomic = "ptr")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_update<F>(
        &self,
        set_order: Ordering,
        fetch_order: Ordering,
        mut f: F,
    ) -> Result<*mut T, *mut T>
    where
        F: FnMut(*mut T) -> Option<*mut T>,
    {
        let mut prev = self.load(fetch_order);
        while let Some(next) = f(prev) {
            match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
                x @ Ok(_) => return x,
                Err(next_prev) => prev = next_prev,
            }
        }
        Err(prev)
    }

    /// Offsets the pointer's address by adding `val` (in units of `T`),
    /// returning the previous pointer.
    ///
    /// This is equivalent to using [`wrapping_add`] to atomically perform the
    /// equivalent of `ptr = ptr.wrapping_add(val);`.
    ///
    /// This method operates in units of `T`, which means that it cannot be used
    /// to offset the pointer by an amount which is not a multiple of
    /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
    /// work with a deliberately misaligned pointer. In such cases, you may use
    /// the [`fetch_byte_add`](Self::fetch_byte_add) method instead.
    ///
    /// `fetch_ptr_add` takes an [`Ordering`] argument which describes the
    /// memory ordering of this operation. All ordering modes are possible. Note
    /// that using [`Acquire`] makes the store part of this operation
    /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note**: This method is only available on platforms that support atomic
    /// operations on [`AtomicPtr`].
    ///
    /// [`wrapping_add`]: pointer::wrapping_add
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
    /// assert_eq!(atom.fetch_ptr_add(1, Ordering::Relaxed).addr(), 0);
    /// // Note: units of `size_of::<i64>()`.
    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 8);
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "ptr")]
    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T {
        self.fetch_byte_add(val.wrapping_mul(core::mem::size_of::<T>()), order)
    }

    /// Offsets the pointer's address by subtracting `val` (in units of `T`),
    /// returning the previous pointer.
    ///
    /// This is equivalent to using [`wrapping_sub`] to atomically perform the
    /// equivalent of `ptr = ptr.wrapping_sub(val);`.
    ///
    /// This method operates in units of `T`, which means that it cannot be used
    /// to offset the pointer by an amount which is not a multiple of
    /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
    /// work with a deliberately misaligned pointer. In such cases, you may use
    /// the [`fetch_byte_sub`](Self::fetch_byte_sub) method instead.
    ///
    /// `fetch_ptr_sub` takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation. All ordering modes are possible. Note that
    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
    /// and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note**: This method is only available on platforms that support atomic
    /// operations on [`AtomicPtr`].
    ///
    /// [`wrapping_sub`]: pointer::wrapping_sub
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let array = [1i32, 2i32];
    /// let atom = AtomicPtr::new(array.as_ptr().wrapping_add(1) as *mut _);
    ///
    /// assert!(core::ptr::eq(
    ///     atom.fetch_ptr_sub(1, Ordering::Relaxed),
    ///     &array[1],
    /// ));
    /// assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &array[0]));
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "ptr")]
    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T {
        self.fetch_byte_sub(val.wrapping_mul(core::mem::size_of::<T>()), order)
    }
/// Offsets the pointer's address by adding `val` *bytes*, returning the
/// previous pointer.
///
/// This is equivalent to using [`wrapping_byte_add`] to atomically
/// perform `ptr = ptr.wrapping_byte_add(val)`.
///
/// `fetch_byte_add` takes an [`Ordering`] argument which describes the
/// memory ordering of this operation. All ordering modes are possible. Note
/// that using [`Acquire`] makes the store part of this operation
/// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
///
/// **Note**: This method is only available on platforms that support atomic
/// operations on [`AtomicPtr`].
///
/// [`wrapping_byte_add`]: pointer::wrapping_byte_add
///
/// # Examples
///
/// ```
/// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
/// use core::sync::atomic::{AtomicPtr, Ordering};
///
/// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
/// assert_eq!(atom.fetch_byte_add(1, Ordering::Relaxed).addr(), 0);
/// // Note: in units of bytes, not `size_of::<i64>()`.
/// assert_eq!(atom.load(Ordering::Relaxed).addr(), 1);
/// ```
#[cfg(target_has_atomic = "ptr")]
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
    // SAFETY: data races are prevented by atomic intrinsics.
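    // `invalid_mut(val)` just smuggles the `usize` operand through the
    // pointer-typed intrinsic; it is never dereferenced, and the returned
    // pointer keeps the provenance of the value stored in the atomic.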
    unsafe { atomic_add(self.p.get(), core::ptr::invalid_mut(val), order).cast() }
}
/// Offsets the pointer's address by subtracting `val` *bytes*, returning the
/// previous pointer.
///
/// This is equivalent to using [`wrapping_byte_sub`] to atomically
/// perform `ptr = ptr.wrapping_byte_sub(val)`.
///
/// `fetch_byte_sub` takes an [`Ordering`] argument which describes the
/// memory ordering of this operation. All ordering modes are possible. Note
/// that using [`Acquire`] makes the store part of this operation
/// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
///
/// **Note**: This method is only available on platforms that support atomic
/// operations on [`AtomicPtr`].
///
/// [`wrapping_byte_sub`]: pointer::wrapping_byte_sub
///
/// # Examples
///
/// ```
/// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
/// use core::sync::atomic::{AtomicPtr, Ordering};
///
/// let atom = AtomicPtr::<i64>::new(core::ptr::invalid_mut(1));
/// assert_eq!(atom.fetch_byte_sub(1, Ordering::Relaxed).addr(), 1);
/// assert_eq!(atom.load(Ordering::Relaxed).addr(), 0);
/// ```
#[cfg(target_has_atomic = "ptr")]
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
    // SAFETY: data races are prevented by atomic intrinsics.
    unsafe { atomic_sub(self.p.get(), core::ptr::invalid_mut(val), order).cast() }
}
/// Performs a bitwise "or" operation on the address of the current pointer,
/// and the argument `val`, and stores a pointer with provenance of the
/// current pointer and the resulting address.
///
/// This is equivalent to using [`map_addr`] to atomically
/// perform `ptr = ptr.map_addr(|a| a | val)`. This can be used in tagged
/// pointer schemes to atomically set tag bits.
///
/// **Caveat**: This operation returns the previous value. To compute the
/// stored value without losing provenance, you may use [`map_addr`]. For
/// example: `a.fetch_or(val).map_addr(|a| a | val)`.
///
/// `fetch_or` takes an [`Ordering`] argument which describes the memory
/// ordering of this operation. All ordering modes are possible. Note that
/// using [`Acquire`] makes the store part of this operation [`Relaxed`],
/// and using [`Release`] makes the load part [`Relaxed`].
///
/// **Note**: This method is only available on platforms that support atomic
/// operations on [`AtomicPtr`].
///
/// This API and its claimed semantics are part of the Strict Provenance
/// experiment, see the [module documentation for `ptr`][crate::ptr] for
/// more details.
///
/// [`map_addr`]: pointer::map_addr
///
/// # Examples
///
/// ```
/// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
/// use core::sync::atomic::{AtomicPtr, Ordering};
///
/// let pointer = &mut 3i64 as *mut i64;
///
/// let atom = AtomicPtr::<i64>::new(pointer);
/// // Tag the bottom bit of the pointer.
/// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 0);
/// // Extract and untag.
/// let tagged = atom.load(Ordering::Relaxed);
/// assert_eq!(tagged.addr() & 1, 1);
/// assert_eq!(tagged.map_addr(|p| p & !1), pointer);
/// ```
#[cfg(target_has_atomic = "ptr")]
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
    // SAFETY: data races are prevented by atomic intrinsics.
    unsafe { atomic_or(self.p.get(), core::ptr::invalid_mut(val), order).cast() }
}
/// Performs a bitwise "and" operation on the address of the current
/// pointer, and the argument `val`, and stores a pointer with provenance of
/// the current pointer and the resulting address.
///
/// This is equivalent to using [`map_addr`] to atomically
/// perform `ptr = ptr.map_addr(|a| a & val)`. This can be used in tagged
/// pointer schemes to atomically unset tag bits.
///
/// **Caveat**: This operation returns the previous value. To compute the
/// stored value without losing provenance, you may use [`map_addr`]. For
/// example: `a.fetch_and(val).map_addr(|a| a & val)`.
///
/// `fetch_and` takes an [`Ordering`] argument which describes the memory
/// ordering of this operation. All ordering modes are possible. Note that
/// using [`Acquire`] makes the store part of this operation [`Relaxed`],
/// and using [`Release`] makes the load part [`Relaxed`].
///
/// **Note**: This method is only available on platforms that support atomic
/// operations on [`AtomicPtr`].
///
/// This API and its claimed semantics are part of the Strict Provenance
/// experiment, see the [module documentation for `ptr`][crate::ptr] for
/// more details.
///
/// [`map_addr`]: pointer::map_addr
///
/// # Examples
///
/// ```
/// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
/// use core::sync::atomic::{AtomicPtr, Ordering};
///
/// let pointer = &mut 3i64 as *mut i64;
/// // A tagged pointer
/// let atom = AtomicPtr::<i64>::new(pointer.map_addr(|a| a | 1));
/// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 1);
/// // Untag, and extract the previously tagged pointer.
/// let untagged = atom.fetch_and(!1, Ordering::Relaxed)
///     .map_addr(|a| a & !1);
/// assert_eq!(untagged, pointer);
/// ```
#[cfg(target_has_atomic = "ptr")]
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
    // SAFETY: data races are prevented by atomic intrinsics.
    unsafe { atomic_and(self.p.get(), core::ptr::invalid_mut(val), order).cast() }
}
/// Performs a bitwise "xor" operation on the address of the current
/// pointer, and the argument `val`, and stores a pointer with provenance of
/// the current pointer and the resulting address.
///
/// This is equivalent to using [`map_addr`] to atomically
/// perform `ptr = ptr.map_addr(|a| a ^ val)`. This can be used in tagged
/// pointer schemes to atomically toggle tag bits.
///
/// **Caveat**: This operation returns the previous value. To compute the
/// stored value without losing provenance, you may use [`map_addr`]. For
/// example: `a.fetch_xor(val).map_addr(|a| a ^ val)`.
///
/// `fetch_xor` takes an [`Ordering`] argument which describes the memory
/// ordering of this operation. All ordering modes are possible. Note that
/// using [`Acquire`] makes the store part of this operation [`Relaxed`],
/// and using [`Release`] makes the load part [`Relaxed`].
///
/// **Note**: This method is only available on platforms that support atomic
/// operations on [`AtomicPtr`].
///
/// This API and its claimed semantics are part of the Strict Provenance
/// experiment, see the [module documentation for `ptr`][crate::ptr] for
/// more details.
///
/// [`map_addr`]: pointer::map_addr
///
/// # Examples
///
/// ```
/// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
/// use core::sync::atomic::{AtomicPtr, Ordering};
///
/// let pointer = &mut 3i64 as *mut i64;
/// let atom = AtomicPtr::<i64>::new(pointer);
///
/// // Toggle a tag bit on the pointer.
/// atom.fetch_xor(1, Ordering::Relaxed);
/// assert_eq!(atom.load(Ordering::Relaxed).addr() & 1, 1);
/// ```
#[cfg(target_has_atomic = "ptr")]
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
    // SAFETY: data races are prevented by atomic intrinsics.
    unsafe { atomic_xor(self.p.get(), core::ptr::invalid_mut(val), order).cast() }
}
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "atomic_bool_from", since = "1.24.0")]
#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
impl const From<bool> for AtomicBool {
    /// Converts a `bool` into an `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    /// let atomic_bool = AtomicBool::from(true);
    /// assert_eq!(format!("{atomic_bool:?}"), "true")
    /// ```
    fn from(b: bool) -> Self {
        Self::new(b)
    }
}
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "atomic_from", since = "1.23.0")]
#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
impl<T> const From<*mut T> for AtomicPtr<T> {
    /// Converts a `*mut T` into an `AtomicPtr<T>`.
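    ///
    /// A minimal example (illustrative):
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let mut value = 5;
    /// let ptr: *mut i32 = &mut value;
    /// let atomic: AtomicPtr<i32> = ptr.into();
    /// ```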
    fn from(p: *mut T) -> Self {
        Self::new(p)
    }
}
#[allow(unused_macros)] // This macro ends up being unused on some architectures.
macro_rules! if_not_8_bit {
    (u8, $($tt:tt)*) => { "" };
    (i8, $($tt:tt)*) => { "" };
    ($_:ident, $($tt:tt)*) => { $($tt)* };
}
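// For example, `if_not_8_bit!(u8, "note")` expands to the empty string, while
// `if_not_8_bit!(i32, "note")` expands to its arguments unchanged; it is used
// below to suppress the alignment note in the docs of the 8-bit atomics.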
#[cfg(target_has_atomic_load_store = "8")]
macro_rules! atomic_int {
    ($cfg_cas:meta,
     $cfg_align:meta,
     $stable:meta,
     $stable_cxchg:meta,
     $stable_debug:meta,
     $stable_access:meta,
     $stable_from:meta,
     $stable_nand:meta,
     $const_stable:meta,
     $stable_init_const:meta,
     $diagnostic_item:meta,
     $s_int_type:literal,
     $extra_feature:expr,
     $min_fn:ident, $max_fn:ident,
     $align:expr,
     $atomic_new:expr,
     $int_type:ident $atomic_type:ident $atomic_init:ident) => {
    /// An integer type which can be safely shared between threads.
    ///
    /// This type has the same in-memory representation as the underlying
    /// integer type, [`
    #[doc = $s_int_type]
    /// `]. For more about the differences between atomic types and
    /// non-atomic types as well as information about the portability of
    /// this type, please see the [module-level documentation].
    ///
    /// **Note:** This type is only available on platforms that support
    /// atomic loads and stores of [`
    #[doc = $s_int_type]
    /// `].
    ///
    /// [module-level documentation]: crate::sync::atomic
    #[repr(C, align($align))]
    pub struct $atomic_type {
        v: UnsafeCell<$int_type>,
    }

    /// An atomic integer initialized to `0`.
    #[$stable_init_const]
    #[deprecated(
        since = "1.34.0",
        note = "the `new` function is now preferred",
        suggestion = $atomic_new,
    )]
    pub const $atomic_init: $atomic_type = $atomic_type::new(0);
    #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
    impl const Default for $atomic_type {
        fn default() -> Self {
            Self::new(Default::default())
        }
    }

    #[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")]
    impl const From<$int_type> for $atomic_type {
        #[doc = concat!("Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`.")]
        fn from(v: $int_type) -> Self { Self::new(v) }
    }

    impl fmt::Debug for $atomic_type {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
        }
    }

    // Send is implicitly implemented.
    unsafe impl Sync for $atomic_type {}
    impl $atomic_type {
        /// Creates a new atomic integer.
        ///
        /// # Examples
        ///
        /// ```
        #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
        ///
        #[doc = concat!("let atomic_forty_two = ", stringify!($atomic_type), "::new(42);")]
        /// ```
        pub const fn new(v: $int_type) -> Self {
            Self { v: UnsafeCell::new(v) }
        }

        /// Returns a mutable reference to the underlying integer.
        ///
        /// This is safe because the mutable reference guarantees that no other threads are
        /// concurrently accessing the atomic data.
        ///
        /// # Examples
        ///
        /// ```
        #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
        ///
        #[doc = concat!("let mut some_var = ", stringify!($atomic_type), "::new(10);")]
        /// assert_eq!(*some_var.get_mut(), 10);
        /// *some_var.get_mut() = 5;
        /// assert_eq!(some_var.load(Ordering::SeqCst), 5);
        /// ```
        pub fn get_mut(&mut self) -> &mut $int_type {
            self.v.get_mut()
        }
1945 #[doc = concat!("Get atomic access to a `&mut ", stringify!($int_type), "`.")]
1947 #[doc = if_not_8_bit! {
1950 "**Note:** This function is only available on targets where `",
1951 stringify!($int_type), "` has an alignment of ", $align, " bytes."
1958 /// #![feature(atomic_from_mut)]
1959 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1961 /// let mut some_int = 123;
1962 #[doc = concat!("let a = ", stringify!($atomic_type), "::from_mut(&mut some_int);")]
1963 /// a.store(100, Ordering::Relaxed);
1964 /// assert_eq!(some_int, 100);
1969 #[unstable(feature = "atomic_from_mut", issue = "76314")]
1970 pub fn from_mut(v: &mut $int_type) -> &mut Self {
1971 use crate::mem::align_of;
1972 let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
1974 // - the mutable reference guarantees unique ownership.
1975 // - the alignment of `$int_type` and `Self` is the
1976 // same, as promised by $cfg_align and verified above.
1977 unsafe { &mut *(v as *mut $int_type as *mut Self) }
1980 #[doc = concat!("Get non-atomic access to a `&mut [", stringify!($atomic_type), "]` slice")]
1982 /// This is safe because the mutable reference guarantees that no other threads are
1983 /// concurrently accessing the atomic data.
1988 /// #![feature(atomic_from_mut, inline_const)]
1989 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1991 #[doc = concat!("let mut some_ints = [const { ", stringify!($atomic_type), "::new(0) }; 10];")]
1993 #[doc = concat!("let view: &mut [", stringify!($int_type), "] = ", stringify!($atomic_type), "::get_mut_slice(&mut some_ints);")]
1994 /// assert_eq!(view, [0; 10]);
1998 /// .for_each(|(idx, int)| *int = idx as _);
2000 /// std::thread::scope(|s| {
2004 /// .for_each(|(idx, int)| {
2005 /// s.spawn(move || assert_eq!(int.load(Ordering::Relaxed), idx as _));
2010 #[unstable(feature = "atomic_from_mut", issue = "76314")]
2011 pub fn get_mut_slice(this: &mut [Self]) -> &mut [$int_type] {
2012 // SAFETY: the mutable reference guarantees unique ownership.
2013 unsafe { &mut *(this as *mut [Self] as *mut [$int_type]) }
2016 #[doc = concat!("Get atomic access to a `&mut [", stringify!($int_type), "]` slice.")]
2021 /// #![feature(atomic_from_mut)]
2022 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2024 /// let mut some_ints = [0; 10];
2025 #[doc = concat!("let a = &*", stringify!($atomic_type), "::from_mut_slice(&mut some_ints);")]
2026 /// std::thread::scope(|s| {
2027 /// for i in 0..a.len() {
2028 /// s.spawn(move || a[i].store(i as _, Ordering::Relaxed));
2031 /// for (i, n) in some_ints.into_iter().enumerate() {
2032 /// assert_eq!(i, n as usize);
2037 #[unstable(feature = "atomic_from_mut", issue = "76314")]
2038 pub fn from_mut_slice(v: &mut [$int_type]) -> &mut [Self] {
2039 use crate::mem::align_of;
2040 let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
2042 // - the mutable reference guarantees unique ownership.
2043 // - the alignment of `$int_type` and `Self` is the
2044 // same, as promised by $cfg_align and verified above.
2045 unsafe { &mut *(v as *mut [$int_type] as *mut [Self]) }
        /// Consumes the atomic and returns the contained value.
        ///
        /// This is safe because passing `self` by value guarantees that no other threads are
        /// concurrently accessing the atomic data.
        ///
        /// # Examples
        ///
        /// ```
        #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
        ///
        #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
        /// assert_eq!(some_var.into_inner(), 5);
        /// ```
        #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
        pub const fn into_inner(self) -> $int_type {
            self.v.into_inner()
        }
        /// Loads a value from the atomic integer.
        ///
        /// `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
        /// Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
        ///
        /// # Panics
        ///
        /// Panics if `order` is [`Release`] or [`AcqRel`].
        ///
        /// # Examples
        ///
        /// ```
        #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
        ///
        #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
        ///
        /// assert_eq!(some_var.load(Ordering::Relaxed), 5);
        /// ```
        #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
        pub fn load(&self, order: Ordering) -> $int_type {
            // SAFETY: data races are prevented by atomic intrinsics.
            unsafe { atomic_load(self.v.get(), order) }
        }

        /// Stores a value into the atomic integer.
        ///
        /// `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
        /// Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
        ///
        /// # Panics
        ///
        /// Panics if `order` is [`Acquire`] or [`AcqRel`].
        ///
        /// # Examples
        ///
        /// ```
        #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
        ///
        #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
        ///
        /// some_var.store(10, Ordering::Relaxed);
        /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
        /// ```
        #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
        pub fn store(&self, val: $int_type, order: Ordering) {
            // SAFETY: data races are prevented by atomic intrinsics.
            unsafe { atomic_store(self.v.get(), val, order); }
        }
        /// Stores a value into the atomic integer, returning the previous value.
        ///
        /// `swap` takes an [`Ordering`] argument which describes the memory ordering
        /// of this operation. All ordering modes are possible. Note that using
        /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
        /// using [`Release`] makes the load part [`Relaxed`].
        ///
        /// **Note**: This method is only available on platforms that support atomic operations on
        #[doc = concat!("[`", $s_int_type, "`].")]
        ///
        /// # Examples
        ///
        /// ```
        #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
        ///
        #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
        ///
        /// assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
        /// ```
        #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
        pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
            // SAFETY: data races are prevented by atomic intrinsics.
            unsafe { atomic_swap(self.v.get(), val, order) }
        }
        /// Stores a value into the atomic integer if the current value is the same as
        /// the `current` value.
        ///
        /// The return value is always the previous value. If it is equal to `current`, then the
        /// value was updated.
        ///
        /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
        /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
        /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
        /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
        /// happens, and using [`Release`] makes the load part [`Relaxed`].
        ///
        /// **Note**: This method is only available on platforms that support atomic operations on
        #[doc = concat!("[`", $s_int_type, "`].")]
        ///
        /// # Migrating to `compare_exchange` and `compare_exchange_weak`
        ///
        /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
        /// memory orderings:
        ///
        /// Original | Success | Failure
        /// -------- | ------- | -------
        /// Relaxed  | Relaxed | Relaxed
        /// Acquire  | Acquire | Acquire
        /// Release  | Release | Relaxed
        /// AcqRel   | AcqRel  | Acquire
        /// SeqCst   | SeqCst  | SeqCst
        ///
        /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
        /// which allows the compiler to generate better assembly code when the compare-and-swap
        /// is used in a loop.
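        ///
        /// For example, a retry loop written with `compare_and_swap` can typically be
        /// migrated along these lines (a sketch; the `SeqCst` orderings follow the
        /// table above):
        ///
        /// ```
        #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
        ///
        #[doc = concat!("let a = ", stringify!($atomic_type), "::new(0);")]
        /// let mut old = a.load(Ordering::Relaxed);
        /// loop {
        ///     match a.compare_exchange_weak(old, old + 1, Ordering::SeqCst, Ordering::SeqCst) {
        ///         Ok(_) => break,
        ///         Err(x) => old = x,
        ///     }
        /// }
        /// assert_eq!(a.load(Ordering::Relaxed), 1);
        /// ```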
        ///
        /// # Examples
        ///
        /// ```
        #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
        ///
        #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
        ///
        /// assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
        /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
        ///
        /// assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
        /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
        /// ```
        #[deprecated(
            since = "1.50.0",
            note = "Use `compare_exchange` or `compare_exchange_weak` instead"
        )]
        #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
        pub fn compare_and_swap(&self,
                                current: $int_type,
                                new: $int_type,
                                order: Ordering) -> $int_type {
            match self.compare_exchange(current,
                                        new,
                                        order,
                                        strongest_failure_ordering(order)) {
                Ok(x) => x,
                Err(x) => x,
            }
        }
        /// Stores a value into the atomic integer if the current value is the same as
        /// the `current` value.
        ///
        /// The return value is a result indicating whether the new value was written and
        /// containing the previous value. On success this value is guaranteed to be equal to
        /// `current`.
        ///
        /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
        /// ordering of this operation. `success` describes the required ordering for the
        /// read-modify-write operation that takes place if the comparison with `current` succeeds.
        /// `failure` describes the required ordering for the load operation that takes place when
        /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
        /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
        /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
        ///
        /// **Note**: This method is only available on platforms that support atomic operations on
        #[doc = concat!("[`", $s_int_type, "`].")]
        ///
        /// # Examples
        ///
        /// ```
        #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
        ///
        #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
        ///
        /// assert_eq!(some_var.compare_exchange(5, 10,
        ///                                      Ordering::Acquire,
        ///                                      Ordering::Relaxed),
        ///            Ok(5));
        /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
        ///
        /// assert_eq!(some_var.compare_exchange(6, 12,
        ///                                      Ordering::SeqCst,
        ///                                      Ordering::Acquire),
        ///            Err(10));
        /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
        /// ```
        #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
        pub fn compare_exchange(&self,
                                current: $int_type,
                                new: $int_type,
                                success: Ordering,
                                failure: Ordering) -> Result<$int_type, $int_type> {
            // SAFETY: data races are prevented by atomic intrinsics.
            unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
        }
        /// Stores a value into the atomic integer if the current value is the same as
        /// the `current` value.
        ///
        #[doc = concat!("Unlike [`", stringify!($atomic_type), "::compare_exchange`],")]
        /// this function is allowed to spuriously fail even
        /// when the comparison succeeds, which can result in more efficient code on some
        /// platforms. The return value is a result indicating whether the new value was
        /// written and containing the previous value.
        ///
        /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
        /// ordering of this operation. `success` describes the required ordering for the
        /// read-modify-write operation that takes place if the comparison with `current` succeeds.
        /// `failure` describes the required ordering for the load operation that takes place when
        /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
        /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
        /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
        ///
        /// **Note**: This method is only available on platforms that support atomic operations on
        #[doc = concat!("[`", $s_int_type, "`].")]
        ///
        /// # Examples
        ///
        /// ```
        #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
        ///
        #[doc = concat!("let val = ", stringify!($atomic_type), "::new(4);")]
        ///
        /// let mut old = val.load(Ordering::Relaxed);
        /// loop {
        ///     let new = old * 2;
        ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
        ///         Ok(_) => break,
        ///         Err(x) => old = x,
        ///     }
        /// }
        /// ```
        #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
        pub fn compare_exchange_weak(&self,
                                     current: $int_type,
                                     new: $int_type,
                                     success: Ordering,
                                     failure: Ordering) -> Result<$int_type, $int_type> {
            // SAFETY: data races are prevented by atomic intrinsics.
            unsafe {
                atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
            }
        }
        /// Adds to the current value, returning the previous value.
        ///
        /// This operation wraps around on overflow.
        ///
        /// `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
        /// of this operation. All ordering modes are possible. Note that using
        /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
        /// using [`Release`] makes the load part [`Relaxed`].
        ///
        /// **Note**: This method is only available on platforms that support atomic operations on
        #[doc = concat!("[`", $s_int_type, "`].")]
        ///
        /// # Examples
        ///
        /// ```
        #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
        ///
        #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0);")]
        /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
        /// assert_eq!(foo.load(Ordering::SeqCst), 10);
        /// ```
        #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
        pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
            // SAFETY: data races are prevented by atomic intrinsics.
            unsafe { atomic_add(self.v.get(), val, order) }
        }
        /// Subtracts from the current value, returning the previous value.
        ///
        /// This operation wraps around on overflow.
        ///
        /// `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
        /// of this operation. All ordering modes are possible. Note that using
        /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
        /// using [`Release`] makes the load part [`Relaxed`].
        ///
        /// **Note**: This method is only available on platforms that support atomic operations on
        #[doc = concat!("[`", $s_int_type, "`].")]
        ///
        /// # Examples
        ///
        /// ```
        #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
        ///
        #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(20);")]
        /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
        /// assert_eq!(foo.load(Ordering::SeqCst), 10);
        /// ```
        #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
        pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
            // SAFETY: data races are prevented by atomic intrinsics.
            unsafe { atomic_sub(self.v.get(), val, order) }
        }
2376 /// Bitwise "and" with the current value.
2378 /// Performs a bitwise "and" operation on the current value and the argument `val`, and
2379 /// sets the new value to the result.
2381 /// Returns the previous value.
2383 /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
2384 /// of this operation. All ordering modes are possible. Note that using
2385 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
2386 /// using [`Release`] makes the load part [`Relaxed`].
2388 /// **Note**: This method is only available on platforms that support atomic operations on
2389 #[doc = concat!("[`", $s_int_type, "`].")]
2394 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2396 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
2397 /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
2398 /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
2403 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2404 pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
2405 // SAFETY: data races are prevented by atomic intrinsics.
2406 unsafe { atomic_and(self.v.get(), val, order) }
2409 /// Bitwise "nand" with the current value.
2411 /// Performs a bitwise "nand" operation on the current value and the argument `val`, and
2412 /// sets the new value to the result.
2414 /// Returns the previous value.
2416 /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
2417 /// of this operation. All ordering modes are possible. Note that using
2418 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
2419 /// using [`Release`] makes the load part [`Relaxed`].
2421 /// **Note**: This method is only available on platforms that support atomic operations on
2422 #[doc = concat!("[`", $s_int_type, "`].")]
2427 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2429 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0x13);")]
2430 /// assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
2431 /// assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
2436 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2437 pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
2438 // SAFETY: data races are prevented by atomic intrinsics.
2439 unsafe { atomic_nand(self.v.get(), val, order) }
2442 /// Bitwise "or" with the current value.
2444 /// Performs a bitwise "or" operation on the current value and the argument `val`, and
2445 /// sets the new value to the result.
2447 /// Returns the previous value.
2449 /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
2450 /// of this operation. All ordering modes are possible. Note that using
2451 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
2452 /// using [`Release`] makes the load part [`Relaxed`].
2454 /// **Note**: This method is only available on platforms that support atomic operations on
2455 #[doc = concat!("[`", $s_int_type, "`].")]
2460 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2462 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
2463 /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
2464 /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
2469 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2470 pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
2471 // SAFETY: data races are prevented by atomic intrinsics.
2472 unsafe { atomic_or(self.v.get(), val, order) }
2475 /// Bitwise "xor" with the current value.
2477 /// Performs a bitwise "xor" operation on the current value and the argument `val`, and
2478 /// sets the new value to the result.
2480 /// Returns the previous value.
2482 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
2483 /// of this operation. All ordering modes are possible. Note that using
2484 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
2485 /// using [`Release`] makes the load part [`Relaxed`].
2487 /// **Note**: This method is only available on platforms that support atomic operations on
2488 #[doc = concat!("[`", $s_int_type, "`].")]
2493 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2495 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
2496 /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
2497 /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
2502 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2503 pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
2504 // SAFETY: data races are prevented by atomic intrinsics.
2505 unsafe { atomic_xor(self.v.get(), val, order) }
        /// Fetches the value, and applies a function to it that returns an optional
        /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
        /// `Err(previous_value)`.
        ///
        /// Note: This may call the function multiple times if the value has been changed from other threads in
        /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied
        /// only once to the stored value.
        ///
        /// `fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
        /// The first describes the required ordering for when the operation finally succeeds while the second
        /// describes the required ordering for loads. These correspond to the success and failure orderings of
        #[doc = concat!("[`", stringify!($atomic_type), "::compare_exchange`]")]
        /// respectively.
        ///
        /// Using [`Acquire`] as success ordering makes the store part
        /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
        /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
        ///
        /// **Note**: This method is only available on platforms that support atomic operations on
        #[doc = concat!("[`", $s_int_type, "`].")]
        ///
        /// # Considerations
        ///
        /// This method is not magic; it is not provided by the hardware.
        /// It is implemented in terms of
        #[doc = concat!("[`", stringify!($atomic_type), "::compare_exchange_weak`],")]
        /// and suffers from the same drawbacks.
        /// In particular, this method will not circumvent the [ABA Problem].
        ///
        /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
        ///
        /// # Examples
        ///
        /// ```
        #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
        ///
        #[doc = concat!("let x = ", stringify!($atomic_type), "::new(7);")]
        /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7));
        /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7));
        /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8));
        /// assert_eq!(x.load(Ordering::SeqCst), 9);
        /// ```
        #[stable(feature = "no_more_cas", since = "1.45.0")]
        #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
        pub fn fetch_update<F>(&self,
                               set_order: Ordering,
                               fetch_order: Ordering,
                               mut f: F) -> Result<$int_type, $int_type>
        where F: FnMut($int_type) -> Option<$int_type> {
            let mut prev = self.load(fetch_order);
            while let Some(next) = f(prev) {
                match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
                    x @ Ok(_) => return x,
                    Err(next_prev) => prev = next_prev,
                }
            }
            Err(prev)
        }
        /// Maximum with the current value.
        ///
        /// Finds the maximum of the current value and the argument `val`, and
        /// sets the new value to the result.
        ///
        /// Returns the previous value.
        ///
        /// `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
        /// of this operation. All ordering modes are possible. Note that using
        /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
        /// using [`Release`] makes the load part [`Relaxed`].
        ///
        /// **Note**: This method is only available on platforms that support atomic operations on
        #[doc = concat!("[`", $s_int_type, "`].")]
        ///
        /// # Examples
        ///
        /// ```
        #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
        ///
        #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
        /// assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
        /// assert_eq!(foo.load(Ordering::SeqCst), 42);
        /// ```
        ///
        /// If you want to obtain the maximum value in one step, you can use the following:
        ///
        /// ```
        #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
        ///
        #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
        /// let bar = 42;
        /// let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
        /// assert!(max_foo == 42);
        /// ```
        #[stable(feature = "atomic_min_max", since = "1.45.0")]
        #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
        pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
            // SAFETY: data races are prevented by atomic intrinsics.
            unsafe { $max_fn(self.v.get(), val, order) }
        }
        /// Minimum with the current value.
        ///
        /// Finds the minimum of the current value and the argument `val`, and
        /// sets the new value to the result.
        ///
        /// Returns the previous value.
        ///
        /// `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
        /// of this operation. All ordering modes are possible. Note that using
        /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
        /// using [`Release`] makes the load part [`Relaxed`].
        ///
        /// **Note**: This method is only available on platforms that support atomic operations on
        #[doc = concat!("[`", $s_int_type, "`].")]
        ///
        /// # Examples
        ///
        /// ```
        #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
        ///
        #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
        /// assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
        /// assert_eq!(foo.load(Ordering::Relaxed), 23);
        /// assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
        /// assert_eq!(foo.load(Ordering::Relaxed), 22);
        /// ```
        ///
        /// If you want to obtain the minimum value in one step, you can use the following:
        ///
        /// ```
        #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
        ///
        #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
        /// let bar = 12;
        /// let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
        /// assert_eq!(min_foo, 12);
        /// ```
        #[stable(feature = "atomic_min_max", since = "1.45.0")]
        #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
        pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
            // SAFETY: data races are prevented by atomic intrinsics.
            unsafe { $min_fn(self.v.get(), val, order) }
        }
        /// Returns a mutable pointer to the underlying integer.
        ///
        /// Doing non-atomic reads and writes on the resulting integer can be a data race.
        /// This method is mostly useful for FFI, where the function signature may use
        #[doc = concat!("`*mut ", stringify!($int_type), "` instead of `&", stringify!($atomic_type), "`.")]
        ///
        /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
        /// atomic types work with interior mutability. All modifications of an atomic change the value
        /// through a shared reference, and can do so safely as long as they use atomic operations. Any
        /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
        /// restriction: operations on it must be atomic.
        ///
        /// # Examples
        ///
        /// ```ignore (extern-declaration)
        #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
        ///
        /// extern "C" {
        #[doc = concat!("    fn my_atomic_op(arg: *mut ", stringify!($int_type), ");")]
        /// }
        ///
        #[doc = concat!("let mut atomic = ", stringify!($atomic_type), "::new(1);")]
        ///
        /// // SAFETY: Safe as long as `my_atomic_op` is atomic.
        /// unsafe {
        ///     my_atomic_op(atomic.as_mut_ptr());
        /// }
        /// ```
        #[unstable(feature = "atomic_mut_ptr",
                   reason = "recently added")]
        pub fn as_mut_ptr(&self) -> *mut $int_type {
            self.v.get()
        }
    }
}
}
#[cfg(target_has_atomic_load_store = "8")]
atomic_int! {
    cfg(target_has_atomic = "8"),
    cfg(target_has_atomic_equal_alignment = "8"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
    unstable(feature = "integer_atomics", issue = "99069"),
    cfg_attr(not(test), rustc_diagnostic_item = "AtomicI8"),
    "i8",
    "",
    atomic_min, atomic_max,
    1,
    "AtomicI8::new(0)",
    i8 AtomicI8 ATOMIC_I8_INIT
}

#[cfg(target_has_atomic_load_store = "8")]
atomic_int! {
    cfg(target_has_atomic = "8"),
    cfg(target_has_atomic_equal_alignment = "8"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
    unstable(feature = "integer_atomics", issue = "99069"),
    cfg_attr(not(test), rustc_diagnostic_item = "AtomicU8"),
    "u8",
    "",
    atomic_umin, atomic_umax,
    1,
    "AtomicU8::new(0)",
    u8 AtomicU8 ATOMIC_U8_INIT
}

#[cfg(target_has_atomic_load_store = "16")]
atomic_int! {
    cfg(target_has_atomic = "16"),
    cfg(target_has_atomic_equal_alignment = "16"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
    unstable(feature = "integer_atomics", issue = "99069"),
    cfg_attr(not(test), rustc_diagnostic_item = "AtomicI16"),
    "i16",
    "",
    atomic_min, atomic_max,
    2,
    "AtomicI16::new(0)",
    i16 AtomicI16 ATOMIC_I16_INIT
}

#[cfg(target_has_atomic_load_store = "16")]
atomic_int! {
    cfg(target_has_atomic = "16"),
    cfg(target_has_atomic_equal_alignment = "16"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
    unstable(feature = "integer_atomics", issue = "99069"),
    cfg_attr(not(test), rustc_diagnostic_item = "AtomicU16"),
    "u16",
    "",
    atomic_umin, atomic_umax,
    2,
    "AtomicU16::new(0)",
    u16 AtomicU16 ATOMIC_U16_INIT
}

#[cfg(target_has_atomic_load_store = "32")]
atomic_int! {
    cfg(target_has_atomic = "32"),
    cfg(target_has_atomic_equal_alignment = "32"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
    unstable(feature = "integer_atomics", issue = "99069"),
    cfg_attr(not(test), rustc_diagnostic_item = "AtomicI32"),
    "i32",
    "",
    atomic_min, atomic_max,
    4,
    "AtomicI32::new(0)",
    i32 AtomicI32 ATOMIC_I32_INIT
}

#[cfg(target_has_atomic_load_store = "32")]
atomic_int! {
    cfg(target_has_atomic = "32"),
    cfg(target_has_atomic_equal_alignment = "32"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
    unstable(feature = "integer_atomics", issue = "99069"),
    cfg_attr(not(test), rustc_diagnostic_item = "AtomicU32"),
    "u32",
    "",
    atomic_umin, atomic_umax,
    4,
    "AtomicU32::new(0)",
    u32 AtomicU32 ATOMIC_U32_INIT
}

#[cfg(target_has_atomic_load_store = "64")]
atomic_int! {
    cfg(target_has_atomic = "64"),
    cfg(target_has_atomic_equal_alignment = "64"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
    unstable(feature = "integer_atomics", issue = "99069"),
    cfg_attr(not(test), rustc_diagnostic_item = "AtomicI64"),
    "i64",
    "",
    atomic_min, atomic_max,
    8,
    "AtomicI64::new(0)",
    i64 AtomicI64 ATOMIC_I64_INIT
}

#[cfg(target_has_atomic_load_store = "64")]
atomic_int! {
    cfg(target_has_atomic = "64"),
    cfg(target_has_atomic_equal_alignment = "64"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    stable(feature = "integer_atomics_stable", since = "1.34.0"),
    rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
    unstable(feature = "integer_atomics", issue = "99069"),
    cfg_attr(not(test), rustc_diagnostic_item = "AtomicU64"),
    "u64",
    "",
    atomic_umin, atomic_umax,
    8,
    "AtomicU64::new(0)",
    u64 AtomicU64 ATOMIC_U64_INIT
}

#[cfg(target_has_atomic_load_store = "128")]
atomic_int! {
    cfg(target_has_atomic = "128"),
    cfg(target_has_atomic_equal_alignment = "128"),
    unstable(feature = "integer_atomics", issue = "99069"),
    unstable(feature = "integer_atomics", issue = "99069"),
    unstable(feature = "integer_atomics", issue = "99069"),
    unstable(feature = "integer_atomics", issue = "99069"),
    unstable(feature = "integer_atomics", issue = "99069"),
    unstable(feature = "integer_atomics", issue = "99069"),
    rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
    unstable(feature = "integer_atomics", issue = "99069"),
    cfg_attr(not(test), rustc_diagnostic_item = "AtomicI128"),
    "i128",
    "#![feature(integer_atomics)]\n\n",
    atomic_min, atomic_max,
    16,
    "AtomicI128::new(0)",
    i128 AtomicI128 ATOMIC_I128_INIT
}

#[cfg(target_has_atomic_load_store = "128")]
atomic_int! {
    cfg(target_has_atomic = "128"),
    cfg(target_has_atomic_equal_alignment = "128"),
    unstable(feature = "integer_atomics", issue = "99069"),
    unstable(feature = "integer_atomics", issue = "99069"),
    unstable(feature = "integer_atomics", issue = "99069"),
    unstable(feature = "integer_atomics", issue = "99069"),
    unstable(feature = "integer_atomics", issue = "99069"),
    unstable(feature = "integer_atomics", issue = "99069"),
    rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
    unstable(feature = "integer_atomics", issue = "99069"),
    cfg_attr(not(test), rustc_diagnostic_item = "AtomicU128"),
    "u128",
    "#![feature(integer_atomics)]\n\n",
    atomic_umin, atomic_umax,
    16,
    "AtomicU128::new(0)",
    u128 AtomicU128 ATOMIC_U128_INIT
}
macro_rules! atomic_int_ptr_sized {
    ( $($target_pointer_width:literal $align:literal)* ) => { $(
        #[cfg(target_has_atomic_load_store = "ptr")]
        #[cfg(target_pointer_width = $target_pointer_width)]
        atomic_int! {
            cfg(target_has_atomic = "ptr"),
            cfg(target_has_atomic_equal_alignment = "ptr"),
            stable(feature = "rust1", since = "1.0.0"),
            stable(feature = "extended_compare_and_swap", since = "1.10.0"),
            stable(feature = "atomic_debug", since = "1.3.0"),
            stable(feature = "atomic_access", since = "1.15.0"),
            stable(feature = "atomic_from", since = "1.23.0"),
            stable(feature = "atomic_nand", since = "1.27.0"),
            rustc_const_stable(feature = "const_ptr_sized_atomics", since = "1.24.0"),
            stable(feature = "rust1", since = "1.0.0"),
            cfg_attr(not(test), rustc_diagnostic_item = "AtomicIsize"),
            "isize",
            "",
            atomic_min, atomic_max,
            $align,
            "AtomicIsize::new(0)",
            isize AtomicIsize ATOMIC_ISIZE_INIT
        }

        #[cfg(target_has_atomic_load_store = "ptr")]
        #[cfg(target_pointer_width = $target_pointer_width)]
        atomic_int! {
            cfg(target_has_atomic = "ptr"),
            cfg(target_has_atomic_equal_alignment = "ptr"),
            stable(feature = "rust1", since = "1.0.0"),
            stable(feature = "extended_compare_and_swap", since = "1.10.0"),
            stable(feature = "atomic_debug", since = "1.3.0"),
            stable(feature = "atomic_access", since = "1.15.0"),
            stable(feature = "atomic_from", since = "1.23.0"),
            stable(feature = "atomic_nand", since = "1.27.0"),
            rustc_const_stable(feature = "const_ptr_sized_atomics", since = "1.24.0"),
            stable(feature = "rust1", since = "1.0.0"),
            cfg_attr(not(test), rustc_diagnostic_item = "AtomicUsize"),
            "usize",
            "",
            atomic_umin, atomic_umax,
            $align,
            "AtomicUsize::new(0)",
            usize AtomicUsize ATOMIC_USIZE_INIT
        }
    )* };
}

atomic_int_ptr_sized! {
    "16" 2
    "32" 4
    "64" 8
}
#[cfg(target_has_atomic = "8")]
fn strongest_failure_ordering(order: Ordering) -> Ordering {
    // A failure (load) ordering may not be `Release` or `AcqRel`, so map each
    // success ordering to the strongest ordering that is valid on failure.
    match order {
        Release => Relaxed,
        Relaxed => Relaxed,
        SeqCst => SeqCst,
        Acquire => Acquire,
        AcqRel => Acquire,
    }
}
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {
    // SAFETY: the caller must uphold the safety contract for `atomic_store`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_store_relaxed(dst, val),
            Release => intrinsics::atomic_store_release(dst, val),
            SeqCst => intrinsics::atomic_store_seqcst(dst, val),
            Acquire => panic!("there is no such thing as an acquire store"),
            AcqRel => panic!("there is no such thing as an acquire-release store"),
        }
    }
}
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_load`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_load_relaxed(dst),
            Acquire => intrinsics::atomic_load_acquire(dst),
            SeqCst => intrinsics::atomic_load_seqcst(dst),
            Release => panic!("there is no such thing as a release load"),
            AcqRel => panic!("there is no such thing as an acquire-release load"),
        }
    }
}
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_swap`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
            Acquire => intrinsics::atomic_xchg_acquire(dst, val),
            Release => intrinsics::atomic_xchg_release(dst, val),
            AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
            SeqCst => intrinsics::atomic_xchg_seqcst(dst, val),
        }
    }
}
/// Returns the previous value (like __sync_fetch_and_add).
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_add`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
            Acquire => intrinsics::atomic_xadd_acquire(dst, val),
            Release => intrinsics::atomic_xadd_release(dst, val),
            AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
            SeqCst => intrinsics::atomic_xadd_seqcst(dst, val),
        }
    }
}
/// Returns the previous value (like __sync_fetch_and_sub).
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_sub`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
            Acquire => intrinsics::atomic_xsub_acquire(dst, val),
            Release => intrinsics::atomic_xsub_release(dst, val),
            AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
            SeqCst => intrinsics::atomic_xsub_seqcst(dst, val),
        }
    }
}
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_compare_exchange<T: Copy>(
    dst: *mut T,
    old: T,
    new: T,
    success: Ordering,
    failure: Ordering,
) -> Result<T, T> {
    // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange`.
    let (val, ok) = unsafe {
        match (success, failure) {
            (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed_relaxed(dst, old, new),
            (Relaxed, Acquire) => intrinsics::atomic_cxchg_relaxed_acquire(dst, old, new),
            (Relaxed, SeqCst) => intrinsics::atomic_cxchg_relaxed_seqcst(dst, old, new),
            (Acquire, Relaxed) => intrinsics::atomic_cxchg_acquire_relaxed(dst, old, new),
            (Acquire, Acquire) => intrinsics::atomic_cxchg_acquire_acquire(dst, old, new),
            (Acquire, SeqCst) => intrinsics::atomic_cxchg_acquire_seqcst(dst, old, new),
            (Release, Relaxed) => intrinsics::atomic_cxchg_release_relaxed(dst, old, new),
            (Release, Acquire) => intrinsics::atomic_cxchg_release_acquire(dst, old, new),
            (Release, SeqCst) => intrinsics::atomic_cxchg_release_seqcst(dst, old, new),
            (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_relaxed(dst, old, new),
            (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel_acquire(dst, old, new),
            (AcqRel, SeqCst) => intrinsics::atomic_cxchg_acqrel_seqcst(dst, old, new),
            (SeqCst, Relaxed) => intrinsics::atomic_cxchg_seqcst_relaxed(dst, old, new),
            (SeqCst, Acquire) => intrinsics::atomic_cxchg_seqcst_acquire(dst, old, new),
            (SeqCst, SeqCst) => intrinsics::atomic_cxchg_seqcst_seqcst(dst, old, new),
            (_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
            (_, Release) => panic!("there is no such thing as a release failure ordering"),
        }
    };
    if ok { Ok(val) } else { Err(val) }
}
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_compare_exchange_weak<T: Copy>(
    dst: *mut T,
    old: T,
    new: T,
    success: Ordering,
    failure: Ordering,
) -> Result<T, T> {
    // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange_weak`.
    let (val, ok) = unsafe {
        match (success, failure) {
            (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed_relaxed(dst, old, new),
            (Relaxed, Acquire) => intrinsics::atomic_cxchgweak_relaxed_acquire(dst, old, new),
            (Relaxed, SeqCst) => intrinsics::atomic_cxchgweak_relaxed_seqcst(dst, old, new),
            (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acquire_relaxed(dst, old, new),
            (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acquire_acquire(dst, old, new),
            (Acquire, SeqCst) => intrinsics::atomic_cxchgweak_acquire_seqcst(dst, old, new),
            (Release, Relaxed) => intrinsics::atomic_cxchgweak_release_relaxed(dst, old, new),
            (Release, Acquire) => intrinsics::atomic_cxchgweak_release_acquire(dst, old, new),
            (Release, SeqCst) => intrinsics::atomic_cxchgweak_release_seqcst(dst, old, new),
            (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_relaxed(dst, old, new),
            (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel_acquire(dst, old, new),
            (AcqRel, SeqCst) => intrinsics::atomic_cxchgweak_acqrel_seqcst(dst, old, new),
            (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_seqcst_relaxed(dst, old, new),
            (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_seqcst_acquire(dst, old, new),
            (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak_seqcst_seqcst(dst, old, new),
            (_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
            (_, Release) => panic!("there is no such thing as a release failure ordering"),
        }
    };
    if ok { Ok(val) } else { Err(val) }
}
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_and`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_and_relaxed(dst, val),
            Acquire => intrinsics::atomic_and_acquire(dst, val),
            Release => intrinsics::atomic_and_release(dst, val),
            AcqRel => intrinsics::atomic_and_acqrel(dst, val),
            SeqCst => intrinsics::atomic_and_seqcst(dst, val),
        }
    }
}
/// Bitwise "nand" with `val`; returns the previous value.
#[inline]
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_nand`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
            Acquire => intrinsics::atomic_nand_acquire(dst, val),
            Release => intrinsics::atomic_nand_release(dst, val),
            AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
            SeqCst => intrinsics::atomic_nand_seqcst(dst, val),
        }
    }
}
/// Bitwise "or" with `val`; returns the previous value.
#[inline]
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_or`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_or_relaxed(dst, val),
            Acquire => intrinsics::atomic_or_acquire(dst, val),
            Release => intrinsics::atomic_or_release(dst, val),
            AcqRel => intrinsics::atomic_or_acqrel(dst, val),
            SeqCst => intrinsics::atomic_or_seqcst(dst, val),
        }
    }
}
/// Bitwise "xor" with `val`; returns the previous value.
#[inline]
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_xor`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
            Acquire => intrinsics::atomic_xor_acquire(dst, val),
            Release => intrinsics::atomic_xor_release(dst, val),
            AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
            SeqCst => intrinsics::atomic_xor_seqcst(dst, val),
        }
    }
}
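
// A minimal usage sketch of the public wrappers over these helpers; each
// `fetch_*` method returns the previous value:
//
//     use std::sync::atomic::{AtomicU8, Ordering};
//
//     let a = AtomicU8::new(0b1010);
//     assert_eq!(a.fetch_or(0b0101, Ordering::SeqCst), 0b1010);  // now 0b1111
//     assert_eq!(a.fetch_and(0b0110, Ordering::SeqCst), 0b1111); // now 0b0110
//     assert_eq!(a.fetch_xor(0b0011, Ordering::SeqCst), 0b0110); // now 0b0101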
/// Stores the maximum of the current value and `val` (signed comparison);
/// returns the previous value.
#[inline]
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_max`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_max_relaxed(dst, val),
            Acquire => intrinsics::atomic_max_acquire(dst, val),
            Release => intrinsics::atomic_max_release(dst, val),
            AcqRel => intrinsics::atomic_max_acqrel(dst, val),
            SeqCst => intrinsics::atomic_max_seqcst(dst, val),
        }
    }
}
/// Stores the minimum of the current value and `val` (signed comparison);
/// returns the previous value.
#[inline]
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_min`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_min_relaxed(dst, val),
            Acquire => intrinsics::atomic_min_acquire(dst, val),
            Release => intrinsics::atomic_min_release(dst, val),
            AcqRel => intrinsics::atomic_min_acqrel(dst, val),
            SeqCst => intrinsics::atomic_min_seqcst(dst, val),
        }
    }
}
/// Stores the maximum of the current value and `val` (unsigned comparison);
/// returns the previous value.
#[inline]
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_umax`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
            Acquire => intrinsics::atomic_umax_acquire(dst, val),
            Release => intrinsics::atomic_umax_release(dst, val),
            AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
            SeqCst => intrinsics::atomic_umax_seqcst(dst, val),
        }
    }
}
/// Stores the minimum of the current value and `val` (unsigned comparison);
/// returns the previous value.
#[inline]
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_umin`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
            Acquire => intrinsics::atomic_umin_acquire(dst, val),
            Release => intrinsics::atomic_umin_release(dst, val),
            AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
            SeqCst => intrinsics::atomic_umin_seqcst(dst, val),
        }
    }
}
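
// A minimal usage sketch: the public `fetch_max`/`fetch_min` methods pick the
// signed (`atomic_max`/`atomic_min`) or unsigned (`atomic_umax`/`atomic_umin`)
// helper based on the atomic's integer type, and return the previous value:
//
//     use std::sync::atomic::{AtomicIsize, AtomicUsize, Ordering};
//
//     let s = AtomicIsize::new(-5);
//     assert_eq!(s.fetch_max(2, Ordering::SeqCst), -5); // signed: now 2
//
//     let u = AtomicUsize::new(7);
//     assert_eq!(u.fetch_min(3, Ordering::SeqCst), 7);  // unsigned: now 3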
/// An atomic fence.
///
/// Depending on the specified order, a fence prevents the compiler and CPU from
/// reordering certain types of memory operations around it.
/// That creates synchronizes-with relationships between it and atomic operations
/// or fences in other threads.
///
/// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
/// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
/// exist operations X and Y, both operating on some atomic object 'M' such
/// that A is sequenced before X, Y is sequenced before B and Y observes
/// the change to M. This provides a happens-before dependence between A and B.
///
/// ```text
///     Thread 1                                          Thread 2
///
/// fence(Release);      A --------------
/// x.store(3, Relaxed); X ---------    |
///                                |    |
///                                |    |
///                                -------------> Y  if x.load(Relaxed) == 3 {
///                                     |-------> B      fence(Acquire);
///                                                      ...
///                                                  }
/// ```
///
/// Atomic operations with [`Release`] or [`Acquire`] semantics can also
/// synchronize with a fence.
///
/// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
/// and [`Release`] semantics, participates in the global program order of the
/// other [`SeqCst`] operations and/or fences.
///
/// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
///
/// # Panics
///
/// Panics if `order` is [`Relaxed`].
///
/// # Examples
///
/// ```
/// use std::sync::atomic::AtomicBool;
/// use std::sync::atomic::fence;
/// use std::sync::atomic::Ordering;
///
/// // A mutual exclusion primitive based on spinlock.
/// pub struct Mutex {
///     flag: AtomicBool,
/// }
///
/// impl Mutex {
///     pub fn new() -> Mutex {
///         Mutex { flag: AtomicBool::new(false) }
///     }
///
///     pub fn lock(&self) {
///         // Wait until the old value is `false`.
///         while self
///             .flag
///             .compare_exchange_weak(false, true, Ordering::Relaxed, Ordering::Relaxed)
///             .is_err()
///         {}
///         // This fence synchronizes-with the store in `unlock`.
///         fence(Ordering::Acquire);
///     }
///
///     pub fn unlock(&self) {
///         self.flag.store(false, Ordering::Release);
///     }
/// }
/// ```
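///
/// A minimal sketch of the fence pairing in the diagram above; the `assert`
/// relies on the happens-before edge created when fence B synchronizes-with
/// fence A:
///
/// ```
/// use std::sync::atomic::{fence, AtomicBool, AtomicUsize, Ordering};
/// use std::sync::Arc;
/// use std::thread;
///
/// let data = Arc::new(AtomicUsize::new(0));
/// let ready = Arc::new(AtomicBool::new(false));
///
/// let (d, r) = (Arc::clone(&data), Arc::clone(&ready));
/// let t = thread::spawn(move || {
///     d.store(42, Ordering::Relaxed);
///     fence(Ordering::Release);         // A: orders the data store before...
///     r.store(true, Ordering::Relaxed); // X: ...this flag store.
/// });
///
/// if ready.load(Ordering::Relaxed) {    // Y: if the flag store is observed,
///     fence(Ordering::Acquire);         // B: this fence synchronizes-with A,
///     // ...so the data store is guaranteed to be visible here.
///     assert_eq!(data.load(Ordering::Relaxed), 42);
/// }
/// t.join().unwrap();
/// ```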
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "fence"]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fence(order: Ordering) {
    // SAFETY: using an atomic fence is safe.
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence_acquire(),
            Release => intrinsics::atomic_fence_release(),
            AcqRel => intrinsics::atomic_fence_acqrel(),
            SeqCst => intrinsics::atomic_fence_seqcst(),
            Relaxed => panic!("there is no such thing as a relaxed fence"),
        }
    }
}
/// A compiler memory fence.
///
/// `compiler_fence` does not emit any machine code, but restricts the kinds
/// of memory re-ordering the compiler is allowed to do. Specifically, depending on
/// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
/// or writes from before or after the call to the other side of the call to
/// `compiler_fence`. Note that it does **not** prevent the *hardware*
/// from doing such re-ordering. This is not a problem in a single-threaded
/// execution context, but when other threads may modify memory at the same
/// time, stronger synchronization primitives such as [`fence`] are required.
///
/// The re-orderings prevented by the different ordering semantics are:
///
/// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
/// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
/// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
/// - with [`AcqRel`], both of the above rules are enforced.
///
/// `compiler_fence` is generally only useful for preventing a thread from
/// racing *with itself*. That is, it matters when a given thread is executing
/// one piece of code, is then interrupted, and starts executing code elsewhere
/// (while still in the same thread, and conceptually still on the same
/// core). In traditional programs, this can only occur when a signal
/// handler is registered. In more low-level code, such situations can also
/// arise when handling interrupts, when implementing green threads with
/// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
/// discussion of [memory barriers].
///
/// # Panics
///
/// Panics if `order` is [`Relaxed`].
///
/// # Examples
///
/// Without `compiler_fence`, the `assert_eq!` in the following code
/// is *not* guaranteed to succeed, despite everything happening in a single thread.
/// To see why, remember that the compiler is free to swap the stores to
/// `IMPORTANT_VARIABLE` and `IS_READY` since they are both
/// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
/// after `IS_READY` is updated, then the signal handler will see
/// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
/// Using a `compiler_fence` remedies this situation.
///
/// ```
/// use std::sync::atomic::{AtomicBool, AtomicUsize};
/// use std::sync::atomic::Ordering;
/// use std::sync::atomic::compiler_fence;
///
/// static IMPORTANT_VARIABLE: AtomicUsize = AtomicUsize::new(0);
/// static IS_READY: AtomicBool = AtomicBool::new(false);
///
/// fn main() {
///     IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
///     // prevent earlier writes from being moved beyond this point
///     compiler_fence(Ordering::Release);
///     IS_READY.store(true, Ordering::Relaxed);
/// }
///
/// fn signal_handler() {
///     if IS_READY.load(Ordering::Relaxed) {
///         assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
///     }
/// }
/// ```
///
/// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
#[inline]
#[stable(feature = "compiler_fences", since = "1.21.0")]
#[rustc_diagnostic_item = "compiler_fence"]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn compiler_fence(order: Ordering) {
    // SAFETY: using an atomic fence is safe.
    unsafe {
        match order {
            Acquire => intrinsics::atomic_singlethreadfence_acquire(),
            Release => intrinsics::atomic_singlethreadfence_release(),
            AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
            SeqCst => intrinsics::atomic_singlethreadfence_seqcst(),
            Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
        }
    }
}
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl fmt::Debug for AtomicBool {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
    }
}
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl<T> fmt::Debug for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
    }
}
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "atomic_pointer", since = "1.24.0")]
impl<T> fmt::Pointer for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)
    }
}
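
// A minimal formatting sketch: the impls above load the value (relaxed for
// `Debug`, `SeqCst` for `Pointer`) purely so atomics can be printed directly:
//
//     use std::sync::atomic::AtomicBool;
//     use std::sync::atomic::AtomicPtr;
//
//     let b = AtomicBool::new(true);
//     assert_eq!(format!("{:?}", b), "true"); // Debug
//
//     let p: AtomicPtr<u8> = AtomicPtr::new(std::ptr::null_mut());
//     let s = format!("{:p}", p); // Pointer, e.g. "0x0"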
/// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
///
/// This function is deprecated in favor of [`hint::spin_loop`].
///
/// [`hint::spin_loop`]: crate::hint::spin_loop
#[inline]
#[stable(feature = "spin_loop_hint", since = "1.24.0")]
#[deprecated(since = "1.51.0", note = "use hint::spin_loop instead")]
pub fn spin_loop_hint() {
    crate::hint::spin_loop()
}
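
// A minimal sketch of the recommended replacement, `hint::spin_loop`:
//
//     use std::hint;
//     use std::sync::atomic::{AtomicBool, Ordering};
//
//     fn wait_until_set(flag: &AtomicBool) {
//         while !flag.load(Ordering::Acquire) {
//             hint::spin_loop(); // tell the CPU we're busy-waiting
//         }
//     }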