3 //! Atomic types provide primitive shared-memory communication between
4 //! threads, and are the building blocks of other concurrent primitives.
7 //! This module defines atomic versions of a select number of primitive
8 //! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
9 //! [`AtomicI8`], [`AtomicU16`], etc.
10 //! Atomic types present operations that, when used correctly, synchronize
11 //! updates between threads.
13 //! Each method takes an [`Ordering`] which represents the strength of
14 //! the memory barrier for that operation. These orderings are the
15 //! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2].
17 //! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order
18 //! [2]: ../../../nomicon/atomics.html
20 //! Atomic variables are safe to share between threads (they implement [`Sync`])
21 //! but they do not themselves provide the mechanism for sharing and follow the
22 //! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
23 //! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
24 //! atomically-reference-counted shared pointer).
26 //! [arc]: ../../../std/sync/struct.Arc.html
28 //! Atomic types may be stored in static variables, initialized using
29 //! the constant initializers like [`AtomicBool::new`]. Atomic statics
30 //! are often used for lazy global initialization.
34 //! All atomic types in this module are guaranteed to be [lock-free] if they're
35 //! available. This means they don't internally acquire a global mutex. Atomic
36 //! types and operations are not guaranteed to be wait-free. This means that
37 //! operations like `fetch_or` may be implemented with a compare-and-swap loop.
39 //! Atomic operations may be implemented at the instruction layer with
40 //! larger-size atomics. For example some platforms use 4-byte atomic
41 //! instructions to implement `AtomicI8`. Note that this emulation should not
42 //! have an impact on correctness of code, it's just something to be aware of.
44 //! The atomic types in this module might not be available on all platforms. The
45 //! atomic types here are all widely available, however, and can generally be
46 //! relied upon to exist. Some notable exceptions are:
48 //! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or
49 //! `AtomicI64` types.
50 //! * ARM platforms like `armv5te` that aren't for Linux only provide `load`
51 //! and `store` operations, and do not support Compare and Swap (CAS)
52 //! operations, such as `swap`, `fetch_add`, etc. Additionally on Linux,
53 //! these CAS operations are implemented via [operating system support], which
54 //! may come with a performance penalty.
55 //! * ARM targets with `thumbv6m` only provide `load` and `store` operations,
56 //! and do not support Compare and Swap (CAS) operations, such as `swap`, `fetch_add`, etc.
59 //! [operating system support]: https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt
61 //! Note that future platforms may be added that also do not have support for
62 //! some atomic operations. Maximally portable code will want to be careful
63 //! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are
64 //! generally the most portable, but even then they're not available everywhere.
65 //! For reference, the `std` library requires `AtomicBool`s and pointer-sized atomics, although `core` does not.
68 //! Currently you'll need to use `#[cfg(target_arch)]` primarily to
69 //! conditionally compile in code with atomics. There is an unstable
70 //! `#[cfg(target_has_atomic)]` as well which may be stabilized in the future.
72 //! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm
76 //! A simple spinlock:
79 //! use std::sync::Arc;
80 //! use std::sync::atomic::{AtomicUsize, Ordering};
81 //! use std::{hint, thread};
84 //! let spinlock = Arc::new(AtomicUsize::new(1));
86 //! let spinlock_clone = Arc::clone(&spinlock);
87 //! let thread = thread::spawn(move|| {
88 //! spinlock_clone.store(0, Ordering::SeqCst);
91 //! // Wait for the other thread to release the lock
92 //! while spinlock.load(Ordering::SeqCst) != 0 {
93 //! hint::spin_loop();
96 //! if let Err(panic) = thread.join() {
97 //! println!("Thread had an error: {:?}", panic);
102 //! Keep a global count of live threads:
105 //! use std::sync::atomic::{AtomicUsize, Ordering};
107 //! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0);
109 //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
110 //! println!("live threads: {}", old_thread_count + 1);
113 #![stable(feature = "rust1", since = "1.0.0")]
114 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
115 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))]
116 #![rustc_diagnostic_item = "atomic_mod"]
118 use self::Ordering::*;
120 use crate::cell::UnsafeCell;
122 use crate::intrinsics;
124 use crate::hint::spin_loop;
126 /// A boolean type which can be safely shared between threads.
128 /// This type has the same in-memory representation as a [`bool`].
130 /// **Note**: This type is only available on platforms that support atomic
131 /// loads and stores of `u8`.
132 #[cfg(target_has_atomic_load_store = "8")]
133 #[stable(feature = "rust1", since = "1.0.0")]
135 pub struct AtomicBool {
139 #[cfg(target_has_atomic_load_store = "8")]
140 #[stable(feature = "rust1", since = "1.0.0")]
141 #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
142 impl const Default for AtomicBool {
143 /// Creates an `AtomicBool` initialized to `false`.
145 fn default() -> Self {
150 // Send is implicitly implemented for AtomicBool.
151 #[cfg(target_has_atomic_load_store = "8")]
152 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: `AtomicBool` only allows mutation through a shared reference via
// atomic operations (`load`/`store`/`swap`/...), which prevent data races,
// so it is sound to share `&AtomicBool` across threads.
153 unsafe impl Sync for AtomicBool {}
155 /// A raw pointer type which can be safely shared between threads.
157 /// This type has the same in-memory representation as a `*mut T`.
159 /// **Note**: This type is only available on platforms that support atomic
160 /// loads and stores of pointers. Its size depends on the target pointer's size.
161 #[cfg(target_has_atomic_load_store = "ptr")]
162 #[stable(feature = "rust1", since = "1.0.0")]
163 #[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
164 #[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
165 #[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
166 pub struct AtomicPtr<T> {
167 p: UnsafeCell<*mut T>,
170 #[cfg(target_has_atomic_load_store = "ptr")]
171 #[stable(feature = "rust1", since = "1.0.0")]
172 #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
173 impl<T> const Default for AtomicPtr<T> {
174 /// Creates a null `AtomicPtr<T>`.
175 fn default() -> AtomicPtr<T> {
176 AtomicPtr::new(crate::ptr::null_mut())
180 #[cfg(target_has_atomic_load_store = "ptr")]
181 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: an `AtomicPtr<T>` only stores a raw `*mut T` value and never
// dereferences it, so moving it to another thread is sound regardless of `T`
// (dereferencing the pointer is the caller's `unsafe` responsibility).
182 unsafe impl<T> Send for AtomicPtr<T> {}
183 #[cfg(target_has_atomic_load_store = "ptr")]
184 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: all shared-reference mutation of the contained pointer goes through
// atomic operations, so concurrent access to the `AtomicPtr` itself is
// race-free; the pointee is never accessed by this type.
185 unsafe impl<T> Sync for AtomicPtr<T> {}
187 /// Atomic memory orderings
189 /// Memory orderings specify the way atomic operations synchronize memory.
190 /// In its weakest [`Ordering::Relaxed`], only the memory directly touched by the
191 /// operation is synchronized. On the other hand, a store-load pair of [`Ordering::SeqCst`]
192 /// operations synchronize other memory while additionally preserving a total order of such
193 /// operations across all threads.
195 /// Rust's memory orderings are [the same as those of
196 /// C++20](https://en.cppreference.com/w/cpp/atomic/memory_order).
198 /// For more information see the [nomicon].
200 /// [nomicon]: ../../../nomicon/atomics.html
201 #[stable(feature = "rust1", since = "1.0.0")]
202 #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
204 #[rustc_diagnostic_item = "Ordering"]
206 /// No ordering constraints, only atomic operations.
208 /// Corresponds to [`memory_order_relaxed`] in C++20.
210 /// [`memory_order_relaxed`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering
211 #[stable(feature = "rust1", since = "1.0.0")]
213 /// When coupled with a store, all previous operations become ordered
214 /// before any load of this value with [`Acquire`] (or stronger) ordering.
215 /// In particular, all previous writes become visible to all threads
216 /// that perform an [`Acquire`] (or stronger) load of this value.
218 /// Notice that using this ordering for an operation that combines loads
219 /// and stores leads to a [`Relaxed`] load operation!
221 /// This ordering is only applicable for operations that can perform a store.
223 /// Corresponds to [`memory_order_release`] in C++20.
225 /// [`memory_order_release`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
226 #[stable(feature = "rust1", since = "1.0.0")]
228 /// When coupled with a load, if the loaded value was written by a store operation with
229 /// [`Release`] (or stronger) ordering, then all subsequent operations
230 /// become ordered after that store. In particular, all subsequent loads will see data
231 /// written before the store.
233 /// Notice that using this ordering for an operation that combines loads
234 /// and stores leads to a [`Relaxed`] store operation!
236 /// This ordering is only applicable for operations that can perform a load.
238 /// Corresponds to [`memory_order_acquire`] in C++20.
240 /// [`memory_order_acquire`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
241 #[stable(feature = "rust1", since = "1.0.0")]
243 /// Has the effects of both [`Acquire`] and [`Release`] together:
244 /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
246 /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
247 /// not performing any store and hence it has just [`Acquire`] ordering. However,
248 /// `AcqRel` will never perform [`Relaxed`] accesses.
250 /// This ordering is only applicable for operations that combine both loads and stores.
252 /// Corresponds to [`memory_order_acq_rel`] in C++20.
254 /// [`memory_order_acq_rel`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
255 #[stable(feature = "rust1", since = "1.0.0")]
257 /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
258 /// operations, respectively) with the additional guarantee that all threads see all
259 /// sequentially consistent operations in the same order.
261 /// Corresponds to [`memory_order_seq_cst`] in C++20.
263 /// [`memory_order_seq_cst`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering
264 #[stable(feature = "rust1", since = "1.0.0")]
268 /// An [`AtomicBool`] initialized to `false`.
269 #[cfg(target_has_atomic_load_store = "8")]
270 #[stable(feature = "rust1", since = "1.0.0")]
273 reason = "the `new` function is now preferred",
274 suggestion = "AtomicBool::new(false)"
276 pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
278 #[cfg(target_has_atomic_load_store = "8")]
280 /// Creates a new `AtomicBool`.
285 /// use std::sync::atomic::AtomicBool;
287 /// let atomic_true = AtomicBool::new(true);
288 /// let atomic_false = AtomicBool::new(false);
291 #[stable(feature = "rust1", since = "1.0.0")]
292 #[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
294 pub const fn new(v: bool) -> AtomicBool {
295 AtomicBool { v: UnsafeCell::new(v as u8) }
298 /// Returns a mutable reference to the underlying [`bool`].
300 /// This is safe because the mutable reference guarantees that no other threads are
301 /// concurrently accessing the atomic data.
306 /// use std::sync::atomic::{AtomicBool, Ordering};
308 /// let mut some_bool = AtomicBool::new(true);
309 /// assert_eq!(*some_bool.get_mut(), true);
310 /// *some_bool.get_mut() = false;
311 /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
314 #[stable(feature = "atomic_access", since = "1.15.0")]
315 pub fn get_mut(&mut self) -> &mut bool {
316 // SAFETY: the mutable reference guarantees unique ownership.
317 unsafe { &mut *(self.v.get() as *mut bool) }
320 /// Get atomic access to a `&mut bool`.
325 /// #![feature(atomic_from_mut)]
326 /// use std::sync::atomic::{AtomicBool, Ordering};
328 /// let mut some_bool = true;
329 /// let a = AtomicBool::from_mut(&mut some_bool);
330 /// a.store(false, Ordering::Relaxed);
331 /// assert_eq!(some_bool, false);
334 #[cfg(target_has_atomic_equal_alignment = "8")]
335 #[unstable(feature = "atomic_from_mut", issue = "76314")]
336 pub fn from_mut(v: &mut bool) -> &mut Self {
337 // SAFETY: the mutable reference guarantees unique ownership, and
338 // alignment of both `bool` and `Self` is 1.
339 unsafe { &mut *(v as *mut bool as *mut Self) }
342 /// Consumes the atomic and returns the contained value.
344 /// This is safe because passing `self` by value guarantees that no other threads are
345 /// concurrently accessing the atomic data.
350 /// use std::sync::atomic::AtomicBool;
352 /// let some_bool = AtomicBool::new(true);
353 /// assert_eq!(some_bool.into_inner(), true);
356 #[stable(feature = "atomic_access", since = "1.15.0")]
357 #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
358 pub const fn into_inner(self) -> bool {
359 self.v.into_inner() != 0
362 /// Loads a value from the bool.
364 /// `load` takes an [`Ordering`] argument which describes the memory ordering
365 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
369 /// Panics if `order` is [`Release`] or [`AcqRel`].
374 /// use std::sync::atomic::{AtomicBool, Ordering};
376 /// let some_bool = AtomicBool::new(true);
378 /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
381 #[stable(feature = "rust1", since = "1.0.0")]
382 pub fn load(&self, order: Ordering) -> bool {
383 // SAFETY: any data races are prevented by atomic intrinsics and the raw
384 // pointer passed in is valid because we got it from a reference.
385 unsafe { atomic_load(self.v.get(), order) != 0 }
388 /// Stores a value into the bool.
390 /// `store` takes an [`Ordering`] argument which describes the memory ordering
391 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
395 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
400 /// use std::sync::atomic::{AtomicBool, Ordering};
402 /// let some_bool = AtomicBool::new(true);
404 /// some_bool.store(false, Ordering::Relaxed);
405 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
408 #[stable(feature = "rust1", since = "1.0.0")]
409 pub fn store(&self, val: bool, order: Ordering) {
410 // SAFETY: any data races are prevented by atomic intrinsics and the raw
411 // pointer passed in is valid because we got it from a reference.
413 atomic_store(self.v.get(), val as u8, order);
417 /// Stores a value into the bool, returning the previous value.
419 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
420 /// of this operation. All ordering modes are possible. Note that using
421 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
422 /// using [`Release`] makes the load part [`Relaxed`].
424 /// **Note:** This method is only available on platforms that support atomic
425 /// operations on `u8`.
430 /// use std::sync::atomic::{AtomicBool, Ordering};
432 /// let some_bool = AtomicBool::new(true);
434 /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
435 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
438 #[stable(feature = "rust1", since = "1.0.0")]
439 #[cfg(target_has_atomic = "8")]
440 pub fn swap(&self, val: bool, order: Ordering) -> bool {
441 // SAFETY: data races are prevented by atomic intrinsics.
442 unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
445 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
447 /// The return value is always the previous value. If it is equal to `current`, then the value
450 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
451 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
452 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
453 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
454 /// happens, and using [`Release`] makes the load part [`Relaxed`].
456 /// **Note:** This method is only available on platforms that support atomic
457 /// operations on `u8`.
459 /// # Migrating to `compare_exchange` and `compare_exchange_weak`
461 /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
462 /// memory orderings:
464 /// Original | Success | Failure
465 /// -------- | ------- | -------
466 /// Relaxed | Relaxed | Relaxed
467 /// Acquire | Acquire | Acquire
468 /// Release | Release | Relaxed
469 /// AcqRel | AcqRel | Acquire
470 /// SeqCst | SeqCst | SeqCst
472 /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
473 /// which allows the compiler to generate better assembly code when the compare and swap
474 /// is used in a loop.
479 /// use std::sync::atomic::{AtomicBool, Ordering};
481 /// let some_bool = AtomicBool::new(true);
483 /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
484 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
486 /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
487 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
490 #[stable(feature = "rust1", since = "1.0.0")]
493 reason = "Use `compare_exchange` or `compare_exchange_weak` instead"
495 #[cfg(target_has_atomic = "8")]
496 pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
497 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
503 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
505 /// The return value is a result indicating whether the new value was written and containing
506 /// the previous value. On success this value is guaranteed to be equal to `current`.
508 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
509 /// ordering of this operation. `success` describes the required ordering for the
510 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
511 /// `failure` describes the required ordering for the load operation that takes place when
512 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
513 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
514 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
515 /// and must be equivalent to or weaker than the success ordering.
517 /// **Note:** This method is only available on platforms that support atomic
518 /// operations on `u8`.
523 /// use std::sync::atomic::{AtomicBool, Ordering};
525 /// let some_bool = AtomicBool::new(true);
527 /// assert_eq!(some_bool.compare_exchange(true,
529 /// Ordering::Acquire,
530 /// Ordering::Relaxed),
532 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
534 /// assert_eq!(some_bool.compare_exchange(true, true,
535 /// Ordering::SeqCst,
536 /// Ordering::Acquire),
538 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
541 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
542 #[doc(alias = "compare_and_swap")]
543 #[cfg(target_has_atomic = "8")]
544 pub fn compare_exchange(
550 ) -> Result<bool, bool> {
551 // SAFETY: data races are prevented by atomic intrinsics.
553 atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
556 Err(x) => Err(x != 0),
560 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
562 /// Unlike [`AtomicBool::compare_exchange`], this function is allowed to spuriously fail even when the
563 /// comparison succeeds, which can result in more efficient code on some platforms. The
564 /// return value is a result indicating whether the new value was written and containing the
567 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
568 /// ordering of this operation. `success` describes the required ordering for the
569 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
570 /// `failure` describes the required ordering for the load operation that takes place when
571 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
572 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
573 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
574 /// and must be equivalent to or weaker than the success ordering.
576 /// **Note:** This method is only available on platforms that support atomic
577 /// operations on `u8`.
582 /// use std::sync::atomic::{AtomicBool, Ordering};
584 /// let val = AtomicBool::new(false);
587 /// let mut old = val.load(Ordering::Relaxed);
589 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
591 /// Err(x) => old = x,
596 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
597 #[doc(alias = "compare_and_swap")]
598 #[cfg(target_has_atomic = "8")]
599 pub fn compare_exchange_weak(
605 ) -> Result<bool, bool> {
606 // SAFETY: data races are prevented by atomic intrinsics.
608 atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
611 Err(x) => Err(x != 0),
615 /// Logical "and" with a boolean value.
617 /// Performs a logical "and" operation on the current value and the argument `val`, and sets
618 /// the new value to the result.
620 /// Returns the previous value.
622 /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
623 /// of this operation. All ordering modes are possible. Note that using
624 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
625 /// using [`Release`] makes the load part [`Relaxed`].
627 /// **Note:** This method is only available on platforms that support atomic
628 /// operations on `u8`.
633 /// use std::sync::atomic::{AtomicBool, Ordering};
635 /// let foo = AtomicBool::new(true);
636 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
637 /// assert_eq!(foo.load(Ordering::SeqCst), false);
639 /// let foo = AtomicBool::new(true);
640 /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
641 /// assert_eq!(foo.load(Ordering::SeqCst), true);
643 /// let foo = AtomicBool::new(false);
644 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
645 /// assert_eq!(foo.load(Ordering::SeqCst), false);
648 #[stable(feature = "rust1", since = "1.0.0")]
649 #[cfg(target_has_atomic = "8")]
650 pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
651 // SAFETY: data races are prevented by atomic intrinsics.
652 unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
655 /// Logical "nand" with a boolean value.
657 /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
658 /// the new value to the result.
660 /// Returns the previous value.
662 /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
663 /// of this operation. All ordering modes are possible. Note that using
664 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
665 /// using [`Release`] makes the load part [`Relaxed`].
667 /// **Note:** This method is only available on platforms that support atomic
668 /// operations on `u8`.
673 /// use std::sync::atomic::{AtomicBool, Ordering};
675 /// let foo = AtomicBool::new(true);
676 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
677 /// assert_eq!(foo.load(Ordering::SeqCst), true);
679 /// let foo = AtomicBool::new(true);
680 /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
681 /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
682 /// assert_eq!(foo.load(Ordering::SeqCst), false);
684 /// let foo = AtomicBool::new(false);
685 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
686 /// assert_eq!(foo.load(Ordering::SeqCst), true);
689 #[stable(feature = "rust1", since = "1.0.0")]
690 #[cfg(target_has_atomic = "8")]
691 pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
692 // We can't use atomic_nand here because it can result in a bool with
693 // an invalid value. This happens because the atomic operation is done
694 // with an 8-bit integer internally, which would set the upper 7 bits.
695 // So we just use fetch_xor or swap instead.
698 // We must invert the bool.
699 self.fetch_xor(true, order)
701 // !(x & false) == true
702 // We must set the bool to true.
703 self.swap(true, order)
707 /// Logical "or" with a boolean value.
709 /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
710 /// new value to the result.
712 /// Returns the previous value.
714 /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
715 /// of this operation. All ordering modes are possible. Note that using
716 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
717 /// using [`Release`] makes the load part [`Relaxed`].
719 /// **Note:** This method is only available on platforms that support atomic
720 /// operations on `u8`.
725 /// use std::sync::atomic::{AtomicBool, Ordering};
727 /// let foo = AtomicBool::new(true);
728 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
729 /// assert_eq!(foo.load(Ordering::SeqCst), true);
731 /// let foo = AtomicBool::new(true);
732 /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
733 /// assert_eq!(foo.load(Ordering::SeqCst), true);
735 /// let foo = AtomicBool::new(false);
736 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
737 /// assert_eq!(foo.load(Ordering::SeqCst), false);
740 #[stable(feature = "rust1", since = "1.0.0")]
741 #[cfg(target_has_atomic = "8")]
742 pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
743 // SAFETY: data races are prevented by atomic intrinsics.
744 unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
747 /// Logical "xor" with a boolean value.
749 /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
750 /// the new value to the result.
752 /// Returns the previous value.
754 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
755 /// of this operation. All ordering modes are possible. Note that using
756 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
757 /// using [`Release`] makes the load part [`Relaxed`].
759 /// **Note:** This method is only available on platforms that support atomic
760 /// operations on `u8`.
765 /// use std::sync::atomic::{AtomicBool, Ordering};
767 /// let foo = AtomicBool::new(true);
768 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
769 /// assert_eq!(foo.load(Ordering::SeqCst), true);
771 /// let foo = AtomicBool::new(true);
772 /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
773 /// assert_eq!(foo.load(Ordering::SeqCst), false);
775 /// let foo = AtomicBool::new(false);
776 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
777 /// assert_eq!(foo.load(Ordering::SeqCst), false);
780 #[stable(feature = "rust1", since = "1.0.0")]
781 #[cfg(target_has_atomic = "8")]
782 pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
783 // SAFETY: data races are prevented by atomic intrinsics.
784 unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
787 /// Returns a mutable pointer to the underlying [`bool`].
789 /// Doing non-atomic reads and writes on the resulting integer can be a data race.
790 /// This method is mostly useful for FFI, where the function signature may use
791 /// `*mut bool` instead of `&AtomicBool`.
793 /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
794 /// atomic types work with interior mutability. All modifications of an atomic change the value
795 /// through a shared reference, and can do so safely as long as they use atomic operations. Any
796 /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
797 /// restriction: operations on it must be atomic.
801 /// ```ignore (extern-declaration)
803 /// use std::sync::atomic::AtomicBool;
805 /// fn my_atomic_op(arg: *mut bool);
808 /// let mut atomic = AtomicBool::new(true);
810 /// my_atomic_op(atomic.as_mut_ptr());
815 #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")]
816 pub fn as_mut_ptr(&self) -> *mut bool {
817 self.v.get() as *mut bool
820 /// Fetches the value, and applies a function to it that returns an optional
821 /// new value. Returns a `Result` of `Ok(previous_value)` if the function
822 /// returned `Some(_)`, else `Err(previous_value)`.
824 /// Note: This may call the function multiple times if the value has been
825 /// changed from other threads in the meantime, as long as the function
826 /// returns `Some(_)`, but the function will have been applied only once to
827 /// the stored value.
829 /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
830 /// ordering of this operation. The first describes the required ordering for
831 /// when the operation finally succeeds while the second describes the
832 /// required ordering for loads. These correspond to the success and failure
833 /// orderings of [`AtomicBool::compare_exchange`] respectively.
835 /// Using [`Acquire`] as success ordering makes the store part of this
836 /// operation [`Relaxed`], and using [`Release`] makes the final successful
837 /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
838 /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the
839 /// success ordering.
841 /// **Note:** This method is only available on platforms that support atomic
842 /// operations on `u8`.
847 /// use std::sync::atomic::{AtomicBool, Ordering};
849 /// let x = AtomicBool::new(false);
850 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(false));
851 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(false));
852 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(true));
853 /// assert_eq!(x.load(Ordering::SeqCst), false);
856 #[stable(feature = "atomic_fetch_update", since = "1.53.0")]
857 #[cfg(target_has_atomic = "8")]
858 pub fn fetch_update<F>(
861 fetch_order: Ordering,
863 ) -> Result<bool, bool>
865 F: FnMut(bool) -> Option<bool>,
867 let mut prev = self.load(fetch_order);
868 while let Some(next) = f(prev) {
869 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
870 x @ Ok(_) => return x,
871 Err(next_prev) => prev = next_prev,
878 #[cfg(target_has_atomic_load_store = "ptr")]
879 impl<T> AtomicPtr<T> {
880 /// Creates a new `AtomicPtr`.
885 /// use std::sync::atomic::AtomicPtr;
887 /// let ptr = &mut 5;
888 /// let atomic_ptr = AtomicPtr::new(ptr);
891 #[stable(feature = "rust1", since = "1.0.0")]
892 #[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
893 pub const fn new(p: *mut T) -> AtomicPtr<T> {
894 AtomicPtr { p: UnsafeCell::new(p) }
897 /// Returns a mutable reference to the underlying pointer.
899 /// This is safe because the mutable reference guarantees that no other threads are
900 /// concurrently accessing the atomic data.
905 /// use std::sync::atomic::{AtomicPtr, Ordering};
907 /// let mut data = 10;
908 /// let mut atomic_ptr = AtomicPtr::new(&mut data);
909 /// let mut other_data = 5;
910 /// *atomic_ptr.get_mut() = &mut other_data;
911 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
914 #[stable(feature = "atomic_access", since = "1.15.0")]
915 pub fn get_mut(&mut self) -> &mut *mut T {
919 /// Get atomic access to a pointer.
924 /// #![feature(atomic_from_mut)]
925 /// use std::sync::atomic::{AtomicPtr, Ordering};
927 /// let mut data = 123;
928 /// let mut some_ptr = &mut data as *mut i32;
929 /// let a = AtomicPtr::from_mut(&mut some_ptr);
930 /// let mut other_data = 456;
931 /// a.store(&mut other_data, Ordering::Relaxed);
932 /// assert_eq!(unsafe { *some_ptr }, 456);
935 #[cfg(target_has_atomic_equal_alignment = "ptr")]
936 #[unstable(feature = "atomic_from_mut", issue = "76314")]
937 pub fn from_mut(v: &mut *mut T) -> &mut Self {
938 use crate::mem::align_of;
939 let [] = [(); align_of::<AtomicPtr<()>>() - align_of::<*mut ()>()];
941 // - the mutable reference guarantees unique ownership.
942 // - the alignment of `*mut T` and `Self` is the same on all platforms
943 // supported by rust, as verified above.
944 unsafe { &mut *(v as *mut *mut T as *mut Self) }
947 /// Consumes the atomic and returns the contained value.
949 /// This is safe because passing `self` by value guarantees that no other threads are
950 /// concurrently accessing the atomic data.
955 /// use std::sync::atomic::AtomicPtr;
957 /// let mut data = 5;
958 /// let atomic_ptr = AtomicPtr::new(&mut data);
959 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
962 #[stable(feature = "atomic_access", since = "1.15.0")]
963 #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
964 pub const fn into_inner(self) -> *mut T {
968 /// Loads a value from the pointer.
970 /// `load` takes an [`Ordering`] argument which describes the memory ordering
971 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
975 /// Panics if `order` is [`Release`] or [`AcqRel`].
980 /// use std::sync::atomic::{AtomicPtr, Ordering};
982 /// let ptr = &mut 5;
983 /// let some_ptr = AtomicPtr::new(ptr);
985 /// let value = some_ptr.load(Ordering::Relaxed);
988 #[stable(feature = "rust1", since = "1.0.0")]
989 pub fn load(&self, order: Ordering) -> *mut T {
990 // SAFETY: data races are prevented by atomic intrinsics.
991 unsafe { atomic_load(self.p.get(), order) }
994 /// Stores a value into the pointer.
996 /// `store` takes an [`Ordering`] argument which describes the memory ordering
997 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
1001 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
1006 /// use std::sync::atomic::{AtomicPtr, Ordering};
1008 /// let ptr = &mut 5;
1009 /// let some_ptr = AtomicPtr::new(ptr);
1011 /// let other_ptr = &mut 10;
1013 /// some_ptr.store(other_ptr, Ordering::Relaxed);
1016 #[stable(feature = "rust1", since = "1.0.0")]
1017 pub fn store(&self, ptr: *mut T, order: Ordering) {
1018 // SAFETY: data races are prevented by atomic intrinsics.
1020 atomic_store(self.p.get(), ptr, order);
1024 /// Stores a value into the pointer, returning the previous value.
1026 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
1027 /// of this operation. All ordering modes are possible. Note that using
1028 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1029 /// using [`Release`] makes the load part [`Relaxed`].
1031 /// **Note:** This method is only available on platforms that support atomic
1032 /// operations on pointers.
1037 /// use std::sync::atomic::{AtomicPtr, Ordering};
1039 /// let ptr = &mut 5;
1040 /// let some_ptr = AtomicPtr::new(ptr);
1042 /// let other_ptr = &mut 10;
1044 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
1047 #[stable(feature = "rust1", since = "1.0.0")]
1048 #[cfg(target_has_atomic = "ptr")]
1049 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
1050 // SAFETY: data races are prevented by atomic intrinsics.
1051 unsafe { atomic_swap(self.p.get(), ptr, order) }
1054 /// Stores a value into the pointer if the current value is the same as the `current` value.
1056 /// The return value is always the previous value. If it is equal to `current`, then the value
1059 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1060 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
1061 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1062 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1063 /// happens, and using [`Release`] makes the load part [`Relaxed`].
1065 /// **Note:** This method is only available on platforms that support atomic
1066 /// operations on pointers.
1068 /// # Migrating to `compare_exchange` and `compare_exchange_weak`
1070 /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
1071 /// memory orderings:
1073 /// Original | Success | Failure
1074 /// -------- | ------- | -------
1075 /// Relaxed | Relaxed | Relaxed
1076 /// Acquire | Acquire | Acquire
1077 /// Release | Release | Relaxed
1078 /// AcqRel | AcqRel | Acquire
1079 /// SeqCst | SeqCst | SeqCst
1081 /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
1082 /// which allows the compiler to generate better assembly code when the compare and swap
1083 /// is used in a loop.
1088 /// use std::sync::atomic::{AtomicPtr, Ordering};
1090 /// let ptr = &mut 5;
1091 /// let some_ptr = AtomicPtr::new(ptr);
1093 /// let other_ptr = &mut 10;
1095 /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
1098 #[stable(feature = "rust1", since = "1.0.0")]
1101 reason = "Use `compare_exchange` or `compare_exchange_weak` instead"
1103 #[cfg(target_has_atomic = "ptr")]
1104 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
1105 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
1111 /// Stores a value into the pointer if the current value is the same as the `current` value.
1113 /// The return value is a result indicating whether the new value was written and containing
1114 /// the previous value. On success this value is guaranteed to be equal to `current`.
1116 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1117 /// ordering of this operation. `success` describes the required ordering for the
1118 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
1119 /// `failure` describes the required ordering for the load operation that takes place when
1120 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
1121 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1122 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1123 /// and must be equivalent to or weaker than the success ordering.
1125 /// **Note:** This method is only available on platforms that support atomic
1126 /// operations on pointers.
1131 /// use std::sync::atomic::{AtomicPtr, Ordering};
1133 /// let ptr = &mut 5;
1134 /// let some_ptr = AtomicPtr::new(ptr);
1136 /// let other_ptr = &mut 10;
1138 /// let value = some_ptr.compare_exchange(ptr, other_ptr,
1139 /// Ordering::SeqCst, Ordering::Relaxed);
1142 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1143 #[cfg(target_has_atomic = "ptr")]
1144 pub fn compare_exchange(
1150 ) -> Result<*mut T, *mut T> {
1151 // SAFETY: data races are prevented by atomic intrinsics.
1152 unsafe { atomic_compare_exchange(self.p.get(), current, new, success, failure) }
1155 /// Stores a value into the pointer if the current value is the same as the `current` value.
1157 /// Unlike [`AtomicPtr::compare_exchange`], this function is allowed to spuriously fail even when the
1158 /// comparison succeeds, which can result in more efficient code on some platforms. The
1159 /// return value is a result indicating whether the new value was written and containing the
1162 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1163 /// ordering of this operation. `success` describes the required ordering for the
1164 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
1165 /// `failure` describes the required ordering for the load operation that takes place when
1166 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
1167 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1168 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1169 /// and must be equivalent to or weaker than the success ordering.
1171 /// **Note:** This method is only available on platforms that support atomic
1172 /// operations on pointers.
1177 /// use std::sync::atomic::{AtomicPtr, Ordering};
1179 /// let some_ptr = AtomicPtr::new(&mut 5);
1181 /// let new = &mut 10;
1182 /// let mut old = some_ptr.load(Ordering::Relaxed);
1184 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1186 /// Err(x) => old = x,
1191 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1192 #[cfg(target_has_atomic = "ptr")]
1193 pub fn compare_exchange_weak(
1199 ) -> Result<*mut T, *mut T> {
1200 // SAFETY: This intrinsic is unsafe because it operates on a raw pointer
1201 // but we know for sure that the pointer is valid (we just got it from
1202 // an `UnsafeCell` that we have by reference) and the atomic operation
1203 // itself allows us to safely mutate the `UnsafeCell` contents.
1204 unsafe { atomic_compare_exchange_weak(self.p.get(), current, new, success, failure) }
1207 /// Fetches the value, and applies a function to it that returns an optional
1208 /// new value. Returns a `Result` of `Ok(previous_value)` if the function
1209 /// returned `Some(_)`, else `Err(previous_value)`.
1211 /// Note: This may call the function multiple times if the value has been
1212 /// changed from other threads in the meantime, as long as the function
1213 /// returns `Some(_)`, but the function will have been applied only once to
1214 /// the stored value.
1216 /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
1217 /// ordering of this operation. The first describes the required ordering for
1218 /// when the operation finally succeeds while the second describes the
1219 /// required ordering for loads. These correspond to the success and failure
1220 /// orderings of [`AtomicPtr::compare_exchange`] respectively.
1222 /// Using [`Acquire`] as success ordering makes the store part of this
1223 /// operation [`Relaxed`], and using [`Release`] makes the final successful
1224 /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
1225 /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the
1226 /// success ordering.
1228 /// **Note:** This method is only available on platforms that support atomic
1229 /// operations on pointers.
1234 /// use std::sync::atomic::{AtomicPtr, Ordering};
1236 /// let ptr: *mut _ = &mut 5;
1237 /// let some_ptr = AtomicPtr::new(ptr);
1239 /// let new: *mut _ = &mut 10;
1240 /// assert_eq!(some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(ptr));
1241 /// let result = some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
1248 /// assert_eq!(result, Ok(ptr));
1249 /// assert_eq!(some_ptr.load(Ordering::SeqCst), new);
1252 #[stable(feature = "atomic_fetch_update", since = "1.53.0")]
1253 #[cfg(target_has_atomic = "ptr")]
1254 pub fn fetch_update<F>(
1256 set_order: Ordering,
1257 fetch_order: Ordering,
1259 ) -> Result<*mut T, *mut T>
1261 F: FnMut(*mut T) -> Option<*mut T>,
1263 let mut prev = self.load(fetch_order);
1264 while let Some(next) = f(prev) {
1265 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
1266 x @ Ok(_) => return x,
1267 Err(next_prev) => prev = next_prev,
1274 #[cfg(target_has_atomic_load_store = "8")]
1275 #[stable(feature = "atomic_bool_from", since = "1.24.0")]
1276 #[rustc_const_unstable(feature = "const_convert", issue = "88674")]
1277 impl const From<bool> for AtomicBool {
1278 /// Converts a `bool` into an `AtomicBool`.
1283 /// use std::sync::atomic::AtomicBool;
1284 /// let atomic_bool = AtomicBool::from(true);
1285 /// assert_eq!(format!("{:?}", atomic_bool), "true")
1288 fn from(b: bool) -> Self {
1293 #[cfg(target_has_atomic_load_store = "ptr")]
1294 #[stable(feature = "atomic_from", since = "1.23.0")]
1295 #[rustc_const_unstable(feature = "const_convert", issue = "88674")]
1296 impl<T> const From<*mut T> for AtomicPtr<T> {
1298 fn from(p: *mut T) -> Self {
1303 #[allow(unused_macros)] // This macro ends up being unused on some architectures.
1304 macro_rules! if_not_8_bit {
1305 (u8, $($tt:tt)*) => { "" };
1306 (i8, $($tt:tt)*) => { "" };
1307 ($_:ident, $($tt:tt)*) => { $($tt)* };
1310 #[cfg(target_has_atomic_load_store = "8")]
1311 macro_rules! atomic_int {
1317 $stable_access:meta,
1321 $stable_init_const:meta,
1322 $s_int_type:literal,
1323 $extra_feature:expr,
1324 $min_fn:ident, $max_fn:ident,
1327 $int_type:ident $atomic_type:ident $atomic_init:ident) => {
1328 /// An integer type which can be safely shared between threads.
1330 /// This type has the same in-memory representation as the underlying
1331 /// integer type, [`
1332 #[doc = $s_int_type]
1333 /// `]. For more about the differences between atomic types and
1334 /// non-atomic types as well as information about the portability of
1335 /// this type, please see the [module-level documentation].
1337 /// **Note:** This type is only available on platforms that support
1338 /// atomic loads and stores of [`
1339 #[doc = $s_int_type]
1342 /// [module-level documentation]: crate::sync::atomic
1344 #[repr(C, align($align))]
1345 pub struct $atomic_type {
1346 v: UnsafeCell<$int_type>,
1349 /// An atomic integer initialized to `0`.
1350 #[$stable_init_const]
1353 reason = "the `new` function is now preferred",
1354 suggestion = $atomic_new,
1356 pub const $atomic_init: $atomic_type = $atomic_type::new(0);
1359 #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
1360 impl const Default for $atomic_type {
1362 fn default() -> Self {
1363 Self::new(Default::default())
1368 #[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")]
1369 impl const From<$int_type> for $atomic_type {
1370 #[doc = concat!("Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`.")]
1372 fn from(v: $int_type) -> Self { Self::new(v) }
1376 impl fmt::Debug for $atomic_type {
1377 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1378 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
1382 // Send is implicitly implemented.
// NOTE(review): SAFETY — presumably sound because the atomic operations on the
// inner `UnsafeCell` synchronize all cross-thread access themselves, so sharing
// `&$atomic_type` between threads cannot race; confirm against the matching
// `unsafe impl Sync` on `AtomicBool`/`AtomicPtr`.
1384 unsafe impl Sync for $atomic_type {}
1387 /// Creates a new atomic integer.
1392 #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
1394 #[doc = concat!("let atomic_forty_two = ", stringify!($atomic_type), "::new(42);")]
1400 pub const fn new(v: $int_type) -> Self {
1401 Self {v: UnsafeCell::new(v)}
1404 /// Returns a mutable reference to the underlying integer.
1406 /// This is safe because the mutable reference guarantees that no other threads are
1407 /// concurrently accessing the atomic data.
1412 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1414 #[doc = concat!("let mut some_var = ", stringify!($atomic_type), "::new(10);")]
1415 /// assert_eq!(*some_var.get_mut(), 10);
1416 /// *some_var.get_mut() = 5;
1417 /// assert_eq!(some_var.load(Ordering::SeqCst), 5);
1421 pub fn get_mut(&mut self) -> &mut $int_type {
1425 #[doc = concat!("Get atomic access to a `&mut ", stringify!($int_type), "`.")]
1427 #[doc = if_not_8_bit! {
1430 "**Note:** This function is only available on targets where `",
1431 stringify!($int_type), "` has an alignment of ", $align, " bytes."
1438 /// #![feature(atomic_from_mut)]
1439 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1441 /// let mut some_int = 123;
1442 #[doc = concat!("let a = ", stringify!($atomic_type), "::from_mut(&mut some_int);")]
1443 /// a.store(100, Ordering::Relaxed);
1444 /// assert_eq!(some_int, 100);
1449 #[unstable(feature = "atomic_from_mut", issue = "76314")]
1450 pub fn from_mut(v: &mut $int_type) -> &mut Self {
1451 use crate::mem::align_of;
1452 let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
1454 // - the mutable reference guarantees unique ownership.
1455 // - the alignment of `$int_type` and `Self` is the
1456 // same, as promised by $cfg_align and verified above.
1457 unsafe { &mut *(v as *mut $int_type as *mut Self) }
1460 /// Consumes the atomic and returns the contained value.
1462 /// This is safe because passing `self` by value guarantees that no other threads are
1463 /// concurrently accessing the atomic data.
1468 #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
1470 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
1471 /// assert_eq!(some_var.into_inner(), 5);
1475 #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
1476 pub const fn into_inner(self) -> $int_type {
1480 /// Loads a value from the atomic integer.
1482 /// `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1483 /// Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1487 /// Panics if `order` is [`Release`] or [`AcqRel`].
1492 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1494 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
1496 /// assert_eq!(some_var.load(Ordering::Relaxed), 5);
1500 pub fn load(&self, order: Ordering) -> $int_type {
1501 // SAFETY: data races are prevented by atomic intrinsics.
1502 unsafe { atomic_load(self.v.get(), order) }
1505 /// Stores a value into the atomic integer.
1507 /// `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1508 /// Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
1512 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
1517 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1519 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
1521 /// some_var.store(10, Ordering::Relaxed);
1522 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
1526 pub fn store(&self, val: $int_type, order: Ordering) {
1527 // SAFETY: data races are prevented by atomic intrinsics.
1528 unsafe { atomic_store(self.v.get(), val, order); }
1531 /// Stores a value into the atomic integer, returning the previous value.
1533 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
1534 /// of this operation. All ordering modes are possible. Note that using
1535 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1536 /// using [`Release`] makes the load part [`Relaxed`].
1538 /// **Note**: This method is only available on platforms that support atomic operations on
1539 #[doc = concat!("[`", $s_int_type, "`].")]
1544 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1546 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
1548 /// assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
1553 pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
1554 // SAFETY: data races are prevented by atomic intrinsics.
1555 unsafe { atomic_swap(self.v.get(), val, order) }
1558 /// Stores a value into the atomic integer if the current value is the same as
1559 /// the `current` value.
1561 /// The return value is always the previous value. If it is equal to `current`, then the
1562 /// value was updated.
1564 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1565 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
1566 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1567 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1568 /// happens, and using [`Release`] makes the load part [`Relaxed`].
1570 /// **Note**: This method is only available on platforms that support atomic operations on
1571 #[doc = concat!("[`", $s_int_type, "`].")]
1573 /// # Migrating to `compare_exchange` and `compare_exchange_weak`
1575 /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
1576 /// memory orderings:
1578 /// Original | Success | Failure
1579 /// -------- | ------- | -------
1580 /// Relaxed | Relaxed | Relaxed
1581 /// Acquire | Acquire | Acquire
1582 /// Release | Release | Relaxed
1583 /// AcqRel | AcqRel | Acquire
1584 /// SeqCst | SeqCst | SeqCst
1586 /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
1587 /// which allows the compiler to generate better assembly code when the compare and swap
1588 /// is used in a loop.
1593 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1595 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
1597 /// assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
1598 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
1600 /// assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
1601 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
1607 reason = "Use `compare_exchange` or `compare_exchange_weak` instead")
1610 pub fn compare_and_swap(&self,
1613 order: Ordering) -> $int_type {
1614 match self.compare_exchange(current,
1617 strongest_failure_ordering(order)) {
1623 /// Stores a value into the atomic integer if the current value is the same as
1624 /// the `current` value.
1626 /// The return value is a result indicating whether the new value was written and
1627 /// containing the previous value. On success this value is guaranteed to be equal to
1630 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1631 /// ordering of this operation. `success` describes the required ordering for the
1632 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
1633 /// `failure` describes the required ordering for the load operation that takes place when
1634 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
1635 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1636 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1637 /// and must be equivalent to or weaker than the success ordering.
1639 /// **Note**: This method is only available on platforms that support atomic operations on
1640 #[doc = concat!("[`", $s_int_type, "`].")]
1645 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1647 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
1649 /// assert_eq!(some_var.compare_exchange(5, 10,
1650 /// Ordering::Acquire,
1651 /// Ordering::Relaxed),
1653 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
1655 /// assert_eq!(some_var.compare_exchange(6, 12,
1656 /// Ordering::SeqCst,
1657 /// Ordering::Acquire),
1659 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
1664 pub fn compare_exchange(&self,
1668 failure: Ordering) -> Result<$int_type, $int_type> {
1669 // SAFETY: data races are prevented by atomic intrinsics.
1670 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
1673 /// Stores a value into the atomic integer if the current value is the same as
1674 /// the `current` value.
1676 #[doc = concat!("Unlike [`", stringify!($atomic_type), "::compare_exchange`],")]
1677 /// this function is allowed to spuriously fail even
1678 /// when the comparison succeeds, which can result in more efficient code on some
1679 /// platforms. The return value is a result indicating whether the new value was
1680 /// written and containing the previous value.
1682 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1683 /// ordering of this operation. `success` describes the required ordering for the
1684 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
1685 /// `failure` describes the required ordering for the load operation that takes place when
1686 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
1687 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1688 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1689 /// and must be equivalent to or weaker than the success ordering.
1691 /// **Note**: This method is only available on platforms that support atomic operations on
1692 #[doc = concat!("[`", $s_int_type, "`].")]
1697 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1699 #[doc = concat!("let val = ", stringify!($atomic_type), "::new(4);")]
1701 /// let mut old = val.load(Ordering::Relaxed);
1703 /// let new = old * 2;
1704 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1706 /// Err(x) => old = x,
1713 pub fn compare_exchange_weak(&self,
1717 failure: Ordering) -> Result<$int_type, $int_type> {
1718 // SAFETY: data races are prevented by atomic intrinsics.
1720 atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
1724 /// Adds to the current value, returning the previous value.
1726 /// This operation wraps around on overflow.
1728 /// `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
1729 /// of this operation. All ordering modes are possible. Note that using
1730 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1731 /// using [`Release`] makes the load part [`Relaxed`].
1733 /// **Note**: This method is only available on platforms that support atomic operations on
1734 #[doc = concat!("[`", $s_int_type, "`].")]
1739 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1741 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0);")]
1742 /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
1743 /// assert_eq!(foo.load(Ordering::SeqCst), 10);
1748 pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
1749 // SAFETY: data races are prevented by atomic intrinsics.
1750 unsafe { atomic_add(self.v.get(), val, order) }
1753 /// Subtracts from the current value, returning the previous value.
1755 /// This operation wraps around on overflow.
1757 /// `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
1758 /// of this operation. All ordering modes are possible. Note that using
1759 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1760 /// using [`Release`] makes the load part [`Relaxed`].
1762 /// **Note**: This method is only available on platforms that support atomic operations on
1763 #[doc = concat!("[`", $s_int_type, "`].")]
1768 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1770 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(20);")]
1771 /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
1772 /// assert_eq!(foo.load(Ordering::SeqCst), 10);
1777 pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
1778 // SAFETY: data races are prevented by atomic intrinsics.
1779 unsafe { atomic_sub(self.v.get(), val, order) }
1782 /// Bitwise "and" with the current value.
1784 /// Performs a bitwise "and" operation on the current value and the argument `val`, and
1785 /// sets the new value to the result.
1787 /// Returns the previous value.
1789 /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
1790 /// of this operation. All ordering modes are possible. Note that using
1791 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1792 /// using [`Release`] makes the load part [`Relaxed`].
1794 /// **Note**: This method is only available on platforms that support atomic operations on
1795 #[doc = concat!("[`", $s_int_type, "`].")]
1800 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1802 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
1803 /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
1804 /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
1809 pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
1810 // SAFETY: data races are prevented by atomic intrinsics.
1811 unsafe { atomic_and(self.v.get(), val, order) }
1814 /// Bitwise "nand" with the current value.
1816 /// Performs a bitwise "nand" operation on the current value and the argument `val`, and
1817 /// sets the new value to the result.
1819 /// Returns the previous value.
1821 /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
1822 /// of this operation. All ordering modes are possible. Note that using
1823 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1824 /// using [`Release`] makes the load part [`Relaxed`].
1826 /// **Note**: This method is only available on platforms that support atomic operations on
1827 #[doc = concat!("[`", $s_int_type, "`].")]
1832 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1834 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0x13);")]
1835 /// assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
1836 /// assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
1841 pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
1842 // SAFETY: data races are prevented by atomic intrinsics.
1843 unsafe { atomic_nand(self.v.get(), val, order) }
1846 /// Bitwise "or" with the current value.
1848 /// Performs a bitwise "or" operation on the current value and the argument `val`, and
1849 /// sets the new value to the result.
1851 /// Returns the previous value.
1853 /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
1854 /// of this operation. All ordering modes are possible. Note that using
1855 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1856 /// using [`Release`] makes the load part [`Relaxed`].
1858 /// **Note**: This method is only available on platforms that support atomic operations on
1859 #[doc = concat!("[`", $s_int_type, "`].")]
1864 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1866 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
1867 /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
1868 /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
1873 pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
1874 // SAFETY: data races are prevented by atomic intrinsics.
1875 unsafe { atomic_or(self.v.get(), val, order) }
1878 /// Bitwise "xor" with the current value.
1880 /// Performs a bitwise "xor" operation on the current value and the argument `val`, and
1881 /// sets the new value to the result.
1883 /// Returns the previous value.
1885 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
1886 /// of this operation. All ordering modes are possible. Note that using
1887 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1888 /// using [`Release`] makes the load part [`Relaxed`].
1890 /// **Note**: This method is only available on platforms that support atomic operations on
1891 #[doc = concat!("[`", $s_int_type, "`].")]
1896 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1898 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
1899 /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
1900 /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
// Delegates to the free `atomic_xor` dispatcher below, which selects the intrinsic for `order`.
1905 pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
1906 // SAFETY: data races are prevented by atomic intrinsics.
1907 unsafe { atomic_xor(self.v.get(), val, order) }
1910 /// Fetches the value, and applies a function to it that returns an optional
1911 /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
1912 /// `Err(previous_value)`.
1914 /// Note: This may call the function multiple times if the value has been changed from other threads in
1915 /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied
1916 /// only once to the stored value.
1918 /// `fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
1919 /// The first describes the required ordering for when the operation finally succeeds while the second
1920 /// describes the required ordering for loads. These correspond to the success and failure orderings of
1921 #[doc = concat!("[`", stringify!($atomic_type), "::compare_exchange`]")]
1924 /// Using [`Acquire`] as success ordering makes the store part
1925 /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
1926 /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1927 /// and must be equivalent to or weaker than the success ordering.
1929 /// **Note**: This method is only available on platforms that support atomic operations on
1930 #[doc = concat!("[`", $s_int_type, "`].")]
1935 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1937 #[doc = concat!("let x = ", stringify!($atomic_type), "::new(7);")]
1938 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7));
1939 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7));
1940 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8));
1941 /// assert_eq!(x.load(Ordering::SeqCst), 9);
1944 #[stable(feature = "no_more_cas", since = "1.45.0")]
1946 pub fn fetch_update<F>(&self,
1947 set_order: Ordering,
1948 fetch_order: Ordering,
1949 mut f: F) -> Result<$int_type, $int_type>
1950 where F: FnMut($int_type) -> Option<$int_type> {
// Classic CAS loop: load once, then retry `compare_exchange_weak` until it
// succeeds or `f` returns `None` for the freshly observed value.
1951 let mut prev = self.load(fetch_order);
1952 while let Some(next) = f(prev) {
1953 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
1954 x @ Ok(_) => return x,
// On failure (including spurious failures of the weak CAS), retry with the
// value that was actually observed; `f` is re-applied to it.
1955 Err(next_prev) => prev = next_prev
1961 /// Maximum with the current value.
1963 /// Finds the maximum of the current value and the argument `val`, and
1964 /// sets the new value to the result.
1966 /// Returns the previous value.
1968 /// `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
1969 /// of this operation. All ordering modes are possible. Note that using
1970 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1971 /// using [`Release`] makes the load part [`Relaxed`].
1973 /// **Note**: This method is only available on platforms that support atomic operations on
1974 #[doc = concat!("[`", $s_int_type, "`].")]
1979 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1981 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
1982 /// assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
1983 /// assert_eq!(foo.load(Ordering::SeqCst), 42);
1986 /// If you want to obtain the maximum value in one step, you can use the following:
1989 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1991 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
1993 /// let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
1994 /// assert!(max_foo == 42);
1997 #[stable(feature = "atomic_min_max", since = "1.45.0")]
// `$max_fn` is `atomic_max` for signed instantiations and `atomic_umax` for
// unsigned ones (see the `atomic_int!` invocations below).
1999 pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
2000 // SAFETY: data races are prevented by atomic intrinsics.
2001 unsafe { $max_fn(self.v.get(), val, order) }
2004 /// Minimum with the current value.
2006 /// Finds the minimum of the current value and the argument `val`, and
2007 /// sets the new value to the result.
2009 /// Returns the previous value.
2011 /// `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
2012 /// of this operation. All ordering modes are possible. Note that using
2013 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
2014 /// using [`Release`] makes the load part [`Relaxed`].
2016 /// **Note**: This method is only available on platforms that support atomic operations on
2017 #[doc = concat!("[`", $s_int_type, "`].")]
2022 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2024 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
2025 /// assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
2026 /// assert_eq!(foo.load(Ordering::Relaxed), 23);
2027 /// assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
2028 /// assert_eq!(foo.load(Ordering::Relaxed), 22);
2031 /// If you want to obtain the minimum value in one step, you can use the following:
2034 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2036 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
2038 /// let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
2039 /// assert_eq!(min_foo, 12);
2042 #[stable(feature = "atomic_min_max", since = "1.45.0")]
// `$min_fn` is `atomic_min` for signed instantiations and `atomic_umin` for
// unsigned ones (see the `atomic_int!` invocations below).
2044 pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
2045 // SAFETY: data races are prevented by atomic intrinsics.
2046 unsafe { $min_fn(self.v.get(), val, order) }
2049 /// Returns a mutable pointer to the underlying integer.
2051 /// Doing non-atomic reads and writes on the resulting integer can be a data race.
2052 /// This method is mostly useful for FFI, where the function signature may use
2053 #[doc = concat!("`*mut ", stringify!($int_type), "` instead of `&", stringify!($atomic_type), "`.")]
2055 /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
2056 /// atomic types work with interior mutability. All modifications of an atomic change the value
2057 /// through a shared reference, and can do so safely as long as they use atomic operations. Any
2058 /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
2059 /// restriction: operations on it must be atomic.
2063 /// ```ignore (extern-declaration)
2065 #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
2068 #[doc = concat!(" fn my_atomic_op(arg: *mut ", stringify!($int_type), ");")]
2071 #[doc = concat!("let mut atomic = ", stringify!($atomic_type), "::new(1);")]
2073 /// // SAFETY: Safe as long as `my_atomic_op` is atomic.
2075 /// my_atomic_op(atomic.as_mut_ptr());
2080 #[unstable(feature = "atomic_mut_ptr",
2081 reason = "recently added",
// NOTE(review): body not visible in this chunk — presumably returns `self.v.get()`; confirm.
2083 pub fn as_mut_ptr(&self) -> *mut $int_type {
// Instantiations of the `atomic_int!` macro for each fixed-width integer type.
// Positional arguments (visible here): cfg gates, stability attributes, the
// min/max intrinsic pair (signed vs. unsigned), and the `int_type AtomicType INIT` triple.
// ---- AtomicI8 (i8): signed min/max intrinsics, stable since 1.34.0 ----
2090 #[cfg(target_has_atomic_load_store = "8")]
2092 cfg(target_has_atomic = "8"),
2093 cfg(target_has_atomic_equal_alignment = "8"),
2094 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2095 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2096 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2097 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2098 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2099 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2100 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2101 unstable(feature = "integer_atomics", issue = "32976"),
2104 atomic_min, atomic_max,
2107 i8 AtomicI8 ATOMIC_I8_INIT
// ---- AtomicU8 (u8): unsigned min/max intrinsics ----
2109 #[cfg(target_has_atomic_load_store = "8")]
2111 cfg(target_has_atomic = "8"),
2112 cfg(target_has_atomic_equal_alignment = "8"),
2113 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2114 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2115 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2116 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2117 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2118 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2119 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2120 unstable(feature = "integer_atomics", issue = "32976"),
2123 atomic_umin, atomic_umax,
2126 u8 AtomicU8 ATOMIC_U8_INIT
// ---- AtomicI16 (i16) ----
2128 #[cfg(target_has_atomic_load_store = "16")]
2130 cfg(target_has_atomic = "16"),
2131 cfg(target_has_atomic_equal_alignment = "16"),
2132 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2133 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2134 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2135 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2136 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2137 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2138 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2139 unstable(feature = "integer_atomics", issue = "32976"),
2142 atomic_min, atomic_max,
2144 "AtomicI16::new(0)",
2145 i16 AtomicI16 ATOMIC_I16_INIT
// ---- AtomicU16 (u16) ----
2147 #[cfg(target_has_atomic_load_store = "16")]
2149 cfg(target_has_atomic = "16"),
2150 cfg(target_has_atomic_equal_alignment = "16"),
2151 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2152 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2153 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2154 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2155 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2156 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2157 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2158 unstable(feature = "integer_atomics", issue = "32976"),
2161 atomic_umin, atomic_umax,
2163 "AtomicU16::new(0)",
2164 u16 AtomicU16 ATOMIC_U16_INIT
// ---- AtomicI32 (i32) ----
2166 #[cfg(target_has_atomic_load_store = "32")]
2168 cfg(target_has_atomic = "32"),
2169 cfg(target_has_atomic_equal_alignment = "32"),
2170 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2171 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2172 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2173 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2174 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2175 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2176 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2177 unstable(feature = "integer_atomics", issue = "32976"),
2180 atomic_min, atomic_max,
2182 "AtomicI32::new(0)",
2183 i32 AtomicI32 ATOMIC_I32_INIT
// ---- AtomicU32 (u32) ----
2185 #[cfg(target_has_atomic_load_store = "32")]
2187 cfg(target_has_atomic = "32"),
2188 cfg(target_has_atomic_equal_alignment = "32"),
2189 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2190 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2191 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2192 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2193 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2194 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2195 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2196 unstable(feature = "integer_atomics", issue = "32976"),
2199 atomic_umin, atomic_umax,
2201 "AtomicU32::new(0)",
2202 u32 AtomicU32 ATOMIC_U32_INIT
// ---- AtomicI64 (i64) ----
2204 #[cfg(target_has_atomic_load_store = "64")]
2206 cfg(target_has_atomic = "64"),
2207 cfg(target_has_atomic_equal_alignment = "64"),
2208 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2209 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2210 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2211 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2212 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2213 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2214 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2215 unstable(feature = "integer_atomics", issue = "32976"),
2218 atomic_min, atomic_max,
2220 "AtomicI64::new(0)",
2221 i64 AtomicI64 ATOMIC_I64_INIT
// ---- AtomicU64 (u64) ----
2223 #[cfg(target_has_atomic_load_store = "64")]
2225 cfg(target_has_atomic = "64"),
2226 cfg(target_has_atomic_equal_alignment = "64"),
2227 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2228 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2229 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2230 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2231 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2232 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2233 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2234 unstable(feature = "integer_atomics", issue = "32976"),
2237 atomic_umin, atomic_umax,
2239 "AtomicU64::new(0)",
2240 u64 AtomicU64 ATOMIC_U64_INIT
// ---- AtomicI128 (i128): still unstable, gated behind `integer_atomics` ----
2242 #[cfg(target_has_atomic_load_store = "128")]
2244 cfg(target_has_atomic = "128"),
2245 cfg(target_has_atomic_equal_alignment = "128"),
2246 unstable(feature = "integer_atomics", issue = "32976"),
2247 unstable(feature = "integer_atomics", issue = "32976"),
2248 unstable(feature = "integer_atomics", issue = "32976"),
2249 unstable(feature = "integer_atomics", issue = "32976"),
2250 unstable(feature = "integer_atomics", issue = "32976"),
2251 unstable(feature = "integer_atomics", issue = "32976"),
2252 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2253 unstable(feature = "integer_atomics", issue = "32976"),
2255 "#![feature(integer_atomics)]\n\n",
2256 atomic_min, atomic_max,
2258 "AtomicI128::new(0)",
2259 i128 AtomicI128 ATOMIC_I128_INIT
// ---- AtomicU128 (u128): still unstable, gated behind `integer_atomics` ----
2261 #[cfg(target_has_atomic_load_store = "128")]
2263 cfg(target_has_atomic = "128"),
2264 cfg(target_has_atomic_equal_alignment = "128"),
2265 unstable(feature = "integer_atomics", issue = "32976"),
2266 unstable(feature = "integer_atomics", issue = "32976"),
2267 unstable(feature = "integer_atomics", issue = "32976"),
2268 unstable(feature = "integer_atomics", issue = "32976"),
2269 unstable(feature = "integer_atomics", issue = "32976"),
2270 unstable(feature = "integer_atomics", issue = "32976"),
2271 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2272 unstable(feature = "integer_atomics", issue = "32976"),
2274 "#![feature(integer_atomics)]\n\n",
2275 atomic_umin, atomic_umax,
2277 "AtomicU128::new(0)",
2278 u128 AtomicU128 ATOMIC_U128_INIT
// Helper macro: expands `atomic_int!` for AtomicIsize/AtomicUsize once per
// supported pointer width, pairing each width with its alignment.
2281 macro_rules! atomic_int_ptr_sized {
2282 ( $($target_pointer_width:literal $align:literal)* ) => { $(
// ---- AtomicIsize (isize): signed min/max intrinsics, stable since 1.0 ----
2283 #[cfg(target_has_atomic_load_store = "ptr")]
2284 #[cfg(target_pointer_width = $target_pointer_width)]
2286 cfg(target_has_atomic = "ptr"),
2287 cfg(target_has_atomic_equal_alignment = "ptr"),
2288 stable(feature = "rust1", since = "1.0.0"),
2289 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2290 stable(feature = "atomic_debug", since = "1.3.0"),
2291 stable(feature = "atomic_access", since = "1.15.0"),
2292 stable(feature = "atomic_from", since = "1.23.0"),
2293 stable(feature = "atomic_nand", since = "1.27.0"),
2294 rustc_const_stable(feature = "const_integer_atomics", since = "1.24.0"),
2295 stable(feature = "rust1", since = "1.0.0"),
2298 atomic_min, atomic_max,
2300 "AtomicIsize::new(0)",
2301 isize AtomicIsize ATOMIC_ISIZE_INIT
// ---- AtomicUsize (usize): unsigned min/max intrinsics ----
2303 #[cfg(target_has_atomic_load_store = "ptr")]
2304 #[cfg(target_pointer_width = $target_pointer_width)]
2306 cfg(target_has_atomic = "ptr"),
2307 cfg(target_has_atomic_equal_alignment = "ptr"),
2308 stable(feature = "rust1", since = "1.0.0"),
2309 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2310 stable(feature = "atomic_debug", since = "1.3.0"),
2311 stable(feature = "atomic_access", since = "1.15.0"),
2312 stable(feature = "atomic_from", since = "1.23.0"),
2313 stable(feature = "atomic_nand", since = "1.27.0"),
2314 rustc_const_stable(feature = "const_integer_atomics", since = "1.24.0"),
2315 stable(feature = "rust1", since = "1.0.0"),
2318 atomic_umin, atomic_umax,
2320 "AtomicUsize::new(0)",
2321 usize AtomicUsize ATOMIC_USIZE_INIT
// Invocation: width/alignment argument pairs are not visible in this view.
2326 atomic_int_ptr_sized! {
2333 #[cfg(target_has_atomic = "8")]
// Maps a compare-exchange success ordering to the strongest ordering that is
// legal for its failure case. NOTE(review): body not visible in this chunk — confirm mapping.
2334 fn strongest_failure_ordering(order: Ordering) -> Ordering {
2345 unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {
2346 // SAFETY: the caller must uphold the safety contract for `atomic_store`.
// Dispatch on `order`: stores permit Release, Relaxed and SeqCst only;
// Acquire/AcqRel are rejected at runtime.
2349 Release => intrinsics::atomic_store_rel(dst, val),
2350 Relaxed => intrinsics::atomic_store_relaxed(dst, val),
2351 SeqCst => intrinsics::atomic_store(dst, val),
2352 Acquire => panic!("there is no such thing as an acquire store"),
2353 AcqRel => panic!("there is no such thing as an acquire/release store"),
2359 unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
2360 // SAFETY: the caller must uphold the safety contract for `atomic_load`.
// Dispatch on `order`: loads permit Acquire, Relaxed and SeqCst only;
// Release/AcqRel are rejected at runtime.
2363 Acquire => intrinsics::atomic_load_acq(dst),
2364 Relaxed => intrinsics::atomic_load_relaxed(dst),
2365 SeqCst => intrinsics::atomic_load(dst),
2366 Release => panic!("there is no such thing as a release load"),
2367 AcqRel => panic!("there is no such thing as an acquire/release load"),
2373 #[cfg(target_has_atomic = "8")]
2374 unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2375 // SAFETY: the caller must uphold the safety contract for `atomic_swap`.
// Exchange accepts all five orderings; returns the previous value.
2378 Acquire => intrinsics::atomic_xchg_acq(dst, val),
2379 Release => intrinsics::atomic_xchg_rel(dst, val),
2380 AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
2381 Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
2382 SeqCst => intrinsics::atomic_xchg(dst, val),
2387 /// Returns the previous value (like __sync_fetch_and_add).
2389 #[cfg(target_has_atomic = "8")]
2390 unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2391 // SAFETY: the caller must uphold the safety contract for `atomic_add`.
// All five orderings are valid for read-modify-write operations.
2394 Acquire => intrinsics::atomic_xadd_acq(dst, val),
2395 Release => intrinsics::atomic_xadd_rel(dst, val),
2396 AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
2397 Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
2398 SeqCst => intrinsics::atomic_xadd(dst, val),
2403 /// Returns the previous value (like __sync_fetch_and_sub).
2405 #[cfg(target_has_atomic = "8")]
2406 unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2407 // SAFETY: the caller must uphold the safety contract for `atomic_sub`.
// All five orderings are valid for read-modify-write operations.
2410 Acquire => intrinsics::atomic_xsub_acq(dst, val),
2411 Release => intrinsics::atomic_xsub_rel(dst, val),
2412 AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
2413 Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
2414 SeqCst => intrinsics::atomic_xsub(dst, val),
2420 #[cfg(target_has_atomic = "8")]
// Strong compare-exchange: returns Ok(previous) on success, Err(previous) on mismatch.
2421 unsafe fn atomic_compare_exchange<T: Copy>(
2428 // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange`.
2429 let (val, ok) = unsafe {
2430 match (success, failure) {
// Only ordering pairs allowed by the C++ memory model are dispatched; the failure
// ordering may not be Release/AcqRel nor stronger than the success ordering.
2431 (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
2432 (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
2433 (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
2434 (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
2435 (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
2436 (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
2437 (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
2438 (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
2439 (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
2440 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
2441 (_, Release) => panic!("there is no such thing as a release failure ordering"),
2442 _ => panic!("a failure ordering can't be stronger than a success ordering"),
2445 if ok { Ok(val) } else { Err(val) }
2449 #[cfg(target_has_atomic = "8")]
// Weak compare-exchange: like the strong form but may fail spuriously
// (returning Err even when the value matched), which permits better codegen on LL/SC targets.
2450 unsafe fn atomic_compare_exchange_weak<T: Copy>(
2457 // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange_weak`.
2458 let (val, ok) = unsafe {
2459 match (success, failure) {
2460 (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
2461 (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
2462 (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
2463 (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
2464 (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
2465 (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
2466 (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
2467 (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
2468 (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
2469 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
2470 (_, Release) => panic!("there is no such thing as a release failure ordering"),
2471 _ => panic!("a failure ordering can't be stronger than a success ordering"),
2474 if ok { Ok(val) } else { Err(val) }
2478 #[cfg(target_has_atomic = "8")]
// Fetch-AND: returns the previous value; all five orderings valid.
2479 unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2480 // SAFETY: the caller must uphold the safety contract for `atomic_and`
2483 Acquire => intrinsics::atomic_and_acq(dst, val),
2484 Release => intrinsics::atomic_and_rel(dst, val),
2485 AcqRel => intrinsics::atomic_and_acqrel(dst, val),
2486 Relaxed => intrinsics::atomic_and_relaxed(dst, val),
2487 SeqCst => intrinsics::atomic_and(dst, val),
2493 #[cfg(target_has_atomic = "8")]
// Fetch-NAND: returns the previous value; stores `!(old & val)`.
2494 unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2495 // SAFETY: the caller must uphold the safety contract for `atomic_nand`
2498 Acquire => intrinsics::atomic_nand_acq(dst, val),
2499 Release => intrinsics::atomic_nand_rel(dst, val),
2500 AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
2501 Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
2502 SeqCst => intrinsics::atomic_nand(dst, val),
2508 #[cfg(target_has_atomic = "8")]
// Fetch-OR: returns the previous value; all five orderings valid.
2509 unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2510 // SAFETY: the caller must uphold the safety contract for `atomic_or`
2513 Acquire => intrinsics::atomic_or_acq(dst, val),
2514 Release => intrinsics::atomic_or_rel(dst, val),
2515 AcqRel => intrinsics::atomic_or_acqrel(dst, val),
2516 Relaxed => intrinsics::atomic_or_relaxed(dst, val),
2517 SeqCst => intrinsics::atomic_or(dst, val),
2523 #[cfg(target_has_atomic = "8")]
// Fetch-XOR: returns the previous value; all five orderings valid.
2524 unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2525 // SAFETY: the caller must uphold the safety contract for `atomic_xor`
2528 Acquire => intrinsics::atomic_xor_acq(dst, val),
2529 Release => intrinsics::atomic_xor_rel(dst, val),
2530 AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
2531 Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
2532 SeqCst => intrinsics::atomic_xor(dst, val),
2537 /// returns the max value (signed comparison)
2539 #[cfg(target_has_atomic = "8")]
// Used as `$max_fn` by the signed `atomic_int!` instantiations above.
2540 unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2541 // SAFETY: the caller must uphold the safety contract for `atomic_max`
2544 Acquire => intrinsics::atomic_max_acq(dst, val),
2545 Release => intrinsics::atomic_max_rel(dst, val),
2546 AcqRel => intrinsics::atomic_max_acqrel(dst, val),
2547 Relaxed => intrinsics::atomic_max_relaxed(dst, val),
2548 SeqCst => intrinsics::atomic_max(dst, val),
2553 /// returns the min value (signed comparison)
2555 #[cfg(target_has_atomic = "8")]
// Used as `$min_fn` by the signed `atomic_int!` instantiations above.
2556 unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2557 // SAFETY: the caller must uphold the safety contract for `atomic_min`
2560 Acquire => intrinsics::atomic_min_acq(dst, val),
2561 Release => intrinsics::atomic_min_rel(dst, val),
2562 AcqRel => intrinsics::atomic_min_acqrel(dst, val),
2563 Relaxed => intrinsics::atomic_min_relaxed(dst, val),
2564 SeqCst => intrinsics::atomic_min(dst, val),
2569 /// returns the max value (unsigned comparison)
2571 #[cfg(target_has_atomic = "8")]
// Used as `$max_fn` by the unsigned `atomic_int!` instantiations above.
2572 unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2573 // SAFETY: the caller must uphold the safety contract for `atomic_umax`
2576 Acquire => intrinsics::atomic_umax_acq(dst, val),
2577 Release => intrinsics::atomic_umax_rel(dst, val),
2578 AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
2579 Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
2580 SeqCst => intrinsics::atomic_umax(dst, val),
2585 /// returns the min value (unsigned comparison)
2587 #[cfg(target_has_atomic = "8")]
// Used as `$min_fn` by the unsigned `atomic_int!` instantiations above.
2588 unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2589 // SAFETY: the caller must uphold the safety contract for `atomic_umin`
2592 Acquire => intrinsics::atomic_umin_acq(dst, val),
2593 Release => intrinsics::atomic_umin_rel(dst, val),
2594 AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
2595 Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
2596 SeqCst => intrinsics::atomic_umin(dst, val),
2601 /// An atomic fence.
2603 /// Depending on the specified order, a fence prevents the compiler and CPU from
2604 /// reordering certain types of memory operations around it.
2605 /// That creates synchronizes-with relationships between it and atomic operations
2606 /// or fences in other threads.
2608 /// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
2609 /// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
2610 /// exist operations X and Y, both operating on some atomic object 'M' such
2611 /// that A is sequenced before X, Y is sequenced before B and Y observes
2612 /// the change to M. This provides a happens-before dependence between A and B.
2615 /// Thread 1 Thread 2
2617 /// fence(Release); A --------------
2618 /// x.store(3, Relaxed); X --------- |
2621 /// -------------> Y if x.load(Relaxed) == 3 {
2622 /// |-------> B fence(Acquire);
2627 /// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
2630 /// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
2631 /// and [`Release`] semantics, participates in the global program order of the
2632 /// other [`SeqCst`] operations and/or fences.
2634 /// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
2638 /// Panics if `order` is [`Relaxed`].
2643 /// use std::sync::atomic::AtomicBool;
2644 /// use std::sync::atomic::fence;
2645 /// use std::sync::atomic::Ordering;
2647 /// // A mutual exclusion primitive based on spinlock.
2648 /// pub struct Mutex {
2649 /// flag: AtomicBool,
2653 /// pub fn new() -> Mutex {
2655 /// flag: AtomicBool::new(false),
2659 /// pub fn lock(&self) {
2660 /// // Wait until the old value is `false`.
2663 /// .compare_exchange_weak(false, true, Ordering::Relaxed, Ordering::Relaxed)
2666 /// // This fence synchronizes-with store in `unlock`.
2667 /// fence(Ordering::Acquire);
2670 /// pub fn unlock(&self) {
2671 /// self.flag.store(false, Ordering::Release);
2676 #[stable(feature = "rust1", since = "1.0.0")]
2677 #[rustc_diagnostic_item = "fence"]
2678 pub fn fence(order: Ordering) {
2679 // SAFETY: using an atomic fence is safe.
// A Relaxed fence would constrain nothing, so it is rejected at runtime.
2682 Acquire => intrinsics::atomic_fence_acq(),
2683 Release => intrinsics::atomic_fence_rel(),
2684 AcqRel => intrinsics::atomic_fence_acqrel(),
2685 SeqCst => intrinsics::atomic_fence(),
2686 Relaxed => panic!("there is no such thing as a relaxed fence"),
2691 /// A compiler memory fence.
2693 /// `compiler_fence` does not emit any machine code, but restricts the kinds
2694 /// of memory re-ordering the compiler is allowed to do. Specifically, depending on
2695 /// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
2696 /// or writes from before or after the call to the other side of the call to
2697 /// `compiler_fence`. Note that it does **not** prevent the *hardware*
2698 /// from doing such re-ordering. This is not a problem in a single-threaded
2699 /// execution context, but when other threads may modify memory at the same
2700 /// time, stronger synchronization primitives such as [`fence`] are required.
2702 /// The re-ordering prevented by the different ordering semantics are:
2704 /// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
2705 /// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
2706 /// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
2707 /// - with [`AcqRel`], both of the above rules are enforced.
2709 /// `compiler_fence` is generally only useful for preventing a thread from
2710 /// racing *with itself*. That is, if a given thread is executing one piece
2711 /// of code, and is then interrupted, and starts executing code elsewhere
2712 /// (while still in the same thread, and conceptually still on the same
2713 /// core). In traditional programs, this can only occur when a signal
2714 /// handler is registered. In more low-level code, such situations can also
2715 /// arise when handling interrupts, when implementing green threads with
2716 /// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
2717 /// discussion of [memory barriers].
2721 /// Panics if `order` is [`Relaxed`].
2725 /// Without `compiler_fence`, the `assert_eq!` in following code
2726 /// is *not* guaranteed to succeed, despite everything happening in a single thread.
2727 /// To see why, remember that the compiler is free to swap the stores to
2728 /// `IMPORTANT_VARIABLE` and `IS_READY` since they are both
2729 /// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
2730 /// after `IS_READY` is updated, then the signal handler will see
2731 /// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
2732 /// Using a `compiler_fence` remedies this situation.
2735 /// use std::sync::atomic::{AtomicBool, AtomicUsize};
2736 /// use std::sync::atomic::Ordering;
2737 /// use std::sync::atomic::compiler_fence;
2739 /// static IMPORTANT_VARIABLE: AtomicUsize = AtomicUsize::new(0);
2740 /// static IS_READY: AtomicBool = AtomicBool::new(false);
2743 /// IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
2744 /// // prevent earlier writes from being moved beyond this point
2745 /// compiler_fence(Ordering::Release);
2746 /// IS_READY.store(true, Ordering::Relaxed);
2749 /// fn signal_handler() {
2750 /// if IS_READY.load(Ordering::Relaxed) {
2751 /// assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
2756 /// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
2758 #[stable(feature = "compiler_fences", since = "1.21.0")]
2759 #[rustc_diagnostic_item = "compiler_fence"]
2760 pub fn compiler_fence(order: Ordering) {
2761 // SAFETY: using an atomic fence is safe.
// Single-thread fence intrinsics: compiler-only barriers, no hardware fence emitted.
2764 Acquire => intrinsics::atomic_singlethreadfence_acq(),
2765 Release => intrinsics::atomic_singlethreadfence_rel(),
2766 AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
2767 SeqCst => intrinsics::atomic_singlethreadfence(),
2768 Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
2773 #[cfg(target_has_atomic_load_store = "8")]
2774 #[stable(feature = "atomic_debug", since = "1.3.0")]
2775 impl fmt::Debug for AtomicBool {
2776 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Formats the current value, read with a SeqCst load.
2777 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2781 #[cfg(target_has_atomic_load_store = "ptr")]
2782 #[stable(feature = "atomic_debug", since = "1.3.0")]
2783 impl<T> fmt::Debug for AtomicPtr<T> {
2784 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Formats the current pointer value, read with a SeqCst load.
2785 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2789 #[cfg(target_has_atomic_load_store = "ptr")]
2790 #[stable(feature = "atomic_pointer", since = "1.24.0")]
2791 impl<T> fmt::Pointer for AtomicPtr<T> {
2792 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Formats the current pointer value as an address, read with a SeqCst load.
2793 fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)
2797 /// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
2799 /// This function is deprecated in favor of [`hint::spin_loop`].
2801 /// [`hint::spin_loop`]: crate::hint::spin_loop
2803 #[stable(feature = "spin_loop_hint", since = "1.24.0")]
2804 #[rustc_deprecated(since = "1.51.0", reason = "use hint::spin_loop instead")]
// Deprecated thin alias; NOTE(review): body (presumably a call to `hint::spin_loop`)
// is not visible in this chunk — confirm.
2805 pub fn spin_loop_hint() {