3 //! Atomic types provide primitive shared-memory communication between
4 //! threads, and are the building blocks of other concurrent
7 //! This module defines atomic versions of a select number of primitive
8 //! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
9 //! [`AtomicI8`], [`AtomicU16`], etc.
10 //! Atomic types present operations that, when used correctly, synchronize
11 //! updates between threads.
13 //! Each method takes an [`Ordering`] which represents the strength of
14 //! the memory barrier for that operation. These orderings are the
15 //! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2].
17 //! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order
18 //! [2]: ../../../nomicon/atomics.html
20 //! Atomic variables are safe to share between threads (they implement [`Sync`])
21 //! but they do not themselves provide the mechanism for sharing and follow the
22 //! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
23 //! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
24 //! atomically-reference-counted shared pointer).
26 //! [arc]: ../../../std/sync/struct.Arc.html
28 //! Atomic types may be stored in static variables, initialized using
29 //! the constant initializers like [`AtomicBool::new`]. Atomic statics
30 //! are often used for lazy global initialization.
34 //! All atomic types in this module are guaranteed to be [lock-free] if they're
35 //! available. This means they don't internally acquire a global mutex. Atomic
36 //! types and operations are not guaranteed to be wait-free. This means that
37 //! operations like `fetch_or` may be implemented with a compare-and-swap loop.
39 //! Atomic operations may be implemented at the instruction layer with
40 //! larger-size atomics. For example some platforms use 4-byte atomic
41 //! instructions to implement `AtomicI8`. Note that this emulation should not
42 //! have an impact on correctness of code, it's just something to be aware of.
44 //! The atomic types in this module may not be available on all platforms. The
45 //! atomic types here are all widely available, however, and can generally be
46 //! relied upon to exist. Some notable exceptions are:
48 //! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or
49 //! `AtomicI64` types.
50 //! * ARM platforms like `armv5te` that aren't targeting Linux only provide `load`
51 //! and `store` operations, and do not support Compare and Swap (CAS)
52 //! operations, such as `swap`, `fetch_add`, etc. Additionally on Linux,
53 //! these CAS operations are implemented via [operating system support], which
54 //! may come with a performance penalty.
55 //! * ARM targets with `thumbv6m` only provide `load` and `store` operations,
56 //! and do not support Compare and Swap (CAS) operations, such as `swap`,
59 //! [operating system support]: https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt
61 //! Note that future platforms may be added that also do not have support for
62 //! some atomic operations. Maximally portable code will want to be careful
63 //! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are
64 //! generally the most portable, but even then they're not available everywhere.
65 //! For reference, the `std` library requires pointer-sized atomics, although
68 //! Currently you'll need to use `#[cfg(target_arch)]` primarily to
69 //! conditionally compile in code with atomics. There is an unstable
70 //! `#[cfg(target_has_atomic)]` as well which may be stabilized in the future.
72 //! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm
76 //! A simple spinlock:
79 //! use std::sync::Arc;
80 //! use std::sync::atomic::{AtomicUsize, Ordering};
81 //! use std::{hint, thread};
84 //! let spinlock = Arc::new(AtomicUsize::new(1));
86 //! let spinlock_clone = Arc::clone(&spinlock);
87 //! let thread = thread::spawn(move|| {
88 //! spinlock_clone.store(0, Ordering::SeqCst);
91 //! // Wait for the other thread to release the lock
92 //! while spinlock.load(Ordering::SeqCst) != 0 {
93 //! hint::spin_loop();
96 //! if let Err(panic) = thread.join() {
97 //! println!("Thread had an error: {:?}", panic);
102 //! Keep a global count of live threads:
105 //! use std::sync::atomic::{AtomicUsize, Ordering};
107 //! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0);
109 //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
110 //! println!("live threads: {}", old_thread_count + 1);
113 #![stable(feature = "rust1", since = "1.0.0")]
114 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
115 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))]
117 use self::Ordering::*;
119 use crate::cell::UnsafeCell;
121 use crate::intrinsics;
123 use crate::hint::spin_loop;
125 /// A boolean type which can be safely shared between threads.
127 /// This type has the same in-memory representation as a [`bool`].
129 /// **Note**: This type is only available on platforms that support atomic
130 /// loads and stores of `u8`.
131 #[cfg(target_has_atomic_load_store = "8")]
132 #[stable(feature = "rust1", since = "1.0.0")]
134 pub struct AtomicBool {
138 #[cfg(target_has_atomic_load_store = "8")]
139 #[stable(feature = "rust1", since = "1.0.0")]
140 impl Default for AtomicBool {
141 /// Creates an `AtomicBool` initialized to `false`.
143 fn default() -> Self {
148 // Send is implicitly implemented for AtomicBool.
149 #[cfg(target_has_atomic_load_store = "8")]
150 #[stable(feature = "rust1", since = "1.0.0")]
151 unsafe impl Sync for AtomicBool {}
153 /// A raw pointer type which can be safely shared between threads.
155 /// This type has the same in-memory representation as a `*mut T`.
157 /// **Note**: This type is only available on platforms that support atomic
158 /// loads and stores of pointers. Its size depends on the target pointer's size.
159 #[cfg(target_has_atomic_load_store = "ptr")]
160 #[stable(feature = "rust1", since = "1.0.0")]
161 #[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
162 #[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
163 #[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
164 pub struct AtomicPtr<T> {
165 p: UnsafeCell<*mut T>,
168 #[cfg(target_has_atomic_load_store = "ptr")]
169 #[stable(feature = "rust1", since = "1.0.0")]
170 impl<T> Default for AtomicPtr<T> {
171 /// Creates a null `AtomicPtr<T>`.
172 fn default() -> AtomicPtr<T> {
173 AtomicPtr::new(crate::ptr::null_mut())
177 #[cfg(target_has_atomic_load_store = "ptr")]
178 #[stable(feature = "rust1", since = "1.0.0")]
179 unsafe impl<T> Send for AtomicPtr<T> {}
180 #[cfg(target_has_atomic_load_store = "ptr")]
181 #[stable(feature = "rust1", since = "1.0.0")]
182 unsafe impl<T> Sync for AtomicPtr<T> {}
184 /// Atomic memory orderings
186 /// Memory orderings specify the way atomic operations synchronize memory.
187 /// In its weakest [`Ordering::Relaxed`], only the memory directly touched by the
188 /// operation is synchronized. On the other hand, a store-load pair of [`Ordering::SeqCst`]
189 /// operations synchronize other memory while additionally preserving a total order of such
190 /// operations across all threads.
192 /// Rust's memory orderings are [the same as those of
193 /// C++20](https://en.cppreference.com/w/cpp/atomic/memory_order).
195 /// For more information see the [nomicon].
197 /// [nomicon]: ../../../nomicon/atomics.html
198 #[stable(feature = "rust1", since = "1.0.0")]
199 #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
202 /// No ordering constraints, only atomic operations.
204 /// Corresponds to [`memory_order_relaxed`] in C++20.
206 /// [`memory_order_relaxed`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering
207 #[stable(feature = "rust1", since = "1.0.0")]
209 /// When coupled with a store, all previous operations become ordered
210 /// before any load of this value with [`Acquire`] (or stronger) ordering.
211 /// In particular, all previous writes become visible to all threads
212 /// that perform an [`Acquire`] (or stronger) load of this value.
214 /// Notice that using this ordering for an operation that combines loads
215 /// and stores leads to a [`Relaxed`] load operation!
217 /// This ordering is only applicable for operations that can perform a store.
219 /// Corresponds to [`memory_order_release`] in C++20.
221 /// [`memory_order_release`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
222 #[stable(feature = "rust1", since = "1.0.0")]
224 /// When coupled with a load, if the loaded value was written by a store operation with
225 /// [`Release`] (or stronger) ordering, then all subsequent operations
226 /// become ordered after that store. In particular, all subsequent loads will see data
227 /// written before the store.
229 /// Notice that using this ordering for an operation that combines loads
230 /// and stores leads to a [`Relaxed`] store operation!
232 /// This ordering is only applicable for operations that can perform a load.
234 /// Corresponds to [`memory_order_acquire`] in C++20.
236 /// [`memory_order_acquire`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
237 #[stable(feature = "rust1", since = "1.0.0")]
239 /// Has the effects of both [`Acquire`] and [`Release`] together:
240 /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
242 /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
243 /// not performing any store and hence it has just [`Acquire`] ordering. However,
244 /// `AcqRel` will never perform [`Relaxed`] accesses.
246 /// This ordering is only applicable for operations that combine both loads and stores.
248 /// Corresponds to [`memory_order_acq_rel`] in C++20.
250 /// [`memory_order_acq_rel`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
251 #[stable(feature = "rust1", since = "1.0.0")]
253 /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
254 /// operations, respectively) with the additional guarantee that all threads see all
255 /// sequentially consistent operations in the same order.
257 /// Corresponds to [`memory_order_seq_cst`] in C++20.
259 /// [`memory_order_seq_cst`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering
260 #[stable(feature = "rust1", since = "1.0.0")]
264 /// An [`AtomicBool`] initialized to `false`.
265 #[cfg(target_has_atomic_load_store = "8")]
266 #[stable(feature = "rust1", since = "1.0.0")]
269 reason = "the `new` function is now preferred",
270 suggestion = "AtomicBool::new(false)"
272 pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
274 #[cfg(target_has_atomic_load_store = "8")]
276 /// Creates a new `AtomicBool`.
281 /// use std::sync::atomic::AtomicBool;
283 /// let atomic_true = AtomicBool::new(true);
284 /// let atomic_false = AtomicBool::new(false);
287 #[stable(feature = "rust1", since = "1.0.0")]
288 #[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
289 pub const fn new(v: bool) -> AtomicBool {
290 AtomicBool { v: UnsafeCell::new(v as u8) }
293 /// Returns a mutable reference to the underlying [`bool`].
295 /// This is safe because the mutable reference guarantees that no other threads are
296 /// concurrently accessing the atomic data.
301 /// use std::sync::atomic::{AtomicBool, Ordering};
303 /// let mut some_bool = AtomicBool::new(true);
304 /// assert_eq!(*some_bool.get_mut(), true);
305 /// *some_bool.get_mut() = false;
306 /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
309 #[stable(feature = "atomic_access", since = "1.15.0")]
310 pub fn get_mut(&mut self) -> &mut bool {
311 // SAFETY: the mutable reference guarantees unique ownership.
312 unsafe { &mut *(self.v.get() as *mut bool) }
315 /// Get atomic access to a `&mut bool`.
320 /// #![feature(atomic_from_mut)]
321 /// use std::sync::atomic::{AtomicBool, Ordering};
323 /// let mut some_bool = true;
324 /// let a = AtomicBool::from_mut(&mut some_bool);
325 /// a.store(false, Ordering::Relaxed);
326 /// assert_eq!(some_bool, false);
329 #[cfg(target_has_atomic_equal_alignment = "8")]
330 #[unstable(feature = "atomic_from_mut", issue = "76314")]
331 pub fn from_mut(v: &mut bool) -> &Self {
332 // SAFETY: the mutable reference guarantees unique ownership, and
333 // alignment of both `bool` and `Self` is 1.
334 unsafe { &*(v as *mut bool as *mut Self) }
337 /// Consumes the atomic and returns the contained value.
339 /// This is safe because passing `self` by value guarantees that no other threads are
340 /// concurrently accessing the atomic data.
345 /// use std::sync::atomic::AtomicBool;
347 /// let some_bool = AtomicBool::new(true);
348 /// assert_eq!(some_bool.into_inner(), true);
351 #[stable(feature = "atomic_access", since = "1.15.0")]
352 #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
353 pub const fn into_inner(self) -> bool {
354 self.v.into_inner() != 0
357 /// Loads a value from the bool.
359 /// `load` takes an [`Ordering`] argument which describes the memory ordering
360 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
364 /// Panics if `order` is [`Release`] or [`AcqRel`].
369 /// use std::sync::atomic::{AtomicBool, Ordering};
371 /// let some_bool = AtomicBool::new(true);
373 /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
376 #[stable(feature = "rust1", since = "1.0.0")]
377 pub fn load(&self, order: Ordering) -> bool {
378 // SAFETY: any data races are prevented by atomic intrinsics and the raw
379 // pointer passed in is valid because we got it from a reference.
380 unsafe { atomic_load(self.v.get(), order) != 0 }
383 /// Stores a value into the bool.
385 /// `store` takes an [`Ordering`] argument which describes the memory ordering
386 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
390 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
395 /// use std::sync::atomic::{AtomicBool, Ordering};
397 /// let some_bool = AtomicBool::new(true);
399 /// some_bool.store(false, Ordering::Relaxed);
400 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
403 #[stable(feature = "rust1", since = "1.0.0")]
404 pub fn store(&self, val: bool, order: Ordering) {
405 // SAFETY: any data races are prevented by atomic intrinsics and the raw
406 // pointer passed in is valid because we got it from a reference.
408 atomic_store(self.v.get(), val as u8, order);
412 /// Stores a value into the bool, returning the previous value.
414 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
415 /// of this operation. All ordering modes are possible. Note that using
416 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
417 /// using [`Release`] makes the load part [`Relaxed`].
419 /// **Note:** This method is only available on platforms that support atomic
420 /// operations on `u8`.
425 /// use std::sync::atomic::{AtomicBool, Ordering};
427 /// let some_bool = AtomicBool::new(true);
429 /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
430 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
433 #[stable(feature = "rust1", since = "1.0.0")]
434 #[cfg(target_has_atomic = "8")]
435 pub fn swap(&self, val: bool, order: Ordering) -> bool {
436 // SAFETY: data races are prevented by atomic intrinsics.
437 unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
440 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
442 /// The return value is always the previous value. If it is equal to `current`, then the value
445 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
446 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
447 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
448 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
449 /// happens, and using [`Release`] makes the load part [`Relaxed`].
451 /// **Note:** This method is only available on platforms that support atomic
452 /// operations on `u8`.
454 /// # Migrating to `compare_exchange` and `compare_exchange_weak`
456 /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
457 /// memory orderings:
459 /// Original | Success | Failure
460 /// -------- | ------- | -------
461 /// Relaxed | Relaxed | Relaxed
462 /// Acquire | Acquire | Acquire
463 /// Release | Release | Relaxed
464 /// AcqRel | AcqRel | Acquire
465 /// SeqCst | SeqCst | SeqCst
467 /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
468 /// which allows the compiler to generate better assembly code when the compare and swap
469 /// is used in a loop.
474 /// use std::sync::atomic::{AtomicBool, Ordering};
476 /// let some_bool = AtomicBool::new(true);
478 /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
479 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
481 /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
482 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
485 #[stable(feature = "rust1", since = "1.0.0")]
488 reason = "Use `compare_exchange` or `compare_exchange_weak` instead"
490 #[cfg(target_has_atomic = "8")]
491 pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
// Delegate to `compare_exchange`, deriving the failure ordering from the single
// `order` argument (e.g. `AcqRel` -> `Acquire`) — the same mapping shown in the
// migration table in this method's documentation.
492 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
498 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
500 /// The return value is a result indicating whether the new value was written and containing
501 /// the previous value. On success this value is guaranteed to be equal to `current`.
503 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
504 /// ordering of this operation. `success` describes the required ordering for the
505 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
506 /// `failure` describes the required ordering for the load operation that takes place when
507 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
508 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
509 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
510 /// and must be equivalent to or weaker than the success ordering.
512 /// **Note:** This method is only available on platforms that support atomic
513 /// operations on `u8`.
518 /// use std::sync::atomic::{AtomicBool, Ordering};
520 /// let some_bool = AtomicBool::new(true);
522 /// assert_eq!(some_bool.compare_exchange(true,
524 /// Ordering::Acquire,
525 /// Ordering::Relaxed),
527 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
529 /// assert_eq!(some_bool.compare_exchange(true, true,
530 /// Ordering::SeqCst,
531 /// Ordering::Acquire),
533 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
536 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
537 #[doc(alias = "compare_and_swap")]
538 #[cfg(target_has_atomic = "8")]
539 pub fn compare_exchange(
545 ) -> Result<bool, bool> {
546 // SAFETY: data races are prevented by atomic intrinsics.
548 atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
551 Err(x) => Err(x != 0),
555 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
557 /// Unlike [`AtomicBool::compare_exchange`], this function is allowed to spuriously fail even when the
558 /// comparison succeeds, which can result in more efficient code on some platforms. The
559 /// return value is a result indicating whether the new value was written and containing the
562 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
563 /// ordering of this operation. `success` describes the required ordering for the
564 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
565 /// `failure` describes the required ordering for the load operation that takes place when
566 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
567 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
568 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
569 /// and must be equivalent to or weaker than the success ordering.
571 /// **Note:** This method is only available on platforms that support atomic
572 /// operations on `u8`.
577 /// use std::sync::atomic::{AtomicBool, Ordering};
579 /// let val = AtomicBool::new(false);
582 /// let mut old = val.load(Ordering::Relaxed);
584 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
586 /// Err(x) => old = x,
591 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
592 #[doc(alias = "compare_and_swap")]
593 #[cfg(target_has_atomic = "8")]
594 pub fn compare_exchange_weak(
600 ) -> Result<bool, bool> {
601 // SAFETY: data races are prevented by atomic intrinsics.
603 atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
606 Err(x) => Err(x != 0),
610 /// Logical "and" with a boolean value.
612 /// Performs a logical "and" operation on the current value and the argument `val`, and sets
613 /// the new value to the result.
615 /// Returns the previous value.
617 /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
618 /// of this operation. All ordering modes are possible. Note that using
619 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
620 /// using [`Release`] makes the load part [`Relaxed`].
622 /// **Note:** This method is only available on platforms that support atomic
623 /// operations on `u8`.
628 /// use std::sync::atomic::{AtomicBool, Ordering};
630 /// let foo = AtomicBool::new(true);
631 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
632 /// assert_eq!(foo.load(Ordering::SeqCst), false);
634 /// let foo = AtomicBool::new(true);
635 /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
636 /// assert_eq!(foo.load(Ordering::SeqCst), true);
638 /// let foo = AtomicBool::new(false);
639 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
640 /// assert_eq!(foo.load(Ordering::SeqCst), false);
643 #[stable(feature = "rust1", since = "1.0.0")]
644 #[cfg(target_has_atomic = "8")]
645 pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
646 // SAFETY: data races are prevented by atomic intrinsics.
647 unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
650 /// Logical "nand" with a boolean value.
652 /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
653 /// the new value to the result.
655 /// Returns the previous value.
657 /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
658 /// of this operation. All ordering modes are possible. Note that using
659 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
660 /// using [`Release`] makes the load part [`Relaxed`].
662 /// **Note:** This method is only available on platforms that support atomic
663 /// operations on `u8`.
668 /// use std::sync::atomic::{AtomicBool, Ordering};
670 /// let foo = AtomicBool::new(true);
671 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
672 /// assert_eq!(foo.load(Ordering::SeqCst), true);
674 /// let foo = AtomicBool::new(true);
675 /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
676 /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
677 /// assert_eq!(foo.load(Ordering::SeqCst), false);
679 /// let foo = AtomicBool::new(false);
680 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
681 /// assert_eq!(foo.load(Ordering::SeqCst), true);
684 #[stable(feature = "rust1", since = "1.0.0")]
685 #[cfg(target_has_atomic = "8")]
686 pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
687 // We can't use atomic_nand here because it can result in a bool with
688 // an invalid value. This happens because the atomic operation is done
689 // with an 8-bit integer internally, which would set the upper 7 bits.
690 // So we just use fetch_xor or swap instead.
693 // We must invert the bool.
694 self.fetch_xor(true, order)
696 // !(x & false) == true
697 // We must set the bool to true.
698 self.swap(true, order)
702 /// Logical "or" with a boolean value.
704 /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
705 /// new value to the result.
707 /// Returns the previous value.
709 /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
710 /// of this operation. All ordering modes are possible. Note that using
711 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
712 /// using [`Release`] makes the load part [`Relaxed`].
714 /// **Note:** This method is only available on platforms that support atomic
715 /// operations on `u8`.
720 /// use std::sync::atomic::{AtomicBool, Ordering};
722 /// let foo = AtomicBool::new(true);
723 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
724 /// assert_eq!(foo.load(Ordering::SeqCst), true);
726 /// let foo = AtomicBool::new(true);
727 /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
728 /// assert_eq!(foo.load(Ordering::SeqCst), true);
730 /// let foo = AtomicBool::new(false);
731 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
732 /// assert_eq!(foo.load(Ordering::SeqCst), false);
735 #[stable(feature = "rust1", since = "1.0.0")]
736 #[cfg(target_has_atomic = "8")]
737 pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
738 // SAFETY: data races are prevented by atomic intrinsics.
739 unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
742 /// Logical "xor" with a boolean value.
744 /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
745 /// the new value to the result.
747 /// Returns the previous value.
749 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
750 /// of this operation. All ordering modes are possible. Note that using
751 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
752 /// using [`Release`] makes the load part [`Relaxed`].
754 /// **Note:** This method is only available on platforms that support atomic
755 /// operations on `u8`.
760 /// use std::sync::atomic::{AtomicBool, Ordering};
762 /// let foo = AtomicBool::new(true);
763 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
764 /// assert_eq!(foo.load(Ordering::SeqCst), true);
766 /// let foo = AtomicBool::new(true);
767 /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
768 /// assert_eq!(foo.load(Ordering::SeqCst), false);
770 /// let foo = AtomicBool::new(false);
771 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
772 /// assert_eq!(foo.load(Ordering::SeqCst), false);
775 #[stable(feature = "rust1", since = "1.0.0")]
776 #[cfg(target_has_atomic = "8")]
777 pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
778 // SAFETY: data races are prevented by atomic intrinsics.
779 unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
782 /// Returns a mutable pointer to the underlying [`bool`].
784 /// Doing non-atomic reads and writes on the resulting boolean can be a data race.
785 /// This method is mostly useful for FFI, where the function signature may use
786 /// `*mut bool` instead of `&AtomicBool`.
788 /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
789 /// atomic types work with interior mutability. All modifications of an atomic change the value
790 /// through a shared reference, and can do so safely as long as they use atomic operations. Any
791 /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
792 /// restriction: operations on it must be atomic.
796 /// ```ignore (extern-declaration)
798 /// use std::sync::atomic::AtomicBool;
800 /// fn my_atomic_op(arg: *mut bool);
803 /// let mut atomic = AtomicBool::new(true);
805 /// my_atomic_op(atomic.as_mut_ptr());
810 #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")]
811 pub fn as_mut_ptr(&self) -> *mut bool {
812 self.v.get() as *mut bool
815 /// Fetches the value, and applies a function to it that returns an optional
816 /// new value. Returns a `Result` of `Ok(previous_value)` if the function
817 /// returned `Some(_)`, else `Err(previous_value)`.
819 /// Note: This may call the function multiple times if the value has been
820 /// changed from other threads in the meantime, as long as the function
821 /// returns `Some(_)`, but the function will have been applied only once to
822 /// the stored value.
824 /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
825 /// ordering of this operation. The first describes the required ordering for
826 /// when the operation finally succeeds while the second describes the
827 /// required ordering for loads. These correspond to the success and failure
828 /// orderings of [`AtomicBool::compare_exchange`] respectively.
830 /// Using [`Acquire`] as success ordering makes the store part of this
831 /// operation [`Relaxed`], and using [`Release`] makes the final successful
832 /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
833 /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the
834 /// success ordering.
836 /// **Note:** This method is only available on platforms that support atomic
837 /// operations on `u8`.
842 /// #![feature(atomic_fetch_update)]
843 /// use std::sync::atomic::{AtomicBool, Ordering};
845 /// let x = AtomicBool::new(false);
846 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(false));
847 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(false));
848 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(true));
849 /// assert_eq!(x.load(Ordering::SeqCst), false);
852 #[unstable(feature = "atomic_fetch_update", reason = "recently added", issue = "78639")]
853 #[cfg(target_has_atomic = "8")]
854 pub fn fetch_update<F>(
857 fetch_order: Ordering,
859 ) -> Result<bool, bool>
861 F: FnMut(bool) -> Option<bool>,
// Classic CAS loop: keep applying `f` to the most recently observed value until
// either `f` returns `None` (caller declines to update) or the exchange succeeds.
863 let mut prev = self.load(fetch_order);
864 while let Some(next) = f(prev) {
865 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
866 x @ Ok(_) => return x,
// `compare_exchange_weak` may also fail spuriously; either way `next_prev` is
// the value actually observed, so we retry from it without an extra load.
867 Err(next_prev) => prev = next_prev,
874 #[cfg(target_has_atomic_load_store = "ptr")]
875 impl<T> AtomicPtr<T> {
876 /// Creates a new `AtomicPtr`.
881 /// use std::sync::atomic::AtomicPtr;
883 /// let ptr = &mut 5;
884 /// let atomic_ptr = AtomicPtr::new(ptr);
887 #[stable(feature = "rust1", since = "1.0.0")]
888 #[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
889 pub const fn new(p: *mut T) -> AtomicPtr<T> {
890 AtomicPtr { p: UnsafeCell::new(p) }
893 /// Returns a mutable reference to the underlying pointer.
895 /// This is safe because the mutable reference guarantees that no other threads are
896 /// concurrently accessing the atomic data.
901 /// use std::sync::atomic::{AtomicPtr, Ordering};
903 /// let mut data = 10;
904 /// let mut atomic_ptr = AtomicPtr::new(&mut data);
905 /// let mut other_data = 5;
906 /// *atomic_ptr.get_mut() = &mut other_data;
907 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
910 #[stable(feature = "atomic_access", since = "1.15.0")]
// `&mut self` proves exclusive access, so no atomic instructions are needed here.
911 pub fn get_mut(&mut self) -> &mut *mut T {
// NOTE(review): function body elided in this excerpt (gap after orig line 911).
915 /// Gets atomic access to a pointer.
920 /// #![feature(atomic_from_mut)]
921 /// use std::sync::atomic::{AtomicPtr, Ordering};
923 /// let mut data = 123;
924 /// let mut some_ptr = &mut data as *mut i32;
925 /// let a = AtomicPtr::from_mut(&mut some_ptr);
926 /// let mut other_data = 456;
927 /// a.store(&mut other_data, Ordering::Relaxed);
928 /// assert_eq!(unsafe { *some_ptr }, 456);
// Requires `AtomicPtr` and `*mut T` to have equal alignment on this target.
931 #[cfg(target_has_atomic_equal_alignment = "ptr")]
932 #[unstable(feature = "atomic_from_mut", issue = "76314")]
933 pub fn from_mut(v: &mut *mut T) -> &Self {
934 use crate::mem::align_of;
// Compile-time alignment assertion: an empty-array pattern only matches `[(); 0]`,
// so this fails to compile unless the two alignments are equal (subtraction would
// otherwise underflow/leave a non-zero length).
935 let [] = [(); align_of::<AtomicPtr<()>>() - align_of::<*mut ()>()];
// NOTE(review): the `// SAFETY:` header line (orig 936) is elided in this excerpt;
// the justification items below belong to it.
937 // - the mutable reference guarantees unique ownership.
938 // - the alignment of `*mut T` and `Self` is the same on all platforms
939 // supported by rust, as verified above.
940 unsafe { &*(v as *mut *mut T as *mut Self) }
943 /// Consumes the atomic and returns the contained value.
945 /// This is safe because passing `self` by value guarantees that no other threads are
946 /// concurrently accessing the atomic data.
951 /// use std::sync::atomic::AtomicPtr;
953 /// let mut data = 5;
954 /// let atomic_ptr = AtomicPtr::new(&mut data);
955 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
958 #[stable(feature = "atomic_access", since = "1.15.0")]
959 #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
// By-value `self`: ownership rules out concurrent access, so this is a plain read.
960 pub const fn into_inner(self) -> *mut T {
// NOTE(review): function body elided in this excerpt (gap after orig line 960).
964 /// Loads a value from the pointer.
966 /// `load` takes an [`Ordering`] argument which describes the memory ordering
967 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
971 /// Panics if `order` is [`Release`] or [`AcqRel`].
976 /// use std::sync::atomic::{AtomicPtr, Ordering};
978 /// let ptr = &mut 5;
979 /// let some_ptr = AtomicPtr::new(ptr);
981 /// let value = some_ptr.load(Ordering::Relaxed);
984 #[stable(feature = "rust1", since = "1.0.0")]
985 pub fn load(&self, order: Ordering) -> *mut T {
// Delegates to the module's `atomic_load` intrinsic wrapper.
// NOTE(review): the documented panic on Release/AcqRel presumably happens inside
// `atomic_load` — confirm against the wrapper's definition elsewhere in this file.
986 // SAFETY: data races are prevented by atomic intrinsics.
987 unsafe { atomic_load(self.p.get(), order) }
990 /// Stores a value into the pointer.
992 /// `store` takes an [`Ordering`] argument which describes the memory ordering
993 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
997 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
1002 /// use std::sync::atomic::{AtomicPtr, Ordering};
1004 /// let ptr = &mut 5;
1005 /// let some_ptr = AtomicPtr::new(ptr);
1007 /// let other_ptr = &mut 10;
1009 /// some_ptr.store(other_ptr, Ordering::Relaxed);
1012 #[stable(feature = "rust1", since = "1.0.0")]
1013 pub fn store(&self, ptr: *mut T, order: Ordering) {
1014 // SAFETY: data races are prevented by atomic intrinsics.
// NOTE(review): the `unsafe {` opener (orig 1015) and closing lines are elided in
// this excerpt; the call below is the body of that block.
1016 atomic_store(self.p.get(), ptr, order);
1020 /// Stores a value into the pointer, returning the previous value.
1022 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
1023 /// of this operation. All ordering modes are possible. Note that using
1024 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1025 /// using [`Release`] makes the load part [`Relaxed`].
1027 /// **Note:** This method is only available on platforms that support atomic
1028 /// operations on pointers.
1033 /// use std::sync::atomic::{AtomicPtr, Ordering};
1035 /// let ptr = &mut 5;
1036 /// let some_ptr = AtomicPtr::new(ptr);
1038 /// let other_ptr = &mut 10;
1040 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
1043 #[stable(feature = "rust1", since = "1.0.0")]
// Read-modify-write ops require full atomic support, hence the stricter cfg
// (`target_has_atomic`) than plain load/store (`target_has_atomic_load_store`).
1044 #[cfg(target_has_atomic = "ptr")]
1045 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
1046 // SAFETY: data races are prevented by atomic intrinsics.
1047 unsafe { atomic_swap(self.p.get(), ptr, order) }
1050 /// Stores a value into the pointer if the current value is the same as the `current` value.
1052 /// The return value is always the previous value. If it is equal to `current`, then the value
1055 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1056 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
1057 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1058 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1059 /// happens, and using [`Release`] makes the load part [`Relaxed`].
1061 /// **Note:** This method is only available on platforms that support atomic
1062 /// operations on pointers.
1064 /// # Migrating to `compare_exchange` and `compare_exchange_weak`
1066 /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
1067 /// memory orderings:
1069 /// Original | Success | Failure
1070 /// -------- | ------- | -------
1071 /// Relaxed | Relaxed | Relaxed
1072 /// Acquire | Acquire | Acquire
1073 /// Release | Release | Relaxed
1074 /// AcqRel | AcqRel | Acquire
1075 /// SeqCst | SeqCst | SeqCst
1077 /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
1078 /// which allows the compiler to generate better assembly code when the compare and swap
1079 /// is used in a loop.
1084 /// use std::sync::atomic::{AtomicPtr, Ordering};
1086 /// let ptr = &mut 5;
1087 /// let some_ptr = AtomicPtr::new(ptr);
1089 /// let other_ptr = &mut 10;
1091 /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
1094 #[stable(feature = "rust1", since = "1.0.0")]
// NOTE(review): the deprecation attribute's opening line is elided in this excerpt;
// only its `reason = ...` argument is visible below.
1097 reason = "Use `compare_exchange` or `compare_exchange_weak` instead"
1099 #[cfg(target_has_atomic = "ptr")]
1100 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
// Implemented on top of `compare_exchange`, deriving the failure ordering from the
// single user-supplied ordering via `strongest_failure_ordering` (matches the
// migration table documented above).
1101 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
1107 /// Stores a value into the pointer if the current value is the same as the `current` value.
1109 /// The return value is a result indicating whether the new value was written and containing
1110 /// the previous value. On success this value is guaranteed to be equal to `current`.
1112 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1113 /// ordering of this operation. `success` describes the required ordering for the
1114 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
1115 /// `failure` describes the required ordering for the load operation that takes place when
1116 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
1117 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1118 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1119 /// and must be equivalent to or weaker than the success ordering.
1121 /// **Note:** This method is only available on platforms that support atomic
1122 /// operations on pointers.
1127 /// use std::sync::atomic::{AtomicPtr, Ordering};
1129 /// let ptr = &mut 5;
1130 /// let some_ptr = AtomicPtr::new(ptr);
1132 /// let other_ptr = &mut 10;
1134 /// let value = some_ptr.compare_exchange(ptr, other_ptr,
1135 /// Ordering::SeqCst, Ordering::Relaxed);
1138 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1139 #[cfg(target_has_atomic = "ptr")]
1140 pub fn compare_exchange(
// NOTE(review): the parameter list (`&self`, `current`, `new`, `success`, `failure`)
// is elided in this excerpt (gap between orig 1140 and 1146); the body below uses
// those names.
1146 ) -> Result<*mut T, *mut T> {
1147 // SAFETY: data races are prevented by atomic intrinsics.
1148 unsafe { atomic_compare_exchange(self.p.get(), current, new, success, failure) }
1151 /// Stores a value into the pointer if the current value is the same as the `current` value.
1153 /// Unlike [`AtomicPtr::compare_exchange`], this function is allowed to spuriously fail even when the
1154 /// comparison succeeds, which can result in more efficient code on some platforms. The
1155 /// return value is a result indicating whether the new value was written and containing the
1158 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1159 /// ordering of this operation. `success` describes the required ordering for the
1160 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
1161 /// `failure` describes the required ordering for the load operation that takes place when
1162 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
1163 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1164 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1165 /// and must be equivalent to or weaker than the success ordering.
1167 /// **Note:** This method is only available on platforms that support atomic
1168 /// operations on pointers.
1173 /// use std::sync::atomic::{AtomicPtr, Ordering};
1175 /// let some_ptr = AtomicPtr::new(&mut 5);
1177 /// let new = &mut 10;
1178 /// let mut old = some_ptr.load(Ordering::Relaxed);
1180 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1182 /// Err(x) => old = x,
1187 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1188 #[cfg(target_has_atomic = "ptr")]
1189 pub fn compare_exchange_weak(
// NOTE(review): the parameter list is elided in this excerpt (gap between orig 1189
// and 1195); the body below uses `current`, `new`, `success`, `failure`.
1195 ) -> Result<*mut T, *mut T> {
1196 // SAFETY: This intrinsic is unsafe because it operates on a raw pointer
1197 // but we know for sure that the pointer is valid (we just got it from
1198 // an `UnsafeCell` that we have by reference) and the atomic operation
1199 // itself allows us to safely mutate the `UnsafeCell` contents.
1200 unsafe { atomic_compare_exchange_weak(self.p.get(), current, new, success, failure) }
1203 /// Fetches the value, and applies a function to it that returns an optional
1204 /// new value. Returns a `Result` of `Ok(previous_value)` if the function
1205 /// returned `Some(_)`, else `Err(previous_value)`.
1207 /// Note: This may call the function multiple times if the value has been
1208 /// changed from other threads in the meantime, as long as the function
1209 /// returns `Some(_)`, but the function will have been applied only once to
1210 /// the stored value.
1212 /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
1213 /// ordering of this operation. The first describes the required ordering for
1214 /// when the operation finally succeeds while the second describes the
1215 /// required ordering for loads. These correspond to the success and failure
1216 /// orderings of [`AtomicPtr::compare_exchange`] respectively.
1218 /// Using [`Acquire`] as success ordering makes the store part of this
1219 /// operation [`Relaxed`], and using [`Release`] makes the final successful
1220 /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
1221 /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the
1222 /// success ordering.
1224 /// **Note:** This method is only available on platforms that support atomic
1225 /// operations on pointers.
1230 /// #![feature(atomic_fetch_update)]
1231 /// use std::sync::atomic::{AtomicPtr, Ordering};
1233 /// let ptr: *mut _ = &mut 5;
1234 /// let some_ptr = AtomicPtr::new(ptr);
1236 /// let new: *mut _ = &mut 10;
1237 /// assert_eq!(some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(ptr));
1238 /// let result = some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
1245 /// assert_eq!(result, Ok(ptr));
1246 /// assert_eq!(some_ptr.load(Ordering::SeqCst), new);
1249 #[unstable(feature = "atomic_fetch_update", reason = "recently added", issue = "78639")]
1250 #[cfg(target_has_atomic = "ptr")]
1251 pub fn fetch_update<F>(
1253 set_order: Ordering,
1254 fetch_order: Ordering,
1256 ) -> Result<*mut T, *mut T>
1258 F: FnMut(*mut T) -> Option<*mut T>,
// CAS loop: load once, then retry `compare_exchange_weak` until the closure returns
// `None` (overall failure, `Err(prev)`) or the exchange succeeds. On a failed
// exchange the freshly observed value is fed back into the closure.
1260 let mut prev = self.load(fetch_order);
1261 while let Some(next) = f(prev) {
1262 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
1263 x @ Ok(_) => return x,
1264 Err(next_prev) => prev = next_prev,
1271 #[cfg(target_has_atomic_load_store = "8")]
1272 #[stable(feature = "atomic_bool_from", since = "1.24.0")]
1273 impl From<bool> for AtomicBool {
1274 /// Converts a `bool` into an `AtomicBool`.
1279 /// use std::sync::atomic::AtomicBool;
1280 /// let atomic_bool = AtomicBool::from(true);
1281 /// assert_eq!(format!("{:?}", atomic_bool), "true")
1284 fn from(b: bool) -> Self {
// NOTE(review): function body elided in this excerpt — presumably `Self::new(b)`;
// confirm against the full source.
// Thin conversion impl: wraps a raw pointer in an `AtomicPtr`.
1289 #[cfg(target_has_atomic_load_store = "ptr")]
1290 #[stable(feature = "atomic_from", since = "1.23.0")]
1291 impl<T> From<*mut T> for AtomicPtr<T> {
1293 fn from(p: *mut T) -> Self {
// NOTE(review): doc comment (orig 1292) and function body elided in this excerpt —
// presumably `Self::new(p)`; confirm against the full source.
1298 #[allow(unused_macros)] // This macro ends up being unused on some architectures.
1299 macro_rules! if_not_8_bit {
// For `u8`/`i8` this expands to an empty string literal; for any other integer
// type it expands to the given tokens. Used below (inside `atomic_int!`) to emit
// the "only available when suitably aligned" doc note for types wider than a byte.
1300 (u8, $($tt:tt)*) => { "" };
1301 (i8, $($tt:tt)*) => { "" };
1302 ($_:ident, $($tt:tt)*) => { $($tt)* };
1305 #[cfg(target_has_atomic_load_store = "8")]
1306 macro_rules! atomic_int {
1312 $stable_access:meta,
1316 $stable_init_const:meta,
1317 $s_int_type:literal,
1318 $extra_feature:expr,
1319 $min_fn:ident, $max_fn:ident,
1322 $int_type:ident $atomic_type:ident $atomic_init:ident) => {
1323 /// An integer type which can be safely shared between threads.
1325 /// This type has the same in-memory representation as the underlying
1326 /// integer type, [`
1327 #[doc = $s_int_type]
1328 /// `]. For more about the differences between atomic types and
1329 /// non-atomic types as well as information about the portability of
1330 /// this type, please see the [module-level documentation].
1332 /// **Note:** This type is only available on platforms that support
1333 /// atomic loads and stores of [`
1334 #[doc = $s_int_type]
1337 /// [module-level documentation]: crate::sync::atomic
1339 #[repr(C, align($align))]
1340 pub struct $atomic_type {
1341 v: UnsafeCell<$int_type>,
1344 /// An atomic integer initialized to `0`.
1345 #[$stable_init_const]
1348 reason = "the `new` function is now preferred",
1349 suggestion = $atomic_new,
1351 pub const $atomic_init: $atomic_type = $atomic_type::new(0);
1354 impl Default for $atomic_type {
1356 fn default() -> Self {
1357 Self::new(Default::default())
1362 impl From<$int_type> for $atomic_type {
1363 #[doc = concat!("Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`.")]
1365 fn from(v: $int_type) -> Self { Self::new(v) }
1369 impl fmt::Debug for $atomic_type {
1370 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1371 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
1375 // Send is implicitly implemented.
1377 unsafe impl Sync for $atomic_type {}
1380 /// Creates a new atomic integer.
1385 #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
1387 #[doc = concat!("let atomic_forty_two = ", stringify!($atomic_type), "::new(42);")]
1392 pub const fn new(v: $int_type) -> Self {
1393 Self {v: UnsafeCell::new(v)}
1396 /// Returns a mutable reference to the underlying integer.
1398 /// This is safe because the mutable reference guarantees that no other threads are
1399 /// concurrently accessing the atomic data.
1404 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1406 #[doc = concat!("let mut some_var = ", stringify!($atomic_type), "::new(10);")]
1407 /// assert_eq!(*some_var.get_mut(), 10);
1408 /// *some_var.get_mut() = 5;
1409 /// assert_eq!(some_var.load(Ordering::SeqCst), 5);
1413 pub fn get_mut(&mut self) -> &mut $int_type {
1417 #[doc = concat!("Get atomic access to a `&mut ", stringify!($int_type), "`.")]
1419 #[doc = if_not_8_bit! {
1422 "**Note:** This function is only available on targets where `",
1423 stringify!($int_type), "` has an alignment of ", $align, " bytes."
1430 /// #![feature(atomic_from_mut)]
1431 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1433 /// let mut some_int = 123;
1434 #[doc = concat!("let a = ", stringify!($atomic_type), "::from_mut(&mut some_int);")]
1435 /// a.store(100, Ordering::Relaxed);
1436 /// assert_eq!(some_int, 100);
1441 #[unstable(feature = "atomic_from_mut", issue = "76314")]
1442 pub fn from_mut(v: &mut $int_type) -> &Self {
1443 use crate::mem::align_of;
1444 let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
1446 // - the mutable reference guarantees unique ownership.
1447 // - the alignment of `$int_type` and `Self` is the
1448 // same, as promised by $cfg_align and verified above.
1449 unsafe { &*(v as *mut $int_type as *mut Self) }
1452 /// Consumes the atomic and returns the contained value.
1454 /// This is safe because passing `self` by value guarantees that no other threads are
1455 /// concurrently accessing the atomic data.
1460 #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
1462 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
1463 /// assert_eq!(some_var.into_inner(), 5);
1467 #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
1468 pub const fn into_inner(self) -> $int_type {
1472 /// Loads a value from the atomic integer.
1474 /// `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1475 /// Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1479 /// Panics if `order` is [`Release`] or [`AcqRel`].
1484 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1486 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
1488 /// assert_eq!(some_var.load(Ordering::Relaxed), 5);
1492 pub fn load(&self, order: Ordering) -> $int_type {
1493 // SAFETY: data races are prevented by atomic intrinsics.
1494 unsafe { atomic_load(self.v.get(), order) }
1497 /// Stores a value into the atomic integer.
1499 /// `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1500 /// Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
1504 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
1509 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1511 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
1513 /// some_var.store(10, Ordering::Relaxed);
1514 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
1518 pub fn store(&self, val: $int_type, order: Ordering) {
1519 // SAFETY: data races are prevented by atomic intrinsics.
1520 unsafe { atomic_store(self.v.get(), val, order); }
1523 /// Stores a value into the atomic integer, returning the previous value.
1525 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
1526 /// of this operation. All ordering modes are possible. Note that using
1527 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1528 /// using [`Release`] makes the load part [`Relaxed`].
1530 /// **Note**: This method is only available on platforms that support atomic operations on
1531 #[doc = concat!("[`", $s_int_type, "`].")]
1536 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1538 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
1540 /// assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
1545 pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
1546 // SAFETY: data races are prevented by atomic intrinsics.
1547 unsafe { atomic_swap(self.v.get(), val, order) }
1550 /// Stores a value into the atomic integer if the current value is the same as
1551 /// the `current` value.
1553 /// The return value is always the previous value. If it is equal to `current`, then the
1554 /// value was updated.
1556 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1557 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
1558 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1559 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1560 /// happens, and using [`Release`] makes the load part [`Relaxed`].
1562 /// **Note**: This method is only available on platforms that support atomic operations on
1563 #[doc = concat!("[`", $s_int_type, "`].")]
1565 /// # Migrating to `compare_exchange` and `compare_exchange_weak`
1567 /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
1568 /// memory orderings:
1570 /// Original | Success | Failure
1571 /// -------- | ------- | -------
1572 /// Relaxed | Relaxed | Relaxed
1573 /// Acquire | Acquire | Acquire
1574 /// Release | Release | Relaxed
1575 /// AcqRel | AcqRel | Acquire
1576 /// SeqCst | SeqCst | SeqCst
1578 /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
1579 /// which allows the compiler to generate better assembly code when the compare and swap
1580 /// is used in a loop.
1585 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1587 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
1589 /// assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
1590 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
1592 /// assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
1593 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
1599 reason = "Use `compare_exchange` or `compare_exchange_weak` instead")
1602 pub fn compare_and_swap(&self,
1605 order: Ordering) -> $int_type {
1606 match self.compare_exchange(current,
1609 strongest_failure_ordering(order)) {
1615 /// Stores a value into the atomic integer if the current value is the same as
1616 /// the `current` value.
1618 /// The return value is a result indicating whether the new value was written and
1619 /// containing the previous value. On success this value is guaranteed to be equal to
1622 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1623 /// ordering of this operation. `success` describes the required ordering for the
1624 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
1625 /// `failure` describes the required ordering for the load operation that takes place when
1626 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
1627 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1628 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1629 /// and must be equivalent to or weaker than the success ordering.
1631 /// **Note**: This method is only available on platforms that support atomic operations on
1632 #[doc = concat!("[`", $s_int_type, "`].")]
1637 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1639 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
1641 /// assert_eq!(some_var.compare_exchange(5, 10,
1642 /// Ordering::Acquire,
1643 /// Ordering::Relaxed),
1645 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
1647 /// assert_eq!(some_var.compare_exchange(6, 12,
1648 /// Ordering::SeqCst,
1649 /// Ordering::Acquire),
1651 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
1656 pub fn compare_exchange(&self,
1660 failure: Ordering) -> Result<$int_type, $int_type> {
1661 // SAFETY: data races are prevented by atomic intrinsics.
1662 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
1665 /// Stores a value into the atomic integer if the current value is the same as
1666 /// the `current` value.
1668 #[doc = concat!("Unlike [`", stringify!($atomic_type), "::compare_exchange`],")]
1669 /// this function is allowed to spuriously fail even
1670 /// when the comparison succeeds, which can result in more efficient code on some
1671 /// platforms. The return value is a result indicating whether the new value was
1672 /// written and containing the previous value.
1674 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1675 /// ordering of this operation. `success` describes the required ordering for the
1676 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
1677 /// `failure` describes the required ordering for the load operation that takes place when
1678 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
1679 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1680 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1681 /// and must be equivalent to or weaker than the success ordering.
1683 /// **Note**: This method is only available on platforms that support atomic operations on
1684 #[doc = concat!("[`", $s_int_type, "`].")]
1689 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1691 #[doc = concat!("let val = ", stringify!($atomic_type), "::new(4);")]
1693 /// let mut old = val.load(Ordering::Relaxed);
1695 /// let new = old * 2;
1696 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1698 /// Err(x) => old = x,
1705 pub fn compare_exchange_weak(&self,
1709 failure: Ordering) -> Result<$int_type, $int_type> {
1710 // SAFETY: data races are prevented by atomic intrinsics.
1712 atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
1716 /// Adds to the current value, returning the previous value.
1718 /// This operation wraps around on overflow.
1720 /// `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
1721 /// of this operation. All ordering modes are possible. Note that using
1722 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1723 /// using [`Release`] makes the load part [`Relaxed`].
1725 /// **Note**: This method is only available on platforms that support atomic operations on
1726 #[doc = concat!("[`", $s_int_type, "`].")]
1731 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1733 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0);")]
1734 /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
1735 /// assert_eq!(foo.load(Ordering::SeqCst), 10);
1740 pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
1741 // SAFETY: data races are prevented by atomic intrinsics.
1742 unsafe { atomic_add(self.v.get(), val, order) }
1745 /// Subtracts from the current value, returning the previous value.
1747 /// This operation wraps around on overflow.
1749 /// `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
1750 /// of this operation. All ordering modes are possible. Note that using
1751 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1752 /// using [`Release`] makes the load part [`Relaxed`].
1754 /// **Note**: This method is only available on platforms that support atomic operations on
1755 #[doc = concat!("[`", $s_int_type, "`].")]
1760 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1762 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(20);")]
1763 /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
1764 /// assert_eq!(foo.load(Ordering::SeqCst), 10);
1769 pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
1770 // SAFETY: data races are prevented by atomic intrinsics.
1771 unsafe { atomic_sub(self.v.get(), val, order) }
1774 /// Bitwise "and" with the current value.
1776 /// Performs a bitwise "and" operation on the current value and the argument `val`, and
1777 /// sets the new value to the result.
1779 /// Returns the previous value.
1781 /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
1782 /// of this operation. All ordering modes are possible. Note that using
1783 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1784 /// using [`Release`] makes the load part [`Relaxed`].
1786 /// **Note**: This method is only available on platforms that support atomic operations on
1787 #[doc = concat!("[`", $s_int_type, "`].")]
1792 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1794 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
1795 /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
1796 /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
1801 pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
1802 // SAFETY: data races are prevented by atomic intrinsics.
1803 unsafe { atomic_and(self.v.get(), val, order) }
1806 /// Bitwise "nand" with the current value.
1808 /// Performs a bitwise "nand" operation on the current value and the argument `val`, and
1809 /// sets the new value to the result.
1811 /// Returns the previous value.
1813 /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
1814 /// of this operation. All ordering modes are possible. Note that using
1815 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1816 /// using [`Release`] makes the load part [`Relaxed`].
1818 /// **Note**: This method is only available on platforms that support atomic operations on
1819 #[doc = concat!("[`", $s_int_type, "`].")]
1824 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1826 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0x13);")]
1827 /// assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
1828 /// assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
1833 pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
1834 // SAFETY: data races are prevented by atomic intrinsics.
1835 unsafe { atomic_nand(self.v.get(), val, order) }
1838 /// Bitwise "or" with the current value.
1840 /// Performs a bitwise "or" operation on the current value and the argument `val`, and
1841 /// sets the new value to the result.
1843 /// Returns the previous value.
1845 /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
1846 /// of this operation. All ordering modes are possible. Note that using
1847 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1848 /// using [`Release`] makes the load part [`Relaxed`].
1850 /// **Note**: This method is only available on platforms that support atomic operations on
1851 #[doc = concat!("[`", $s_int_type, "`].")]
1856 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1858 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
1859 /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
1860 /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
1865 pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
1866 // SAFETY: data races are prevented by atomic intrinsics.
1867 unsafe { atomic_or(self.v.get(), val, order) }
/// Bitwise "xor" with the current value.
/// Performs a bitwise "xor" operation on the current value and the argument `val`, and
/// sets the new value to the result.
/// Returns the previous value.
/// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
/// **Note**: This method is only available on platforms that support atomic operations on
#[doc = concat!("[`", $s_int_type, "`].")]
#[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
#[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
/// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
/// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
    // SAFETY: data races are prevented by atomic intrinsics: `self.v.get()`
    // yields the pointer to the inner cell, and every access to it in this
    // module goes through an atomic operation.
    unsafe { atomic_xor(self.v.get(), val, order) }
/// Fetches the value, and applies a function to it that returns an optional
/// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
/// `Err(previous_value)`.
/// Note: This may call the function multiple times if the value has been changed from other threads in
/// the meantime, as long as the function returns `Some(_)`, but the function will have been applied
/// only once to the stored value.
/// `fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
/// The first describes the required ordering for when the operation finally succeeds while the second
/// describes the required ordering for loads. These correspond to the success and failure orderings of
#[doc = concat!("[`", stringify!($atomic_type), "::compare_exchange`]")]
/// Using [`Acquire`] as success ordering makes the store part
/// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
/// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
/// and must be equivalent to or weaker than the success ordering.
/// **Note**: This method is only available on platforms that support atomic operations on
#[doc = concat!("[`", $s_int_type, "`].")]
#[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
#[doc = concat!("let x = ", stringify!($atomic_type), "::new(7);")]
/// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7));
/// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7));
/// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8));
/// assert_eq!(x.load(Ordering::SeqCst), 9);
#[stable(feature = "no_more_cas", since = "1.45.0")]
pub fn fetch_update<F>(&self,
                       set_order: Ordering,
                       fetch_order: Ordering,
                       mut f: F) -> Result<$int_type, $int_type>
where F: FnMut($int_type) -> Option<$int_type> {
    // Classic CAS loop: observe the current value, compute a candidate with
    // `f`, and retry whenever a concurrent update (or a spurious weak-CAS
    // failure) invalidates the observation.
    let mut prev = self.load(fetch_order);
    while let Some(next) = f(prev) {
        match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
            // Success: return the value that was replaced.
            x @ Ok(_) => return x,
            // Failure: `next_prev` is the freshly observed value; loop and
            // re-apply `f` to it.
            Err(next_prev) => prev = next_prev
/// Maximum with the current value.
/// Finds the maximum of the current value and the argument `val`, and
/// sets the new value to the result.
/// Returns the previous value.
/// `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
/// **Note**: This method is only available on platforms that support atomic operations on
#[doc = concat!("[`", $s_int_type, "`].")]
#[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
#[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
/// assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
/// assert_eq!(foo.load(Ordering::SeqCst), 42);
/// If you want to obtain the maximum value in one step, you can use the following:
#[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
#[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
/// let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
/// assert!(max_foo == 42);
#[stable(feature = "atomic_min_max", since = "1.45.0")]
pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
    // SAFETY: data races are prevented by atomic intrinsics. `$max_fn` is the
    // signed or unsigned max intrinsic matching this integer type.
    unsafe { $max_fn(self.v.get(), val, order) }
/// Minimum with the current value.
/// Finds the minimum of the current value and the argument `val`, and
/// sets the new value to the result.
/// Returns the previous value.
/// `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
/// **Note**: This method is only available on platforms that support atomic operations on
#[doc = concat!("[`", $s_int_type, "`].")]
#[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
#[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
/// assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
/// assert_eq!(foo.load(Ordering::Relaxed), 23);
/// assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
/// assert_eq!(foo.load(Ordering::Relaxed), 22);
/// If you want to obtain the minimum value in one step, you can use the following:
#[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
#[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
/// let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
/// assert_eq!(min_foo, 12);
#[stable(feature = "atomic_min_max", since = "1.45.0")]
pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
    // SAFETY: data races are prevented by atomic intrinsics. `$min_fn` is the
    // signed or unsigned min intrinsic matching this integer type.
    unsafe { $min_fn(self.v.get(), val, order) }
/// Returns a mutable pointer to the underlying integer.
/// Doing non-atomic reads and writes on the resulting integer can be a data race.
/// This method is mostly useful for FFI, where the function signature may use
#[doc = concat!("`*mut ", stringify!($int_type), "` instead of `&", stringify!($atomic_type), "`.")]
/// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
/// atomic types work with interior mutability. All modifications of an atomic change the value
/// through a shared reference, and can do so safely as long as they use atomic operations. Any
/// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
/// restriction: operations on it must be atomic.
/// ```ignore (extern-declaration)
#[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
#[doc = concat!(" fn my_atomic_op(arg: *mut ", stringify!($int_type), ");")]
#[doc = concat!("let mut atomic = ", stringify!($atomic_type), "::new(1);")]
/// // SAFETY: Safe as long as `my_atomic_op` is atomic.
/// my_atomic_op(atomic.as_mut_ptr());
#[unstable(feature = "atomic_mut_ptr",
           reason = "recently added",
pub fn as_mut_ptr(&self) -> *mut $int_type {
// AtomicI8: signed 8-bit atomic integer; uses the signed min/max intrinsics.
#[cfg(target_has_atomic_load_store = "8")]
cfg(target_has_atomic = "8"),
cfg(target_has_atomic_equal_alignment = "8"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
unstable(feature = "integer_atomics", issue = "32976"),
atomic_min, atomic_max,
i8 AtomicI8 ATOMIC_I8_INIT
// AtomicU8: unsigned 8-bit atomic integer; uses the unsigned min/max intrinsics.
#[cfg(target_has_atomic_load_store = "8")]
cfg(target_has_atomic = "8"),
cfg(target_has_atomic_equal_alignment = "8"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
unstable(feature = "integer_atomics", issue = "32976"),
atomic_umin, atomic_umax,
u8 AtomicU8 ATOMIC_U8_INIT
// AtomicI16: signed 16-bit atomic integer.
#[cfg(target_has_atomic_load_store = "16")]
cfg(target_has_atomic = "16"),
cfg(target_has_atomic_equal_alignment = "16"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
unstable(feature = "integer_atomics", issue = "32976"),
atomic_min, atomic_max,
"AtomicI16::new(0)",
i16 AtomicI16 ATOMIC_I16_INIT
// AtomicU16: unsigned 16-bit atomic integer.
#[cfg(target_has_atomic_load_store = "16")]
cfg(target_has_atomic = "16"),
cfg(target_has_atomic_equal_alignment = "16"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
unstable(feature = "integer_atomics", issue = "32976"),
atomic_umin, atomic_umax,
"AtomicU16::new(0)",
u16 AtomicU16 ATOMIC_U16_INIT
// AtomicI32: signed 32-bit atomic integer.
#[cfg(target_has_atomic_load_store = "32")]
cfg(target_has_atomic = "32"),
cfg(target_has_atomic_equal_alignment = "32"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
unstable(feature = "integer_atomics", issue = "32976"),
atomic_min, atomic_max,
"AtomicI32::new(0)",
i32 AtomicI32 ATOMIC_I32_INIT
// AtomicU32: unsigned 32-bit atomic integer.
#[cfg(target_has_atomic_load_store = "32")]
cfg(target_has_atomic = "32"),
cfg(target_has_atomic_equal_alignment = "32"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
unstable(feature = "integer_atomics", issue = "32976"),
atomic_umin, atomic_umax,
"AtomicU32::new(0)",
u32 AtomicU32 ATOMIC_U32_INIT
// AtomicI64: signed 64-bit atomic integer.
#[cfg(target_has_atomic_load_store = "64")]
cfg(target_has_atomic = "64"),
cfg(target_has_atomic_equal_alignment = "64"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
unstable(feature = "integer_atomics", issue = "32976"),
atomic_min, atomic_max,
"AtomicI64::new(0)",
i64 AtomicI64 ATOMIC_I64_INIT
// AtomicU64: unsigned 64-bit atomic integer.
#[cfg(target_has_atomic_load_store = "64")]
cfg(target_has_atomic = "64"),
cfg(target_has_atomic_equal_alignment = "64"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
unstable(feature = "integer_atomics", issue = "32976"),
atomic_umin, atomic_umax,
"AtomicU64::new(0)",
u64 AtomicU64 ATOMIC_U64_INIT
// AtomicI128: signed 128-bit atomic integer — still unstable (feature
// `integer_atomics`), hence the nightly-only doc example prefix.
#[cfg(target_has_atomic_load_store = "128")]
cfg(target_has_atomic = "128"),
cfg(target_has_atomic_equal_alignment = "128"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
unstable(feature = "integer_atomics", issue = "32976"),
"#![feature(integer_atomics)]\n\n",
atomic_min, atomic_max,
"AtomicI128::new(0)",
i128 AtomicI128 ATOMIC_I128_INIT
// AtomicU128: unsigned 128-bit atomic integer — still unstable (feature
// `integer_atomics`), hence the nightly-only doc example prefix.
#[cfg(target_has_atomic_load_store = "128")]
cfg(target_has_atomic = "128"),
cfg(target_has_atomic_equal_alignment = "128"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
unstable(feature = "integer_atomics", issue = "32976"),
"#![feature(integer_atomics)]\n\n",
atomic_umin, atomic_umax,
"AtomicU128::new(0)",
u128 AtomicU128 ATOMIC_U128_INIT
// Helper macro: expands the pointer-sized atomics (AtomicIsize/AtomicUsize)
// once per supported (target_pointer_width, alignment) pair, gating each
// expansion on the matching `target_pointer_width` cfg.
macro_rules! atomic_int_ptr_sized {
    ( $($target_pointer_width:literal $align:literal)* ) => { $(
// AtomicIsize: pointer-sized signed atomic; stable since Rust 1.0.
#[cfg(target_has_atomic_load_store = "ptr")]
#[cfg(target_pointer_width = $target_pointer_width)]
cfg(target_has_atomic = "ptr"),
cfg(target_has_atomic_equal_alignment = "ptr"),
stable(feature = "rust1", since = "1.0.0"),
stable(feature = "extended_compare_and_swap", since = "1.10.0"),
stable(feature = "atomic_debug", since = "1.3.0"),
stable(feature = "atomic_access", since = "1.15.0"),
stable(feature = "atomic_from", since = "1.23.0"),
stable(feature = "atomic_nand", since = "1.27.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.24.0"),
stable(feature = "rust1", since = "1.0.0"),
atomic_min, atomic_max,
"AtomicIsize::new(0)",
isize AtomicIsize ATOMIC_ISIZE_INIT
// AtomicUsize: pointer-sized unsigned atomic; stable since Rust 1.0.
#[cfg(target_has_atomic_load_store = "ptr")]
#[cfg(target_pointer_width = $target_pointer_width)]
cfg(target_has_atomic = "ptr"),
cfg(target_has_atomic_equal_alignment = "ptr"),
stable(feature = "rust1", since = "1.0.0"),
stable(feature = "extended_compare_and_swap", since = "1.10.0"),
stable(feature = "atomic_debug", since = "1.3.0"),
stable(feature = "atomic_access", since = "1.15.0"),
stable(feature = "atomic_from", since = "1.23.0"),
stable(feature = "atomic_nand", since = "1.27.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.24.0"),
stable(feature = "rust1", since = "1.0.0"),
atomic_umin, atomic_umax,
"AtomicUsize::new(0)",
usize AtomicUsize ATOMIC_USIZE_INIT
// Instantiate for every supported pointer width.
atomic_int_ptr_sized! {
// NOTE(review): the name suggests this maps a compare-exchange success
// ordering to the strongest ordering that is legal for the failure case
// (e.g. for `compare_and_swap`-style callers that supply only one ordering) —
// body not visible here; confirm against the compare-exchange call sites.
#[cfg(target_has_atomic = "8")]
fn strongest_failure_ordering(order: Ordering) -> Ordering {
2337 unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {
2338 // SAFETY: the caller must uphold the safety contract for `atomic_store`.
2341 Release => intrinsics::atomic_store_rel(dst, val),
2342 Relaxed => intrinsics::atomic_store_relaxed(dst, val),
2343 SeqCst => intrinsics::atomic_store(dst, val),
2344 Acquire => panic!("there is no such thing as an acquire store"),
2345 AcqRel => panic!("there is no such thing as an acquire/release store"),
2351 unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
2352 // SAFETY: the caller must uphold the safety contract for `atomic_load`.
2355 Acquire => intrinsics::atomic_load_acq(dst),
2356 Relaxed => intrinsics::atomic_load_relaxed(dst),
2357 SeqCst => intrinsics::atomic_load(dst),
2358 Release => panic!("there is no such thing as a release load"),
2359 AcqRel => panic!("there is no such thing as an acquire/release load"),
2365 #[cfg(target_has_atomic = "8")]
2366 unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2367 // SAFETY: the caller must uphold the safety contract for `atomic_swap`.
2370 Acquire => intrinsics::atomic_xchg_acq(dst, val),
2371 Release => intrinsics::atomic_xchg_rel(dst, val),
2372 AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
2373 Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
2374 SeqCst => intrinsics::atomic_xchg(dst, val),
2379 /// Returns the previous value (like __sync_fetch_and_add).
2381 #[cfg(target_has_atomic = "8")]
2382 unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2383 // SAFETY: the caller must uphold the safety contract for `atomic_add`.
2386 Acquire => intrinsics::atomic_xadd_acq(dst, val),
2387 Release => intrinsics::atomic_xadd_rel(dst, val),
2388 AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
2389 Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
2390 SeqCst => intrinsics::atomic_xadd(dst, val),
2395 /// Returns the previous value (like __sync_fetch_and_sub).
2397 #[cfg(target_has_atomic = "8")]
2398 unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2399 // SAFETY: the caller must uphold the safety contract for `atomic_sub`.
2402 Acquire => intrinsics::atomic_xsub_acq(dst, val),
2403 Release => intrinsics::atomic_xsub_rel(dst, val),
2404 AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
2405 Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
2406 SeqCst => intrinsics::atomic_xsub(dst, val),
// Dispatch helper backing `compare_exchange`: selects the strong
// compare-exchange intrinsic matching the (success, failure) ordering pair.
#[cfg(target_has_atomic = "8")]
unsafe fn atomic_compare_exchange<T: Copy>(
    // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange`.
    // Each intrinsic returns the observed value plus a flag telling whether the
    // exchange took place.
    let (val, ok) = unsafe {
        match (success, failure) {
            (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
            (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
            (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
            (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
            (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
            (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
            (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
            (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
            (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
            // Invalid ordering pairs are rejected at runtime.
            (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
            (_, Release) => panic!("there is no such thing as a release failure ordering"),
            _ => panic!("a failure ordering can't be stronger than a success ordering"),
    // Translate the intrinsic's (value, success) pair into the public Result shape.
    if ok { Ok(val) } else { Err(val) }
// Dispatch helper backing `compare_exchange_weak`: like
// `atomic_compare_exchange`, but the weak intrinsics may fail spuriously.
#[cfg(target_has_atomic = "8")]
unsafe fn atomic_compare_exchange_weak<T: Copy>(
    // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange_weak`.
    // Each intrinsic returns the observed value plus a flag telling whether the
    // exchange took place.
    let (val, ok) = unsafe {
        match (success, failure) {
            (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
            (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
            (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
            (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
            (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
            (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
            (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
            (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
            (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
            // Invalid ordering pairs are rejected at runtime.
            (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
            (_, Release) => panic!("there is no such thing as a release failure ordering"),
            _ => panic!("a failure ordering can't be stronger than a success ordering"),
    // Translate the intrinsic's (value, success) pair into the public Result shape.
    if ok { Ok(val) } else { Err(val) }
2470 #[cfg(target_has_atomic = "8")]
2471 unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2472 // SAFETY: the caller must uphold the safety contract for `atomic_and`
2475 Acquire => intrinsics::atomic_and_acq(dst, val),
2476 Release => intrinsics::atomic_and_rel(dst, val),
2477 AcqRel => intrinsics::atomic_and_acqrel(dst, val),
2478 Relaxed => intrinsics::atomic_and_relaxed(dst, val),
2479 SeqCst => intrinsics::atomic_and(dst, val),
2485 #[cfg(target_has_atomic = "8")]
2486 unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2487 // SAFETY: the caller must uphold the safety contract for `atomic_nand`
2490 Acquire => intrinsics::atomic_nand_acq(dst, val),
2491 Release => intrinsics::atomic_nand_rel(dst, val),
2492 AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
2493 Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
2494 SeqCst => intrinsics::atomic_nand(dst, val),
2500 #[cfg(target_has_atomic = "8")]
2501 unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2502 // SAFETY: the caller must uphold the safety contract for `atomic_or`
2505 Acquire => intrinsics::atomic_or_acq(dst, val),
2506 Release => intrinsics::atomic_or_rel(dst, val),
2507 AcqRel => intrinsics::atomic_or_acqrel(dst, val),
2508 Relaxed => intrinsics::atomic_or_relaxed(dst, val),
2509 SeqCst => intrinsics::atomic_or(dst, val),
2515 #[cfg(target_has_atomic = "8")]
2516 unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2517 // SAFETY: the caller must uphold the safety contract for `atomic_xor`
2520 Acquire => intrinsics::atomic_xor_acq(dst, val),
2521 Release => intrinsics::atomic_xor_rel(dst, val),
2522 AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
2523 Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
2524 SeqCst => intrinsics::atomic_xor(dst, val),
2529 /// returns the max value (signed comparison)
2531 #[cfg(target_has_atomic = "8")]
2532 unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2533 // SAFETY: the caller must uphold the safety contract for `atomic_max`
2536 Acquire => intrinsics::atomic_max_acq(dst, val),
2537 Release => intrinsics::atomic_max_rel(dst, val),
2538 AcqRel => intrinsics::atomic_max_acqrel(dst, val),
2539 Relaxed => intrinsics::atomic_max_relaxed(dst, val),
2540 SeqCst => intrinsics::atomic_max(dst, val),
2545 /// returns the min value (signed comparison)
2547 #[cfg(target_has_atomic = "8")]
2548 unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2549 // SAFETY: the caller must uphold the safety contract for `atomic_min`
2552 Acquire => intrinsics::atomic_min_acq(dst, val),
2553 Release => intrinsics::atomic_min_rel(dst, val),
2554 AcqRel => intrinsics::atomic_min_acqrel(dst, val),
2555 Relaxed => intrinsics::atomic_min_relaxed(dst, val),
2556 SeqCst => intrinsics::atomic_min(dst, val),
2561 /// returns the max value (unsigned comparison)
2563 #[cfg(target_has_atomic = "8")]
2564 unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2565 // SAFETY: the caller must uphold the safety contract for `atomic_umax`
2568 Acquire => intrinsics::atomic_umax_acq(dst, val),
2569 Release => intrinsics::atomic_umax_rel(dst, val),
2570 AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
2571 Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
2572 SeqCst => intrinsics::atomic_umax(dst, val),
2577 /// returns the min value (unsigned comparison)
2579 #[cfg(target_has_atomic = "8")]
2580 unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2581 // SAFETY: the caller must uphold the safety contract for `atomic_umin`
2584 Acquire => intrinsics::atomic_umin_acq(dst, val),
2585 Release => intrinsics::atomic_umin_rel(dst, val),
2586 AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
2587 Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
2588 SeqCst => intrinsics::atomic_umin(dst, val),
2593 /// An atomic fence.
2595 /// Depending on the specified order, a fence prevents the compiler and CPU from
2596 /// reordering certain types of memory operations around it.
2597 /// That creates synchronizes-with relationships between it and atomic operations
2598 /// or fences in other threads.
2600 /// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
2601 /// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
2602 /// exist operations X and Y, both operating on some atomic object 'M' such
2603 /// that A is sequenced before X, Y is synchronized before B and Y observes
2604 /// the change to M. This provides a happens-before dependence between A and B.
2607 /// Thread 1 Thread 2
2609 /// fence(Release); A --------------
2610 /// x.store(3, Relaxed); X --------- |
2613 /// -------------> Y if x.load(Relaxed) == 3 {
2614 /// |-------> B fence(Acquire);
2619 /// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
2622 /// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
2623 /// and [`Release`] semantics, participates in the global program order of the
2624 /// other [`SeqCst`] operations and/or fences.
2626 /// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
2630 /// Panics if `order` is [`Relaxed`].
2635 /// use std::sync::atomic::AtomicBool;
2636 /// use std::sync::atomic::fence;
2637 /// use std::sync::atomic::Ordering;
2639 /// // A mutual exclusion primitive based on spinlock.
2640 /// pub struct Mutex {
2641 /// flag: AtomicBool,
2645 /// pub fn new() -> Mutex {
2647 /// flag: AtomicBool::new(false),
2651 /// pub fn lock(&self) {
2652 /// // Wait until the old value is `false`.
2653 /// while self.flag.compare_and_swap(false, true, Ordering::Relaxed) != false {}
2654 /// // This fence synchronizes-with store in `unlock`.
2655 /// fence(Ordering::Acquire);
2658 /// pub fn unlock(&self) {
2659 /// self.flag.store(false, Ordering::Release);
2664 #[stable(feature = "rust1", since = "1.0.0")]
2665 pub fn fence(order: Ordering) {
2666 // SAFETY: using an atomic fence is safe.
2669 Acquire => intrinsics::atomic_fence_acq(),
2670 Release => intrinsics::atomic_fence_rel(),
2671 AcqRel => intrinsics::atomic_fence_acqrel(),
2672 SeqCst => intrinsics::atomic_fence(),
2673 Relaxed => panic!("there is no such thing as a relaxed fence"),
2678 /// A compiler memory fence.
2680 /// `compiler_fence` does not emit any machine code, but restricts the kinds
2681 /// of memory re-ordering the compiler is allowed to do. Specifically, depending on
2682 /// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
2683 /// or writes from before or after the call to the other side of the call to
2684 /// `compiler_fence`. Note that it does **not** prevent the *hardware*
2685 /// from doing such re-ordering. This is not a problem in a single-threaded,
2686 /// execution context, but when other threads may modify memory at the same
2687 /// time, stronger synchronization primitives such as [`fence`] are required.
2689 /// The re-ordering prevented by the different ordering semantics are:
2691 /// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
2692 /// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
2693 /// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
2694 /// - with [`AcqRel`], both of the above rules are enforced.
2696 /// `compiler_fence` is generally only useful for preventing a thread from
2697 /// racing *with itself*. That is, if a given thread is executing one piece
2698 /// of code, and is then interrupted, and starts executing code elsewhere
2699 /// (while still in the same thread, and conceptually still on the same
2700 /// core). In traditional programs, this can only occur when a signal
2701 /// handler is registered. In more low-level code, such situations can also
2702 /// arise when handling interrupts, when implementing green threads with
2703 /// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
2704 /// discussion of [memory barriers].
2708 /// Panics if `order` is [`Relaxed`].
2712 /// Without `compiler_fence`, the `assert_eq!` in following code
2713 /// is *not* guaranteed to succeed, despite everything happening in a single thread.
2714 /// To see why, remember that the compiler is free to swap the stores to
2715 /// `IMPORTANT_VARIABLE` and `IS_READ` since they are both
2716 /// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
2717 /// after `IS_READY` is updated, then the signal handler will see
2718 /// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
2719 /// Using a `compiler_fence` remedies this situation.
2722 /// use std::sync::atomic::{AtomicBool, AtomicUsize};
2723 /// use std::sync::atomic::Ordering;
2724 /// use std::sync::atomic::compiler_fence;
2726 /// static IMPORTANT_VARIABLE: AtomicUsize = AtomicUsize::new(0);
2727 /// static IS_READY: AtomicBool = AtomicBool::new(false);
2730 /// IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
2731 /// // prevent earlier writes from being moved beyond this point
2732 /// compiler_fence(Ordering::Release);
2733 /// IS_READY.store(true, Ordering::Relaxed);
2736 /// fn signal_handler() {
2737 /// if IS_READY.load(Ordering::Relaxed) {
2738 /// assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
2743 /// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
2745 #[stable(feature = "compiler_fences", since = "1.21.0")]
2746 pub fn compiler_fence(order: Ordering) {
2747 // SAFETY: using an atomic fence is safe.
2750 Acquire => intrinsics::atomic_singlethreadfence_acq(),
2751 Release => intrinsics::atomic_singlethreadfence_rel(),
2752 AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
2753 SeqCst => intrinsics::atomic_singlethreadfence(),
2754 Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
2759 #[cfg(target_has_atomic_load_store = "8")]
2760 #[stable(feature = "atomic_debug", since = "1.3.0")]
2761 impl fmt::Debug for AtomicBool {
2762 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2763 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2767 #[cfg(target_has_atomic_load_store = "ptr")]
2768 #[stable(feature = "atomic_debug", since = "1.3.0")]
2769 impl<T> fmt::Debug for AtomicPtr<T> {
2770 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2771 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2775 #[cfg(target_has_atomic_load_store = "ptr")]
2776 #[stable(feature = "atomic_pointer", since = "1.24.0")]
2777 impl<T> fmt::Pointer for AtomicPtr<T> {
2778 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2779 fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)
2783 /// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
2785 /// This function is deprecated in favor of [`hint::spin_loop`].
2787 /// [`hint::spin_loop`]: crate::hint::spin_loop
2789 #[stable(feature = "spin_loop_hint", since = "1.24.0")]
2790 #[rustc_deprecated(since = "1.51.0", reason = "use hint::spin_loop instead")]
2791 pub fn spin_loop_hint() {