3 //! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent types.
7 //! This module defines atomic versions of a select number of primitive
8 //! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
9 //! [`AtomicI8`], [`AtomicU16`], etc.
10 //! Atomic types present operations that, when used correctly, synchronize
11 //! updates between threads.
13 //! Each method takes an [`Ordering`] which represents the strength of
14 //! the memory barrier for that operation. These orderings are the
15 //! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2].
17 //! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order
18 //! [2]: ../../../nomicon/atomics.html
20 //! Atomic variables are safe to share between threads (they implement [`Sync`])
21 //! but they do not themselves provide the mechanism for sharing and follow the
22 //! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
23 //! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
24 //! atomically-reference-counted shared pointer).
26 //! [arc]: ../../../std/sync/struct.Arc.html
28 //! Atomic types may be stored in static variables, initialized using
29 //! the constant initializers like [`AtomicBool::new`]. Atomic statics
30 //! are often used for lazy global initialization.
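//!
//! For example, an atomic static can serve as a one-shot initialization flag.
//! A minimal sketch (the `INITIALIZED` static and `init_once` function are
//! illustrative, not items of this module):
//!
//! ```
//! use std::sync::atomic::{AtomicBool, Ordering};
//!
//! static INITIALIZED: AtomicBool = AtomicBool::new(false);
//!
//! fn init_once() {
//!     // `swap` returns the previous value, so only the first caller observes `false`.
//!     if !INITIALIZED.swap(true, Ordering::SeqCst) {
//!         // ... perform the one-time setup here ...
//!     }
//! }
//! ```
//!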
34 //! All atomic types in this module are guaranteed to be [lock-free] if they're
35 //! available. This means they don't internally acquire a global mutex. Atomic
36 //! types and operations are not guaranteed to be wait-free. This means that
37 //! operations like `fetch_or` may be implemented with a compare-and-swap loop.
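//!
//! As an illustration only (not the actual implementation), such a
//! read-modify-write operation can be expressed as a `compare_exchange_weak` loop:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering};
//!
//! // Emulates `fetch_or` with a compare-and-swap loop; illustrative sketch.
//! fn fetch_or_with_cas(a: &AtomicUsize, val: usize) -> usize {
//!     let mut old = a.load(Ordering::Relaxed);
//!     loop {
//!         match a.compare_exchange_weak(old, old | val, Ordering::SeqCst, Ordering::Relaxed) {
//!             Ok(prev) => return prev,
//!             Err(actual) => old = actual,
//!         }
//!     }
//! }
//!
//! let x = AtomicUsize::new(0b01);
//! assert_eq!(fetch_or_with_cas(&x, 0b10), 0b01);
//! assert_eq!(x.load(Ordering::SeqCst), 0b11);
//! ```
//!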
39 //! Atomic operations may be implemented at the instruction layer with
40 //! larger-size atomics. For example some platforms use 4-byte atomic
41 //! instructions to implement `AtomicI8`. Note that this emulation should not
//! have an impact on the correctness of code; it's just something to be aware of.
44 //! The atomic types in this module may not be available on all platforms. The
45 //! atomic types here are all widely available, however, and can generally be
//! relied upon to exist. Some notable exceptions are:
48 //! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or
49 //! `AtomicI64` types.
//! * ARM platforms like `armv5te` that aren't for Linux do not have any atomics at all.
52 //! * ARM targets with `thumbv6m` do not have atomic operations at all.
54 //! Note that future platforms may be added that also do not have support for
55 //! some atomic operations. Maximally portable code will want to be careful
56 //! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are
57 //! generally the most portable, but even then they're not available everywhere.
//! For reference, the `std` library requires pointer-sized atomics, although `core` does not.
61 //! Currently you'll need to use `#[cfg(target_arch)]` primarily to
62 //! conditionally compile in code with atomics. There is an unstable
63 //! `#[cfg(target_has_atomic)]` as well which may be stabilized in the future.
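//!
//! A sketch of such gating (the chosen architectures are examples only; adjust
//! them to the targets you actually support):
//!
//! ```
//! // Only compile the 64-bit counter on targets known to provide `AtomicU64`.
//! #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
//! mod wide_counter {
//!     use std::sync::atomic::{AtomicU64, Ordering};
//!
//!     pub static COUNTER: AtomicU64 = AtomicU64::new(0);
//!
//!     pub fn bump() -> u64 {
//!         COUNTER.fetch_add(1, Ordering::Relaxed)
//!     }
//! }
//! ```
//!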
65 //! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm
69 //! A simple spinlock:
//!
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::thread;
//!
//! let spinlock = Arc::new(AtomicUsize::new(1));
//! let spinlock_clone = Arc::clone(&spinlock);
//! let thread = thread::spawn(move || {
//!     spinlock_clone.store(0, Ordering::SeqCst);
//! });
//!
//! // Wait for the other thread to release the lock
//! while spinlock.load(Ordering::SeqCst) != 0 {}
//!
//! if let Err(panic) = thread.join() {
//!     println!("Thread had an error: {:?}", panic);
//! }
//! ```
93 //! Keep a global count of live threads:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering};
//!
//! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0);
//!
//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
//! println!("live threads: {}", old_thread_count + 1);
//! ```
104 #![stable(feature = "rust1", since = "1.0.0")]
105 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
106 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))]
108 use self::Ordering::*;
110 use crate::cell::UnsafeCell;
112 use crate::intrinsics;
114 use crate::hint::spin_loop;
116 /// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
/// Upon receiving the spin-loop signal the processor can optimize its behavior by, for example, saving
119 /// power or switching hyper-threads.
/// This function is different from [`std::thread::yield_now`], which directly yields to the
122 /// system's scheduler, whereas `spin_loop_hint` does not interact with the operating system.
124 /// A common use case for `spin_loop_hint` is implementing bounded optimistic spinning in a CAS
125 /// loop in synchronization primitives. To avoid problems like priority inversion, it is strongly
126 /// recommended that the spin loop is terminated after a finite amount of iterations and an
127 /// appropriate blocking syscall is made.
129 /// **Note**: On platforms that do not support receiving spin-loop hints this function does not
130 /// do anything at all.
132 /// [`std::thread::yield_now`]: ../../../std/thread/fn.yield_now.html
133 /// [`std::thread::sleep`]: ../../../std/thread/fn.sleep.html
134 /// [`std::sync::Mutex`]: ../../../std/sync/struct.Mutex.html
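///
/// A sketch of the bounded optimistic spinning described above (the iteration
/// bound of 100 and the fallback to [`std::thread::yield_now`] are illustrative
/// choices, not a prescription):
///
/// ```
/// use std::sync::atomic::{spin_loop_hint, AtomicBool, Ordering};
///
/// fn wait_for(flag: &AtomicBool) {
///     // Spin for a bounded number of iterations ...
///     for _ in 0..100 {
///         if flag.load(Ordering::Acquire) {
///             return;
///         }
///         spin_loop_hint();
///     }
///     // ... then stop burning CPU and let the scheduler run something else
///     // (a real synchronization primitive would block here instead).
///     std::thread::yield_now();
/// }
///
/// let ready = AtomicBool::new(true);
/// wait_for(&ready);
/// ```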
136 #[stable(feature = "spin_loop_hint", since = "1.24.0")]
pub fn spin_loop_hint() {
    spin_loop()
}
141 /// A boolean type which can be safely shared between threads.
143 /// This type has the same in-memory representation as a [`bool`].
145 /// **Note**: This type is only available on platforms that support atomic
146 /// loads and stores of `u8`.
147 #[cfg(target_has_atomic_load_store = "8")]
148 #[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
    v: UnsafeCell<u8>,
}
154 #[cfg(target_has_atomic_load_store = "8")]
155 #[stable(feature = "rust1", since = "1.0.0")]
156 impl Default for AtomicBool {
157 /// Creates an `AtomicBool` initialized to `false`.
    fn default() -> Self {
        Self::new(false)
    }
}
163 // Send is implicitly implemented for AtomicBool.
164 #[cfg(target_has_atomic_load_store = "8")]
165 #[stable(feature = "rust1", since = "1.0.0")]
166 unsafe impl Sync for AtomicBool {}
168 /// A raw pointer type which can be safely shared between threads.
170 /// This type has the same in-memory representation as a `*mut T`.
172 /// **Note**: This type is only available on platforms that support atomic
173 /// loads and stores of pointers. Its size depends on the target pointer's size.
174 #[cfg(target_has_atomic_load_store = "ptr")]
175 #[stable(feature = "rust1", since = "1.0.0")]
176 #[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
177 #[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
178 #[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
179 pub struct AtomicPtr<T> {
    p: UnsafeCell<*mut T>,
}
183 #[cfg(target_has_atomic_load_store = "ptr")]
184 #[stable(feature = "rust1", since = "1.0.0")]
185 impl<T> Default for AtomicPtr<T> {
186 /// Creates a null `AtomicPtr<T>`.
187 fn default() -> AtomicPtr<T> {
        AtomicPtr::new(crate::ptr::null_mut())
    }
}
192 #[cfg(target_has_atomic_load_store = "ptr")]
193 #[stable(feature = "rust1", since = "1.0.0")]
194 unsafe impl<T> Send for AtomicPtr<T> {}
195 #[cfg(target_has_atomic_load_store = "ptr")]
196 #[stable(feature = "rust1", since = "1.0.0")]
197 unsafe impl<T> Sync for AtomicPtr<T> {}
199 /// Atomic memory orderings
201 /// Memory orderings specify the way atomic operations synchronize memory.
202 /// In its weakest [`Ordering::Relaxed`], only the memory directly touched by the
203 /// operation is synchronized. On the other hand, a store-load pair of [`Ordering::SeqCst`]
204 /// operations synchronize other memory while additionally preserving a total order of such
205 /// operations across all threads.
207 /// Rust's memory orderings are [the same as those of
208 /// C++20](https://en.cppreference.com/w/cpp/atomic/memory_order).
210 /// For more information see the [nomicon].
212 /// [nomicon]: ../../../nomicon/atomics.html
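///
/// As a small sketch, a [`Release`] store paired with an [`Acquire`] load can be
/// used to publish data from one thread to another (the variable names are
/// illustrative):
///
/// ```
/// use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
/// use std::sync::Arc;
/// use std::thread;
///
/// let data = Arc::new(AtomicUsize::new(0));
/// let ready = Arc::new(AtomicBool::new(false));
///
/// let (d, r) = (Arc::clone(&data), Arc::clone(&ready));
/// let producer = thread::spawn(move || {
///     d.store(42, Ordering::Relaxed);
///     r.store(true, Ordering::Release); // publish the data
/// });
///
/// // The `Acquire` load that observes `true` synchronizes-with the `Release`
/// // store, so the write of 42 is guaranteed to be visible afterwards.
/// while !ready.load(Ordering::Acquire) {}
/// assert_eq!(data.load(Ordering::Relaxed), 42);
/// producer.join().unwrap();
/// ```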
213 #[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub enum Ordering {
217 /// No ordering constraints, only atomic operations.
219 /// Corresponds to [`memory_order_relaxed`] in C++20.
221 /// [`memory_order_relaxed`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
224 /// When coupled with a store, all previous operations become ordered
225 /// before any load of this value with [`Acquire`] (or stronger) ordering.
226 /// In particular, all previous writes become visible to all threads
227 /// that perform an [`Acquire`] (or stronger) load of this value.
229 /// Notice that using this ordering for an operation that combines loads
230 /// and stores leads to a [`Relaxed`] load operation!
232 /// This ordering is only applicable for operations that can perform a store.
234 /// Corresponds to [`memory_order_release`] in C++20.
236 /// [`memory_order_release`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
239 /// When coupled with a load, if the loaded value was written by a store operation with
240 /// [`Release`] (or stronger) ordering, then all subsequent operations
241 /// become ordered after that store. In particular, all subsequent loads will see data
242 /// written before the store.
244 /// Notice that using this ordering for an operation that combines loads
245 /// and stores leads to a [`Relaxed`] store operation!
247 /// This ordering is only applicable for operations that can perform a load.
249 /// Corresponds to [`memory_order_acquire`] in C++20.
251 /// [`memory_order_acquire`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
254 /// Has the effects of both [`Acquire`] and [`Release`] together:
255 /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
257 /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
258 /// not performing any store and hence it has just [`Acquire`] ordering. However,
259 /// `AcqRel` will never perform [`Relaxed`] accesses.
261 /// This ordering is only applicable for operations that combine both loads and stores.
263 /// Corresponds to [`memory_order_acq_rel`] in C++20.
265 /// [`memory_order_acq_rel`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
268 /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
269 /// operations, respectively) with the additional guarantee that all threads see all
270 /// sequentially consistent operations in the same order.
272 /// Corresponds to [`memory_order_seq_cst`] in C++20.
274 /// [`memory_order_seq_cst`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
}
279 /// An [`AtomicBool`] initialized to `false`.
280 #[cfg(target_has_atomic_load_store = "8")]
281 #[stable(feature = "rust1", since = "1.0.0")]
#[rustc_deprecated(
    since = "1.34.0",
    reason = "the `new` function is now preferred",
    suggestion = "AtomicBool::new(false)"
)]
287 pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
#[cfg(target_has_atomic_load_store = "8")]
impl AtomicBool {
291 /// Creates a new `AtomicBool`.
296 /// use std::sync::atomic::AtomicBool;
298 /// let atomic_true = AtomicBool::new(true);
299 /// let atomic_false = AtomicBool::new(false);
302 #[stable(feature = "rust1", since = "1.0.0")]
303 #[rustc_const_stable(feature = "const_atomic_new", since = "1.32.0")]
304 pub const fn new(v: bool) -> AtomicBool {
305 AtomicBool { v: UnsafeCell::new(v as u8) }
308 /// Returns a mutable reference to the underlying [`bool`].
310 /// This is safe because the mutable reference guarantees that no other threads are
311 /// concurrently accessing the atomic data.
316 /// use std::sync::atomic::{AtomicBool, Ordering};
318 /// let mut some_bool = AtomicBool::new(true);
319 /// assert_eq!(*some_bool.get_mut(), true);
320 /// *some_bool.get_mut() = false;
321 /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
324 #[stable(feature = "atomic_access", since = "1.15.0")]
325 pub fn get_mut(&mut self) -> &mut bool {
326 // SAFETY: the mutable reference guarantees unique ownership.
327 unsafe { &mut *(self.v.get() as *mut bool) }
330 /// Get atomic access to a `&mut bool`.
335 /// #![feature(atomic_from_mut)]
336 /// use std::sync::atomic::{AtomicBool, Ordering};
338 /// let mut some_bool = true;
339 /// let a = AtomicBool::from_mut(&mut some_bool);
340 /// a.store(false, Ordering::Relaxed);
341 /// assert_eq!(some_bool, false);
344 #[cfg(target_has_atomic_equal_alignment = "8")]
345 #[unstable(feature = "atomic_from_mut", issue = "76314")]
346 pub fn from_mut(v: &mut bool) -> &Self {
347 // SAFETY: the mutable reference guarantees unique ownership, and
348 // alignment of both `bool` and `Self` is 1.
349 unsafe { &*(v as *mut bool as *mut Self) }
352 /// Consumes the atomic and returns the contained value.
354 /// This is safe because passing `self` by value guarantees that no other threads are
355 /// concurrently accessing the atomic data.
360 /// use std::sync::atomic::AtomicBool;
362 /// let some_bool = AtomicBool::new(true);
363 /// assert_eq!(some_bool.into_inner(), true);
366 #[stable(feature = "atomic_access", since = "1.15.0")]
367 pub fn into_inner(self) -> bool {
368 self.v.into_inner() != 0
371 /// Loads a value from the bool.
373 /// `load` takes an [`Ordering`] argument which describes the memory ordering
374 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
378 /// Panics if `order` is [`Release`] or [`AcqRel`].
383 /// use std::sync::atomic::{AtomicBool, Ordering};
385 /// let some_bool = AtomicBool::new(true);
387 /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
390 #[stable(feature = "rust1", since = "1.0.0")]
391 pub fn load(&self, order: Ordering) -> bool {
392 // SAFETY: any data races are prevented by atomic intrinsics and the raw
393 // pointer passed in is valid because we got it from a reference.
394 unsafe { atomic_load(self.v.get(), order) != 0 }
397 /// Stores a value into the bool.
399 /// `store` takes an [`Ordering`] argument which describes the memory ordering
400 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
404 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
409 /// use std::sync::atomic::{AtomicBool, Ordering};
411 /// let some_bool = AtomicBool::new(true);
413 /// some_bool.store(false, Ordering::Relaxed);
414 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
417 #[stable(feature = "rust1", since = "1.0.0")]
418 pub fn store(&self, val: bool, order: Ordering) {
419 // SAFETY: any data races are prevented by atomic intrinsics and the raw
420 // pointer passed in is valid because we got it from a reference.
422 atomic_store(self.v.get(), val as u8, order);
426 /// Stores a value into the bool, returning the previous value.
428 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
429 /// of this operation. All ordering modes are possible. Note that using
430 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
431 /// using [`Release`] makes the load part [`Relaxed`].
433 /// **Note:** This method is only available on platforms that support atomic
434 /// operations on `u8`.
439 /// use std::sync::atomic::{AtomicBool, Ordering};
441 /// let some_bool = AtomicBool::new(true);
443 /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
444 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
447 #[stable(feature = "rust1", since = "1.0.0")]
448 #[cfg(target_has_atomic = "8")]
449 pub fn swap(&self, val: bool, order: Ordering) -> bool {
450 // SAFETY: data races are prevented by atomic intrinsics.
451 unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
454 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
/// The return value is always the previous value. If it is equal to `current`, then the value was updated.
459 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
460 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
461 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
462 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
463 /// happens, and using [`Release`] makes the load part [`Relaxed`].
465 /// **Note:** This method is only available on platforms that support atomic
466 /// operations on `u8`.
471 /// use std::sync::atomic::{AtomicBool, Ordering};
473 /// let some_bool = AtomicBool::new(true);
475 /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
476 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
478 /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
479 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
482 #[stable(feature = "rust1", since = "1.0.0")]
483 #[cfg(target_has_atomic = "8")]
484 pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }
491 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
493 /// The return value is a result indicating whether the new value was written and containing
494 /// the previous value. On success this value is guaranteed to be equal to `current`.
496 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
497 /// ordering of this operation. The first describes the required ordering if the
498 /// operation succeeds while the second describes the required ordering when the
499 /// operation fails. Using [`Acquire`] as success ordering makes the store part
500 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
501 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
502 /// and must be equivalent to or weaker than the success ordering.
504 /// **Note:** This method is only available on platforms that support atomic
505 /// operations on `u8`.
510 /// use std::sync::atomic::{AtomicBool, Ordering};
512 /// let some_bool = AtomicBool::new(true);
/// assert_eq!(some_bool.compare_exchange(true, false, Ordering::Acquire, Ordering::Relaxed),
///            Ok(true));
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
///
/// assert_eq!(some_bool.compare_exchange(true, true, Ordering::SeqCst, Ordering::Acquire),
///            Err(false));
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
528 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
529 #[cfg(target_has_atomic = "8")]
    pub fn compare_exchange(
        &self,
        current: bool,
        new: bool,
        success: Ordering,
        failure: Ordering,
    ) -> Result<bool, bool> {
        // SAFETY: data races are prevented by atomic intrinsics.
        match unsafe {
            atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
        } {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }
546 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
548 /// Unlike [`AtomicBool::compare_exchange`], this function is allowed to spuriously fail even when the
549 /// comparison succeeds, which can result in more efficient code on some platforms. The
/// return value is a result indicating whether the new value was written and containing the
/// previous value.
553 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
554 /// ordering of this operation. The first describes the required ordering if the
555 /// operation succeeds while the second describes the required ordering when the
556 /// operation fails. Using [`Acquire`] as success ordering makes the store part
557 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
558 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
559 /// and must be equivalent to or weaker than the success ordering.
561 /// **Note:** This method is only available on platforms that support atomic
562 /// operations on `u8`.
567 /// use std::sync::atomic::{AtomicBool, Ordering};
/// let val = AtomicBool::new(false);
///
/// let new = true;
/// let mut old = val.load(Ordering::Relaxed);
/// loop {
///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
///         Ok(_) => break,
///         Err(x) => old = x,
///     }
/// }
581 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
582 #[cfg(target_has_atomic = "8")]
    pub fn compare_exchange_weak(
        &self,
        current: bool,
        new: bool,
        success: Ordering,
        failure: Ordering,
    ) -> Result<bool, bool> {
        // SAFETY: data races are prevented by atomic intrinsics.
        match unsafe {
            atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
        } {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }
599 /// Logical "and" with a boolean value.
601 /// Performs a logical "and" operation on the current value and the argument `val`, and sets
602 /// the new value to the result.
604 /// Returns the previous value.
606 /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
607 /// of this operation. All ordering modes are possible. Note that using
608 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
609 /// using [`Release`] makes the load part [`Relaxed`].
611 /// **Note:** This method is only available on platforms that support atomic
612 /// operations on `u8`.
617 /// use std::sync::atomic::{AtomicBool, Ordering};
619 /// let foo = AtomicBool::new(true);
620 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
621 /// assert_eq!(foo.load(Ordering::SeqCst), false);
623 /// let foo = AtomicBool::new(true);
624 /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
625 /// assert_eq!(foo.load(Ordering::SeqCst), true);
627 /// let foo = AtomicBool::new(false);
628 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
629 /// assert_eq!(foo.load(Ordering::SeqCst), false);
632 #[stable(feature = "rust1", since = "1.0.0")]
633 #[cfg(target_has_atomic = "8")]
634 pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
635 // SAFETY: data races are prevented by atomic intrinsics.
636 unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
639 /// Logical "nand" with a boolean value.
641 /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
642 /// the new value to the result.
644 /// Returns the previous value.
646 /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
647 /// of this operation. All ordering modes are possible. Note that using
648 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
649 /// using [`Release`] makes the load part [`Relaxed`].
651 /// **Note:** This method is only available on platforms that support atomic
652 /// operations on `u8`.
657 /// use std::sync::atomic::{AtomicBool, Ordering};
659 /// let foo = AtomicBool::new(true);
660 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
661 /// assert_eq!(foo.load(Ordering::SeqCst), true);
663 /// let foo = AtomicBool::new(true);
664 /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
665 /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
666 /// assert_eq!(foo.load(Ordering::SeqCst), false);
668 /// let foo = AtomicBool::new(false);
669 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
670 /// assert_eq!(foo.load(Ordering::SeqCst), true);
673 #[stable(feature = "rust1", since = "1.0.0")]
674 #[cfg(target_has_atomic = "8")]
675 pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
676 // We can't use atomic_nand here because it can result in a bool with
677 // an invalid value. This happens because the atomic operation is done
678 // with an 8-bit integer internally, which would set the upper 7 bits.
679 // So we just use fetch_xor or swap instead.
        if val {
            // !(x & true) == !x
            // We must invert the bool.
            self.fetch_xor(true, order)
        } else {
            // !(x & false) == true
            // We must set the bool to true.
            self.swap(true, order)
        }
    }
691 /// Logical "or" with a boolean value.
693 /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
694 /// new value to the result.
696 /// Returns the previous value.
698 /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
699 /// of this operation. All ordering modes are possible. Note that using
700 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
701 /// using [`Release`] makes the load part [`Relaxed`].
703 /// **Note:** This method is only available on platforms that support atomic
704 /// operations on `u8`.
709 /// use std::sync::atomic::{AtomicBool, Ordering};
711 /// let foo = AtomicBool::new(true);
712 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
713 /// assert_eq!(foo.load(Ordering::SeqCst), true);
715 /// let foo = AtomicBool::new(true);
716 /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
717 /// assert_eq!(foo.load(Ordering::SeqCst), true);
719 /// let foo = AtomicBool::new(false);
720 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
721 /// assert_eq!(foo.load(Ordering::SeqCst), false);
724 #[stable(feature = "rust1", since = "1.0.0")]
725 #[cfg(target_has_atomic = "8")]
726 pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
727 // SAFETY: data races are prevented by atomic intrinsics.
728 unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
731 /// Logical "xor" with a boolean value.
733 /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
734 /// the new value to the result.
736 /// Returns the previous value.
738 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
739 /// of this operation. All ordering modes are possible. Note that using
740 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
741 /// using [`Release`] makes the load part [`Relaxed`].
743 /// **Note:** This method is only available on platforms that support atomic
744 /// operations on `u8`.
749 /// use std::sync::atomic::{AtomicBool, Ordering};
751 /// let foo = AtomicBool::new(true);
752 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
753 /// assert_eq!(foo.load(Ordering::SeqCst), true);
755 /// let foo = AtomicBool::new(true);
756 /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
757 /// assert_eq!(foo.load(Ordering::SeqCst), false);
759 /// let foo = AtomicBool::new(false);
760 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
761 /// assert_eq!(foo.load(Ordering::SeqCst), false);
764 #[stable(feature = "rust1", since = "1.0.0")]
765 #[cfg(target_has_atomic = "8")]
766 pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
767 // SAFETY: data races are prevented by atomic intrinsics.
768 unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
771 /// Returns a mutable pointer to the underlying [`bool`].
/// Doing non-atomic reads and writes on the resulting boolean can be a data race.
774 /// This method is mostly useful for FFI, where the function signature may use
775 /// `*mut bool` instead of `&AtomicBool`.
777 /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
778 /// atomic types work with interior mutability. All modifications of an atomic change the value
779 /// through a shared reference, and can do so safely as long as they use atomic operations. Any
780 /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
781 /// restriction: operations on it must be atomic.
785 /// ```ignore (extern-declaration)
787 /// use std::sync::atomic::AtomicBool;
/// extern "C" {
///     fn my_atomic_op(arg: *mut bool);
/// }
///
/// let mut atomic = AtomicBool::new(true);
/// unsafe {
///     my_atomic_op(atomic.as_mut_ptr());
/// }
/// ```
799 #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")]
800 pub fn as_mut_ptr(&self) -> *mut bool {
        self.v.get() as *mut bool
    }
}
805 #[cfg(target_has_atomic_load_store = "ptr")]
806 impl<T> AtomicPtr<T> {
807 /// Creates a new `AtomicPtr`.
812 /// use std::sync::atomic::AtomicPtr;
814 /// let ptr = &mut 5;
815 /// let atomic_ptr = AtomicPtr::new(ptr);
818 #[stable(feature = "rust1", since = "1.0.0")]
819 #[rustc_const_stable(feature = "const_atomic_new", since = "1.32.0")]
820 pub const fn new(p: *mut T) -> AtomicPtr<T> {
821 AtomicPtr { p: UnsafeCell::new(p) }
824 /// Returns a mutable reference to the underlying pointer.
826 /// This is safe because the mutable reference guarantees that no other threads are
827 /// concurrently accessing the atomic data.
832 /// use std::sync::atomic::{AtomicPtr, Ordering};
834 /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
835 /// *atomic_ptr.get_mut() = &mut 5;
836 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
839 #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn get_mut(&mut self) -> &mut *mut T {
        // SAFETY: the mutable reference guarantees unique ownership.
        unsafe { &mut *self.p.get() }
    }
844 /// Get atomic access to a pointer.
849 /// #![feature(atomic_from_mut)]
850 /// use std::sync::atomic::{AtomicPtr, Ordering};
852 /// let mut some_ptr = &mut 123 as *mut i32;
853 /// let a = AtomicPtr::from_mut(&mut some_ptr);
854 /// a.store(&mut 456, Ordering::Relaxed);
855 /// assert_eq!(unsafe { *some_ptr }, 456);
858 #[cfg(target_has_atomic_equal_alignment = "ptr")]
859 #[unstable(feature = "atomic_from_mut", issue = "76314")]
860 pub fn from_mut(v: &mut *mut T) -> &Self {
861 use crate::mem::align_of;
862 let [] = [(); align_of::<AtomicPtr<()>>() - align_of::<*mut ()>()];
864 // - the mutable reference guarantees unique ownership.
865 // - the alignment of `*mut T` and `Self` is the same on all platforms
866 // supported by rust, as verified above.
867 unsafe { &*(v as *mut *mut T as *mut Self) }
870 /// Consumes the atomic and returns the contained value.
872 /// This is safe because passing `self` by value guarantees that no other threads are
873 /// concurrently accessing the atomic data.
878 /// use std::sync::atomic::AtomicPtr;
880 /// let atomic_ptr = AtomicPtr::new(&mut 5);
881 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
884 #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn into_inner(self) -> *mut T {
        self.p.into_inner()
    }
889 /// Loads a value from the pointer.
891 /// `load` takes an [`Ordering`] argument which describes the memory ordering
892 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
896 /// Panics if `order` is [`Release`] or [`AcqRel`].
901 /// use std::sync::atomic::{AtomicPtr, Ordering};
903 /// let ptr = &mut 5;
904 /// let some_ptr = AtomicPtr::new(ptr);
906 /// let value = some_ptr.load(Ordering::Relaxed);
909 #[stable(feature = "rust1", since = "1.0.0")]
910 pub fn load(&self, order: Ordering) -> *mut T {
911 // SAFETY: data races are prevented by atomic intrinsics.
912 unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
915 /// Stores a value into the pointer.
917 /// `store` takes an [`Ordering`] argument which describes the memory ordering
918 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
922 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
927 /// use std::sync::atomic::{AtomicPtr, Ordering};
929 /// let ptr = &mut 5;
930 /// let some_ptr = AtomicPtr::new(ptr);
932 /// let other_ptr = &mut 10;
934 /// some_ptr.store(other_ptr, Ordering::Relaxed);
937 #[stable(feature = "rust1", since = "1.0.0")]
938 pub fn store(&self, ptr: *mut T, order: Ordering) {
939 // SAFETY: data races are prevented by atomic intrinsics.
941 atomic_store(self.p.get() as *mut usize, ptr as usize, order);
945 /// Stores a value into the pointer, returning the previous value.
947 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
948 /// of this operation. All ordering modes are possible. Note that using
949 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
950 /// using [`Release`] makes the load part [`Relaxed`].
952 /// **Note:** This method is only available on platforms that support atomic
953 /// operations on pointers.
958 /// use std::sync::atomic::{AtomicPtr, Ordering};
960 /// let ptr = &mut 5;
961 /// let some_ptr = AtomicPtr::new(ptr);
963 /// let other_ptr = &mut 10;
965 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
968 #[stable(feature = "rust1", since = "1.0.0")]
969 #[cfg(target_has_atomic = "ptr")]
970 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
971 // SAFETY: data races are prevented by atomic intrinsics.
972 unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
975 /// Stores a value into the pointer if the current value is the same as the `current` value.
/// The return value is always the previous value. If it is equal to `current`, then the value was updated.
980 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
981 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
982 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
983 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
984 /// happens, and using [`Release`] makes the load part [`Relaxed`].
986 /// **Note:** This method is only available on platforms that support atomic
987 /// operations on pointers.
992 /// use std::sync::atomic::{AtomicPtr, Ordering};
994 /// let ptr = &mut 5;
995 /// let some_ptr = AtomicPtr::new(ptr);
997 /// let other_ptr = &mut 10;
999 /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
1002 #[stable(feature = "rust1", since = "1.0.0")]
1003 #[cfg(target_has_atomic = "ptr")]
1004 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }
1011 /// Stores a value into the pointer if the current value is the same as the `current` value.
1013 /// The return value is a result indicating whether the new value was written and containing
1014 /// the previous value. On success this value is guaranteed to be equal to `current`.
1016 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1017 /// ordering of this operation. The first describes the required ordering if the
1018 /// operation succeeds while the second describes the required ordering when the
1019 /// operation fails. Using [`Acquire`] as success ordering makes the store part
1020 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1021 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1022 /// and must be equivalent to or weaker than the success ordering.
1024 /// **Note:** This method is only available on platforms that support atomic
1025 /// operations on pointers.
1030 /// use std::sync::atomic::{AtomicPtr, Ordering};
1032 /// let ptr = &mut 5;
1033 /// let some_ptr = AtomicPtr::new(ptr);
1035 /// let other_ptr = &mut 10;
1037 /// let value = some_ptr.compare_exchange(ptr, other_ptr,
1038 /// Ordering::SeqCst, Ordering::Relaxed);
1041 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1042 #[cfg(target_has_atomic = "ptr")]
    pub fn compare_exchange(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        // SAFETY: data races are prevented by atomic intrinsics.
        let res = unsafe {
            atomic_compare_exchange(self.p.get() as *mut usize, current as usize, new as usize, success, failure)
        };
        match res {
            Ok(x) => Ok(x as *mut T),
            Err(x) => Err(x as *mut T),
        }
    }
1066 /// Stores a value into the pointer if the current value is the same as the `current` value.
1068 /// Unlike [`AtomicPtr::compare_exchange`], this function is allowed to spuriously fail even when the
1069 /// comparison succeeds, which can result in more efficient code on some platforms. The
/// return value is a result indicating whether the new value was written and containing the
/// previous value.
1073 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1074 /// ordering of this operation. The first describes the required ordering if the
1075 /// operation succeeds while the second describes the required ordering when the
1076 /// operation fails. Using [`Acquire`] as success ordering makes the store part
1077 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1078 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1079 /// and must be equivalent to or weaker than the success ordering.
1081 /// **Note:** This method is only available on platforms that support atomic
1082 /// operations on pointers.
1087 /// use std::sync::atomic::{AtomicPtr, Ordering};
1089 /// let some_ptr = AtomicPtr::new(&mut 5);
1091 /// let new = &mut 10;
/// let mut old = some_ptr.load(Ordering::Relaxed);
/// loop {
///     match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
///         Ok(_) => break,
///         Err(x) => old = x,
///     }
/// }
1101 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1102 #[cfg(target_has_atomic = "ptr")]
    pub fn compare_exchange_weak(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        // SAFETY: data races are prevented by atomic intrinsics.
        let res = unsafe {
            atomic_compare_exchange_weak(self.p.get() as *mut usize, current as usize, new as usize, success, failure)
        };
        match res {
            Ok(x) => Ok(x as *mut T),
            Err(x) => Err(x as *mut T),
        }
    }
}
1127 #[cfg(target_has_atomic_load_store = "8")]
1128 #[stable(feature = "atomic_bool_from", since = "1.24.0")]
1129 impl From<bool> for AtomicBool {
1130 /// Converts a `bool` into an `AtomicBool`.
1135 /// use std::sync::atomic::AtomicBool;
1136 /// let atomic_bool = AtomicBool::from(true);
1137 /// assert_eq!(format!("{:?}", atomic_bool), "true")
    fn from(b: bool) -> Self {
        Self::new(b)
    }
}
1145 #[cfg(target_has_atomic_load_store = "ptr")]
1146 #[stable(feature = "atomic_from", since = "1.23.0")]
1147 impl<T> From<*mut T> for AtomicPtr<T> {
    /// Converts a `*mut T` into an `AtomicPtr<T>`.
    fn from(p: *mut T) -> Self {
        Self::new(p)
    }
}
1154 #[allow(unused_macros)] // This macro ends up being unused on some architectures.
1155 macro_rules! if_not_8_bit {
1156 (u8, $($tt:tt)*) => { "" };
1157 (i8, $($tt:tt)*) => { "" };
1158 ($_:ident, $($tt:tt)*) => { $($tt)* };
1161 #[cfg(target_has_atomic_load_store = "8")]
1162 macro_rules! atomic_int {
1168 $stable_access:meta,
1172 $stable_init_const:meta,
1173 $s_int_type:literal, $int_ref:expr,
1174 $extra_feature:expr,
1175 $min_fn:ident, $max_fn:ident,
1178 $int_type:ident $atomic_type:ident $atomic_init:ident) => {
1179 /// An integer type which can be safely shared between threads.
1181 /// This type has the same in-memory representation as the underlying
1182 /// integer type, [`
1183 #[doc = $s_int_type]
/// `](
#[doc = $int_ref]
/// ). For more about the differences between atomic types and
1187 /// non-atomic types as well as information about the portability of
1188 /// this type, please see the [module-level documentation].
1190 /// **Note:** This type is only available on platforms that support
1191 /// atomic loads and stores of [`
1192 #[doc = $s_int_type]
/// `](
#[doc = $int_ref]
/// ).
///
/// [module-level documentation]: crate::sync::atomic
1199 #[repr(C, align($align))]
1200 pub struct $atomic_type {
1201 v: UnsafeCell<$int_type>,
1204 /// An atomic integer initialized to `0`.
1205 #[$stable_init_const]
            #[rustc_deprecated(
                since = "1.34.0",
                reason = "the `new` function is now preferred",
                suggestion = $atomic_new,
            )]
1211 pub const $atomic_init: $atomic_type = $atomic_type::new(0);
1214 impl Default for $atomic_type {
1215 fn default() -> Self {
1216 Self::new(Default::default())
1221 impl From<$int_type> for $atomic_type {
1224 "Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`."),
1226 fn from(v: $int_type) -> Self { Self::new(v) }
1231 impl fmt::Debug for $atomic_type {
1232 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1233 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
1237 // Send is implicitly implemented.
1239 unsafe impl Sync for $atomic_type {}
1243 concat!("Creates a new atomic integer.
1248 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1250 let atomic_forty_two = ", stringify!($atomic_type), "::new(42);
1255 pub const fn new(v: $int_type) -> Self {
1256 Self {v: UnsafeCell::new(v)}
1261 concat!("Returns a mutable reference to the underlying integer.
1263 This is safe because the mutable reference guarantees that no other threads are
1264 concurrently accessing the atomic data.
1269 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1271 let mut some_var = ", stringify!($atomic_type), "::new(10);
1272 assert_eq!(*some_var.get_mut(), 10);
1273 *some_var.get_mut() = 5;
1274 assert_eq!(some_var.load(Ordering::SeqCst), 5);
            pub fn get_mut(&mut self) -> &mut $int_type {
                // SAFETY: the mutable reference guarantees unique ownership.
                unsafe { &mut *self.v.get() }
            }
1284 concat!("Get atomic access to a `&mut ", stringify!($int_type), "`.
1290 "**Note:** This function is only available on targets where `",
1291 stringify!($int_type), "` has an alignment of ", $align, " bytes."
1299 #![feature(atomic_from_mut)]
1300 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1302 let mut some_int = 123;
1303 let a = ", stringify!($atomic_type), "::from_mut(&mut some_int);
1304 a.store(100, Ordering::Relaxed);
1305 assert_eq!(some_int, 100);
1310 #[unstable(feature = "atomic_from_mut", issue = "76314")]
1311 pub fn from_mut(v: &mut $int_type) -> &Self {
1312 use crate::mem::align_of;
1313 let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
1315 // - the mutable reference guarantees unique ownership.
1316 // - the alignment of `$int_type` and `Self` is the
1317 // same, as promised by $cfg_align and verified above.
1318 unsafe { &*(v as *mut $int_type as *mut Self) }
1323 concat!("Consumes the atomic and returns the contained value.
1325 This is safe because passing `self` by value guarantees that no other threads are
1326 concurrently accessing the atomic data.
1331 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1333 let some_var = ", stringify!($atomic_type), "::new(5);
1334 assert_eq!(some_var.into_inner(), 5);
            pub fn into_inner(self) -> $int_type {
                self.v.into_inner()
            }
1344 concat!("Loads a value from the atomic integer.
1346 `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1347 Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1351 Panics if `order` is [`Release`] or [`AcqRel`].
1356 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1358 let some_var = ", stringify!($atomic_type), "::new(5);
1360 assert_eq!(some_var.load(Ordering::Relaxed), 5);
1364 pub fn load(&self, order: Ordering) -> $int_type {
1365 // SAFETY: data races are prevented by atomic intrinsics.
1366 unsafe { atomic_load(self.v.get(), order) }
1371 concat!("Stores a value into the atomic integer.
1373 `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1374 Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
1378 Panics if `order` is [`Acquire`] or [`AcqRel`].
1383 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1385 let some_var = ", stringify!($atomic_type), "::new(5);
1387 some_var.store(10, Ordering::Relaxed);
1388 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1392 pub fn store(&self, val: $int_type, order: Ordering) {
1393 // SAFETY: data races are prevented by atomic intrinsics.
1394 unsafe { atomic_store(self.v.get(), val, order); }
1399 concat!("Stores a value into the atomic integer, returning the previous value.
1401 `swap` takes an [`Ordering`] argument which describes the memory ordering
1402 of this operation. All ordering modes are possible. Note that using
1403 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1404 using [`Release`] makes the load part [`Relaxed`].
1406 **Note**: This method is only available on platforms that support atomic
1407 operations on [`", $s_int_type, "`](", $int_ref, ").
1412 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1414 let some_var = ", stringify!($atomic_type), "::new(5);
1416 assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
1421 pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
1422 // SAFETY: data races are prevented by atomic intrinsics.
1423 unsafe { atomic_swap(self.v.get(), val, order) }
1428 concat!("Stores a value into the atomic integer if the current value is the same as
1429 the `current` value.
The return value is always the previous value. If it is equal to `current`, then the value was updated.
1434 `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1435 ordering of this operation. Notice that even when using [`AcqRel`], the operation
1436 might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1437 Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1438 happens, and using [`Release`] makes the load part [`Relaxed`].
1440 **Note**: This method is only available on platforms that support atomic
1441 operations on [`", $s_int_type, "`](", $int_ref, ").
1446 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1448 let some_var = ", stringify!($atomic_type), "::new(5);
1450 assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
1451 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1453 assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
1454 assert_eq!(some_var.load(Ordering::Relaxed), 10);
            pub fn compare_and_swap(&self,
                                    current: $int_type,
                                    new: $int_type,
                                    order: Ordering) -> $int_type {
                match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
                    Ok(x) => x,
                    Err(x) => x,
                }
            }
1474 concat!("Stores a value into the atomic integer if the current value is the same as
1475 the `current` value.
1477 The return value is a result indicating whether the new value was written and
containing the previous value. On success this value is guaranteed to be equal to `current`.
1481 `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1482 ordering of this operation. The first describes the required ordering if the
1483 operation succeeds while the second describes the required ordering when the
1484 operation fails. Using [`Acquire`] as success ordering makes the store part
1485 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1486 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1487 and must be equivalent to or weaker than the success ordering.
1489 **Note**: This method is only available on platforms that support atomic
1490 operations on [`", $s_int_type, "`](", $int_ref, ").
1495 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1497 let some_var = ", stringify!($atomic_type), "::new(5);
assert_eq!(some_var.compare_exchange(5, 10, Ordering::Acquire, Ordering::Relaxed),
           Ok(5));
assert_eq!(some_var.load(Ordering::Relaxed), 10);

assert_eq!(some_var.compare_exchange(6, 12, Ordering::SeqCst, Ordering::Acquire),
           Err(10));
assert_eq!(some_var.load(Ordering::Relaxed), 10);
            pub fn compare_exchange(&self,
                                    current: $int_type,
                                    new: $int_type,
                                    success: Ordering,
                                    failure: Ordering) -> Result<$int_type, $int_type> {
                // SAFETY: data races are prevented by atomic intrinsics.
                unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
            }
1525 concat!("Stores a value into the atomic integer if the current value is the same as
1526 the `current` value.
1528 Unlike [`", stringify!($atomic_type), "::compare_exchange`], this function is allowed to spuriously fail even
1529 when the comparison succeeds, which can result in more efficient code on some
1530 platforms. The return value is a result indicating whether the new value was
1531 written and containing the previous value.
1533 `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1534 ordering of this operation. The first describes the required ordering if the
1535 operation succeeds while the second describes the required ordering when the
1536 operation fails. Using [`Acquire`] as success ordering makes the store part
1537 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1538 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1539 and must be equivalent to or weaker than the success ordering.
1541 **Note**: This method is only available on platforms that support atomic
1542 operations on [`", $s_int_type, "`](", $int_ref, ").
1547 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1549 let val = ", stringify!($atomic_type), "::new(4);
let mut old = val.load(Ordering::Relaxed);
loop {
    let new = old * 2;
    match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
        Ok(_) => break,
        Err(x) => old = x,
    }
}
            pub fn compare_exchange_weak(&self,
                                         current: $int_type,
                                         new: $int_type,
                                         success: Ordering,
                                         failure: Ordering) -> Result<$int_type, $int_type> {
                // SAFETY: data races are prevented by atomic intrinsics.
                unsafe { atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) }
            }
1576 concat!("Adds to the current value, returning the previous value.
1578 This operation wraps around on overflow.
1580 `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
1581 of this operation. All ordering modes are possible. Note that using
1582 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1583 using [`Release`] makes the load part [`Relaxed`].
1585 **Note**: This method is only available on platforms that support atomic
1586 operations on [`", $s_int_type, "`](", $int_ref, ").
1591 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1593 let foo = ", stringify!($atomic_type), "::new(0);
1594 assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
1595 assert_eq!(foo.load(Ordering::SeqCst), 10);
1600 pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
1601 // SAFETY: data races are prevented by atomic intrinsics.
1602 unsafe { atomic_add(self.v.get(), val, order) }
1607 concat!("Subtracts from the current value, returning the previous value.
1609 This operation wraps around on overflow.
1611 `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
1612 of this operation. All ordering modes are possible. Note that using
1613 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1614 using [`Release`] makes the load part [`Relaxed`].
1616 **Note**: This method is only available on platforms that support atomic
1617 operations on [`", $s_int_type, "`](", $int_ref, ").
1622 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1624 let foo = ", stringify!($atomic_type), "::new(20);
1625 assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
1626 assert_eq!(foo.load(Ordering::SeqCst), 10);
1631 pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
1632 // SAFETY: data races are prevented by atomic intrinsics.
1633 unsafe { atomic_sub(self.v.get(), val, order) }
1638 concat!("Bitwise \"and\" with the current value.
1640 Performs a bitwise \"and\" operation on the current value and the argument `val`, and
1641 sets the new value to the result.
1643 Returns the previous value.
1645 `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
1646 of this operation. All ordering modes are possible. Note that using
1647 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1648 using [`Release`] makes the load part [`Relaxed`].
1650 **Note**: This method is only available on platforms that support atomic
1651 operations on [`", $s_int_type, "`](", $int_ref, ").
1656 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1658 let foo = ", stringify!($atomic_type), "::new(0b101101);
1659 assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
1660 assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
1665 pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
1666 // SAFETY: data races are prevented by atomic intrinsics.
1667 unsafe { atomic_and(self.v.get(), val, order) }
1672 concat!("Bitwise \"nand\" with the current value.
1674 Performs a bitwise \"nand\" operation on the current value and the argument `val`, and
1675 sets the new value to the result.
1677 Returns the previous value.
1679 `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
1680 of this operation. All ordering modes are possible. Note that using
1681 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1682 using [`Release`] makes the load part [`Relaxed`].
1684 **Note**: This method is only available on platforms that support atomic
1685 operations on [`", $s_int_type, "`](", $int_ref, ").
1690 ", $extra_feature, "
1691 use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1693 let foo = ", stringify!($atomic_type), "::new(0x13);
1694 assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
1695 assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
1700 pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
1701 // SAFETY: data races are prevented by atomic intrinsics.
1702 unsafe { atomic_nand(self.v.get(), val, order) }
1707 concat!("Bitwise \"or\" with the current value.
1709 Performs a bitwise \"or\" operation on the current value and the argument `val`, and
1710 sets the new value to the result.
1712 Returns the previous value.
1714 `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
1715 of this operation. All ordering modes are possible. Note that using
1716 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1717 using [`Release`] makes the load part [`Relaxed`].
1719 **Note**: This method is only available on platforms that support atomic
1720 operations on [`", $s_int_type, "`](", $int_ref, ").
1725 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1727 let foo = ", stringify!($atomic_type), "::new(0b101101);
1728 assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
1729 assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
1734 pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
1735 // SAFETY: data races are prevented by atomic intrinsics.
1736 unsafe { atomic_or(self.v.get(), val, order) }
1741 concat!("Bitwise \"xor\" with the current value.
1743 Performs a bitwise \"xor\" operation on the current value and the argument `val`, and
1744 sets the new value to the result.
1746 Returns the previous value.
1748 `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
1749 of this operation. All ordering modes are possible. Note that using
1750 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1751 using [`Release`] makes the load part [`Relaxed`].
1753 **Note**: This method is only available on platforms that support atomic
1754 operations on [`", $s_int_type, "`](", $int_ref, ").
1759 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1761 let foo = ", stringify!($atomic_type), "::new(0b101101);
1762 assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
1763 assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
1768 pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
1769 // SAFETY: data races are prevented by atomic intrinsics.
1770 unsafe { atomic_xor(self.v.get(), val, order) }
1775 concat!("Fetches the value, and applies a function to it that returns an optional
1776 new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
1777 `Err(previous_value)`.
1779 Note: This may call the function multiple times if the value has been changed from other threads in
1780 the meantime, as long as the function returns `Some(_)`, but the function will have been applied
1781 only once to the stored value.
1783 `fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
1784 The first describes the required ordering for when the operation finally succeeds while the second
1785 describes the required ordering for loads. These correspond to the success and failure orderings of
1786 [`", stringify!($atomic_type), "::compare_exchange`] respectively.
1788 Using [`Acquire`] as success ordering makes the store part
1789 of this operation [`Relaxed`], and using [`Release`] makes the final successful load
1790 [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1791 and must be equivalent to or weaker than the success ordering.
1793 **Note**: This method is only available on platforms that support atomic
1794 operations on [`", $s_int_type, "`](", $int_ref, ").
1799 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1801 let x = ", stringify!($atomic_type), "::new(7);
1802 assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7));
1803 assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7));
1804 assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8));
1805 assert_eq!(x.load(Ordering::SeqCst), 9);
1808 #[stable(feature = "no_more_cas", since = "1.45.0")]
1810 pub fn fetch_update<F>(&self,
1811 set_order: Ordering,
1812 fetch_order: Ordering,
1813 mut f: F) -> Result<$int_type, $int_type>
1814 where F: FnMut($int_type) -> Option<$int_type> {
1815 let mut prev = self.load(fetch_order);
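// Retry loop: apply `f` to the most recently observed value and attempt a weak CAS;
// a spurious or contended failure returns the updated value, which is fed back to `f`.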
1816 while let Some(next) = f(prev) {
1817 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
1818 x @ Ok(_) => return x,
1819 Err(next_prev) => prev = next_prev
1827 concat!("Maximum with the current value.
1829 Finds the maximum of the current value and the argument `val`, and
1830 sets the new value to the result.
1832 Returns the previous value.
1834 `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
1835 of this operation. All ordering modes are possible. Note that using
1836 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1837 using [`Release`] makes the load part [`Relaxed`].
1839 **Note**: This method is only available on platforms that support atomic
1840 operations on [`", $s_int_type, "`](", $int_ref, ").
1845 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1847 let foo = ", stringify!($atomic_type), "::new(23);
1848 assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
1849 assert_eq!(foo.load(Ordering::SeqCst), 42);
1852 If you want to obtain the maximum value in one step, you can use the following:
1855 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1857 let foo = ", stringify!($atomic_type), "::new(23);
1859 let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
1860 assert_eq!(max_foo, 42);
1863 #[stable(feature = "atomic_min_max", since = "1.45.0")]
1865 pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
1866 // SAFETY: data races are prevented by atomic intrinsics.
1867 unsafe { $max_fn(self.v.get(), val, order) }
1872 concat!("Minimum with the current value.
1874 Finds the minimum of the current value and the argument `val`, and
1875 sets the new value to the result.
1877 Returns the previous value.
1879 `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
1880 of this operation. All ordering modes are possible. Note that using
1881 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1882 using [`Release`] makes the load part [`Relaxed`].
1884 **Note**: This method is only available on platforms that support atomic
1885 operations on [`", $s_int_type, "`](", $int_ref, ").
1890 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1892 let foo = ", stringify!($atomic_type), "::new(23);
1893 assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
1894 assert_eq!(foo.load(Ordering::Relaxed), 23);
1895 assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
1896 assert_eq!(foo.load(Ordering::Relaxed), 22);
1899 If you want to obtain the minimum value in one step, you can use the following:
1902 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1904 let foo = ", stringify!($atomic_type), "::new(23);
1906 let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
1907 assert_eq!(min_foo, 12);
1910 #[stable(feature = "atomic_min_max", since = "1.45.0")]
1912 pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
1913 // SAFETY: data races are prevented by atomic intrinsics.
1914 unsafe { $min_fn(self.v.get(), val, order) }
1919 concat!("Returns a mutable pointer to the underlying integer.
1921 Doing non-atomic reads and writes on the resulting integer can be a data race.
1922 This method is mostly useful for FFI, where the function signature may use
1923 `*mut ", stringify!($int_type), "` instead of `&", stringify!($atomic_type), "`.
1925 Returning an `*mut` pointer from a shared reference to this atomic is safe because the
1926 atomic types work with interior mutability. All modifications of an atomic change the value
1927 through a shared reference, and can do so safely as long as they use atomic operations. Any
1928 use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
1929 restriction: operations on it must be atomic.
1933 ```ignore (extern-declaration)
1935 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1938 fn my_atomic_op(arg: *mut ", stringify!($int_type), ");
1941 let mut atomic = ", stringify!($atomic_type), "::new(1);
1943 // SAFETY: Safe as long as `my_atomic_op` is atomic.
1945 my_atomic_op(atomic.as_mut_ptr());
1950 #[unstable(feature = "atomic_mut_ptr",
1951 reason = "recently added",
1953 pub fn as_mut_ptr(&self) -> *mut $int_type {
1961 #[cfg(target_has_atomic_load_store = "8")]
1963 cfg(target_has_atomic = "8"),
1964 cfg(target_has_atomic_equal_alignment = "8"),
1965 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1966 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1967 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1968 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1969 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1970 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1971 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
1972 unstable(feature = "integer_atomics", issue = "32976"),
1973 "i8", "../../../std/primitive.i8.html",
1975 atomic_min, atomic_max,
1978 i8 AtomicI8 ATOMIC_I8_INIT
1980 #[cfg(target_has_atomic_load_store = "8")]
1982 cfg(target_has_atomic = "8"),
1983 cfg(target_has_atomic_equal_alignment = "8"),
1984 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1985 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1986 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1987 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1988 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1989 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1990 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
1991 unstable(feature = "integer_atomics", issue = "32976"),
1992 "u8", "../../../std/primitive.u8.html",
1994 atomic_umin, atomic_umax,
1997 u8 AtomicU8 ATOMIC_U8_INIT
1999 #[cfg(target_has_atomic_load_store = "16")]
2001 cfg(target_has_atomic = "16"),
2002 cfg(target_has_atomic_equal_alignment = "16"),
2003 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2004 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2005 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2006 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2007 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2008 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2009 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2010 unstable(feature = "integer_atomics", issue = "32976"),
2011 "i16", "../../../std/primitive.i16.html",
2013 atomic_min, atomic_max,
2015 "AtomicI16::new(0)",
2016 i16 AtomicI16 ATOMIC_I16_INIT
2018 #[cfg(target_has_atomic_load_store = "16")]
2020 cfg(target_has_atomic = "16"),
2021 cfg(target_has_atomic_equal_alignment = "16"),
2022 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2023 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2024 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2025 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2026 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2027 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2028 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2029 unstable(feature = "integer_atomics", issue = "32976"),
2030 "u16", "../../../std/primitive.u16.html",
2032 atomic_umin, atomic_umax,
2034 "AtomicU16::new(0)",
2035 u16 AtomicU16 ATOMIC_U16_INIT
2037 #[cfg(target_has_atomic_load_store = "32")]
2039 cfg(target_has_atomic = "32"),
2040 cfg(target_has_atomic_equal_alignment = "32"),
2041 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2042 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2043 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2044 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2045 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2046 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2047 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2048 unstable(feature = "integer_atomics", issue = "32976"),
2049 "i32", "../../../std/primitive.i32.html",
2051 atomic_min, atomic_max,
2053 "AtomicI32::new(0)",
2054 i32 AtomicI32 ATOMIC_I32_INIT
2056 #[cfg(target_has_atomic_load_store = "32")]
2058 cfg(target_has_atomic = "32"),
2059 cfg(target_has_atomic_equal_alignment = "32"),
2060 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2061 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2062 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2063 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2064 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2065 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2066 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2067 unstable(feature = "integer_atomics", issue = "32976"),
2068 "u32", "../../../std/primitive.u32.html",
2070 atomic_umin, atomic_umax,
2072 "AtomicU32::new(0)",
2073 u32 AtomicU32 ATOMIC_U32_INIT
2075 #[cfg(target_has_atomic_load_store = "64")]
2077 cfg(target_has_atomic = "64"),
2078 cfg(target_has_atomic_equal_alignment = "64"),
2079 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2080 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2081 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2082 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2083 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2084 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2085 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2086 unstable(feature = "integer_atomics", issue = "32976"),
2087 "i64", "../../../std/primitive.i64.html",
2089 atomic_min, atomic_max,
2091 "AtomicI64::new(0)",
2092 i64 AtomicI64 ATOMIC_I64_INIT
2094 #[cfg(target_has_atomic_load_store = "64")]
2096 cfg(target_has_atomic = "64"),
2097 cfg(target_has_atomic_equal_alignment = "64"),
2098 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2099 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2100 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2101 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2102 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2103 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2104 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2105 unstable(feature = "integer_atomics", issue = "32976"),
2106 "u64", "../../../std/primitive.u64.html",
2108 atomic_umin, atomic_umax,
2110 "AtomicU64::new(0)",
2111 u64 AtomicU64 ATOMIC_U64_INIT
2113 #[cfg(target_has_atomic_load_store = "128")]
2115 cfg(target_has_atomic = "128"),
2116 cfg(target_has_atomic_equal_alignment = "128"),
2117 unstable(feature = "integer_atomics", issue = "32976"),
2118 unstable(feature = "integer_atomics", issue = "32976"),
2119 unstable(feature = "integer_atomics", issue = "32976"),
2120 unstable(feature = "integer_atomics", issue = "32976"),
2121 unstable(feature = "integer_atomics", issue = "32976"),
2122 unstable(feature = "integer_atomics", issue = "32976"),
2123 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2124 unstable(feature = "integer_atomics", issue = "32976"),
2125 "i128", "../../../std/primitive.i128.html",
2126 "#![feature(integer_atomics)]\n\n",
2127 atomic_min, atomic_max,
2129 "AtomicI128::new(0)",
2130 i128 AtomicI128 ATOMIC_I128_INIT
2132 #[cfg(target_has_atomic_load_store = "128")]
2134 cfg(target_has_atomic = "128"),
2135 cfg(target_has_atomic_equal_alignment = "128"),
2136 unstable(feature = "integer_atomics", issue = "32976"),
2137 unstable(feature = "integer_atomics", issue = "32976"),
2138 unstable(feature = "integer_atomics", issue = "32976"),
2139 unstable(feature = "integer_atomics", issue = "32976"),
2140 unstable(feature = "integer_atomics", issue = "32976"),
2141 unstable(feature = "integer_atomics", issue = "32976"),
2142 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2143 unstable(feature = "integer_atomics", issue = "32976"),
2144 "u128", "../../../std/primitive.u128.html",
2145 "#![feature(integer_atomics)]\n\n",
2146 atomic_umin, atomic_umax,
2148 "AtomicU128::new(0)",
2149 u128 AtomicU128 ATOMIC_U128_INIT
2151 #[cfg(target_has_atomic_load_store = "ptr")]
2152 #[cfg(target_pointer_width = "16")]
2153 macro_rules! ptr_width {
2158 #[cfg(target_has_atomic_load_store = "ptr")]
2159 #[cfg(target_pointer_width = "32")]
2160 macro_rules! ptr_width {
2165 #[cfg(target_has_atomic_load_store = "ptr")]
2166 #[cfg(target_pointer_width = "64")]
2167 macro_rules! ptr_width {
2172 #[cfg(target_has_atomic_load_store = "ptr")]
2174 cfg(target_has_atomic = "ptr"),
2175 cfg(target_has_atomic_equal_alignment = "ptr"),
2176 stable(feature = "rust1", since = "1.0.0"),
2177 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2178 stable(feature = "atomic_debug", since = "1.3.0"),
2179 stable(feature = "atomic_access", since = "1.15.0"),
2180 stable(feature = "atomic_from", since = "1.23.0"),
2181 stable(feature = "atomic_nand", since = "1.27.0"),
2182 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2183 stable(feature = "rust1", since = "1.0.0"),
2184 "isize", "../../../std/primitive.isize.html",
2186 atomic_min, atomic_max,
2188 "AtomicIsize::new(0)",
2189 isize AtomicIsize ATOMIC_ISIZE_INIT
2191 #[cfg(target_has_atomic_load_store = "ptr")]
2193 cfg(target_has_atomic = "ptr"),
2194 cfg(target_has_atomic_equal_alignment = "ptr"),
2195 stable(feature = "rust1", since = "1.0.0"),
2196 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2197 stable(feature = "atomic_debug", since = "1.3.0"),
2198 stable(feature = "atomic_access", since = "1.15.0"),
2199 stable(feature = "atomic_from", since = "1.23.0"),
2200 stable(feature = "atomic_nand", since = "1.27.0"),
2201 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2202 stable(feature = "rust1", since = "1.0.0"),
2203 "usize", "../../../std/primitive.usize.html",
2205 atomic_umin, atomic_umax,
2207 "AtomicUsize::new(0)",
2208 usize AtomicUsize ATOMIC_USIZE_INIT
2212 #[cfg(target_has_atomic = "8")]
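// Maps a success ordering to the strongest failure ordering that `compare_exchange`
// accepts for it; used to implement `compare_and_swap` in terms of `compare_exchange`.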
2213 fn strongest_failure_ordering(order: Ordering) -> Ordering {
2224 unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {
2225 // SAFETY: the caller must uphold the safety contract for `atomic_store`.
2228 Release => intrinsics::atomic_store_rel(dst, val),
2229 Relaxed => intrinsics::atomic_store_relaxed(dst, val),
2230 SeqCst => intrinsics::atomic_store(dst, val),
2231 Acquire => panic!("there is no such thing as an acquire store"),
2232 AcqRel => panic!("there is no such thing as an acquire/release store"),
2238 unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
2239 // SAFETY: the caller must uphold the safety contract for `atomic_load`.
2242 Acquire => intrinsics::atomic_load_acq(dst),
2243 Relaxed => intrinsics::atomic_load_relaxed(dst),
2244 SeqCst => intrinsics::atomic_load(dst),
2245 Release => panic!("there is no such thing as a release load"),
2246 AcqRel => panic!("there is no such thing as an acquire/release load"),
2252 #[cfg(target_has_atomic = "8")]
2253 unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2254 // SAFETY: the caller must uphold the safety contract for `atomic_swap`.
2257 Acquire => intrinsics::atomic_xchg_acq(dst, val),
2258 Release => intrinsics::atomic_xchg_rel(dst, val),
2259 AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
2260 Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
2261 SeqCst => intrinsics::atomic_xchg(dst, val),
2266 /// Returns the previous value (like __sync_fetch_and_add).
2268 #[cfg(target_has_atomic = "8")]
2269 unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2270 // SAFETY: the caller must uphold the safety contract for `atomic_add`.
2273 Acquire => intrinsics::atomic_xadd_acq(dst, val),
2274 Release => intrinsics::atomic_xadd_rel(dst, val),
2275 AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
2276 Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
2277 SeqCst => intrinsics::atomic_xadd(dst, val),
2282 /// Returns the previous value (like __sync_fetch_and_sub).
2284 #[cfg(target_has_atomic = "8")]
2285 unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2286 // SAFETY: the caller must uphold the safety contract for `atomic_sub`.
2289 Acquire => intrinsics::atomic_xsub_acq(dst, val),
2290 Release => intrinsics::atomic_xsub_rel(dst, val),
2291 AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
2292 Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
2293 SeqCst => intrinsics::atomic_xsub(dst, val),
2299 #[cfg(target_has_atomic = "8")]
2300 unsafe fn atomic_compare_exchange<T: Copy>(
2307 // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange`.
2308 let (val, ok) = unsafe {
2309 match (success, failure) {
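// Each arm selects the intrinsic variant for the given (success, failure) ordering pair;
// invalid pairs (an `AcqRel`/`Release` failure ordering, or a failure ordering stronger
// than the success ordering) fall through to the panics below.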
2310 (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
2311 (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
2312 (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
2313 (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
2314 (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
2315 (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
2316 (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
2317 (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
2318 (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
2319 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
2320 (_, Release) => panic!("there is no such thing as a release failure ordering"),
2321 _ => panic!("a failure ordering can't be stronger than a success ordering"),
2324 if ok { Ok(val) } else { Err(val) }
2328 #[cfg(target_has_atomic = "8")]
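// The weak variant may fail spuriously even when the comparison succeeds, so callers
// (e.g. `compare_exchange_weak` and the `fetch_update` loop) must be prepared to retry.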
2329 unsafe fn atomic_compare_exchange_weak<T: Copy>(
2336 // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange_weak`.
2337 let (val, ok) = unsafe {
2338 match (success, failure) {
2339 (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
2340 (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
2341 (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
2342 (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
2343 (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
2344 (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
2345 (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
2346 (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
2347 (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
2348 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
2349 (_, Release) => panic!("there is no such thing as a release failure ordering"),
2350 _ => panic!("a failure ordering can't be stronger than a success ordering"),
2353 if ok { Ok(val) } else { Err(val) }
2357 #[cfg(target_has_atomic = "8")]
2358 unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2359 // SAFETY: the caller must uphold the safety contract for `atomic_and`
2362 Acquire => intrinsics::atomic_and_acq(dst, val),
2363 Release => intrinsics::atomic_and_rel(dst, val),
2364 AcqRel => intrinsics::atomic_and_acqrel(dst, val),
2365 Relaxed => intrinsics::atomic_and_relaxed(dst, val),
2366 SeqCst => intrinsics::atomic_and(dst, val),
2372 #[cfg(target_has_atomic = "8")]
2373 unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2374 // SAFETY: the caller must uphold the safety contract for `atomic_nand`
2377 Acquire => intrinsics::atomic_nand_acq(dst, val),
2378 Release => intrinsics::atomic_nand_rel(dst, val),
2379 AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
2380 Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
2381 SeqCst => intrinsics::atomic_nand(dst, val),
2387 #[cfg(target_has_atomic = "8")]
2388 unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2389 // SAFETY: the caller must uphold the safety contract for `atomic_or`
2392 Acquire => intrinsics::atomic_or_acq(dst, val),
2393 Release => intrinsics::atomic_or_rel(dst, val),
2394 AcqRel => intrinsics::atomic_or_acqrel(dst, val),
2395 Relaxed => intrinsics::atomic_or_relaxed(dst, val),
2396 SeqCst => intrinsics::atomic_or(dst, val),
2402 #[cfg(target_has_atomic = "8")]
2403 unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2404 // SAFETY: the caller must uphold the safety contract for `atomic_xor`
2407 Acquire => intrinsics::atomic_xor_acq(dst, val),
2408 Release => intrinsics::atomic_xor_rel(dst, val),
2409 AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
2410 Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
2411 SeqCst => intrinsics::atomic_xor(dst, val),
2416 /// Returns the max value (signed comparison).
2418 #[cfg(target_has_atomic = "8")]
2419 unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2420 // SAFETY: the caller must uphold the safety contract for `atomic_max`
2423 Acquire => intrinsics::atomic_max_acq(dst, val),
2424 Release => intrinsics::atomic_max_rel(dst, val),
2425 AcqRel => intrinsics::atomic_max_acqrel(dst, val),
2426 Relaxed => intrinsics::atomic_max_relaxed(dst, val),
2427 SeqCst => intrinsics::atomic_max(dst, val),
2432 /// Returns the min value (signed comparison).
2434 #[cfg(target_has_atomic = "8")]
2435 unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2436 // SAFETY: the caller must uphold the safety contract for `atomic_min`
2439 Acquire => intrinsics::atomic_min_acq(dst, val),
2440 Release => intrinsics::atomic_min_rel(dst, val),
2441 AcqRel => intrinsics::atomic_min_acqrel(dst, val),
2442 Relaxed => intrinsics::atomic_min_relaxed(dst, val),
2443 SeqCst => intrinsics::atomic_min(dst, val),
2448 /// Returns the max value (unsigned comparison).
2450 #[cfg(target_has_atomic = "8")]
2451 unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2452 // SAFETY: the caller must uphold the safety contract for `atomic_umax`
2455 Acquire => intrinsics::atomic_umax_acq(dst, val),
2456 Release => intrinsics::atomic_umax_rel(dst, val),
2457 AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
2458 Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
2459 SeqCst => intrinsics::atomic_umax(dst, val),
2464 /// Returns the min value (unsigned comparison).
2466 #[cfg(target_has_atomic = "8")]
2467 unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2468 // SAFETY: the caller must uphold the safety contract for `atomic_umin`
2471 Acquire => intrinsics::atomic_umin_acq(dst, val),
2472 Release => intrinsics::atomic_umin_rel(dst, val),
2473 AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
2474 Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
2475 SeqCst => intrinsics::atomic_umin(dst, val),
2480 /// An atomic fence.
2482 /// Depending on the specified order, a fence prevents the compiler and CPU from
2483 /// reordering certain types of memory operations around it.
2484 /// That creates synchronizes-with relationships between it and atomic operations
2485 /// or fences in other threads.
2487 /// A fence 'A' which has (at least) [`Release`] ordering semantics synchronizes
2488 /// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
2489 /// exist operations X and Y, both operating on some atomic object 'M', such
2490 /// that A is sequenced before X, Y is sequenced before B, and Y observes
2491 /// the change to M. This provides a happens-before dependence between A and B.
2494 ///     Thread 1                            Thread 2
2496 /// fence(Release);      A --------------
2497 /// x.store(3, Relaxed); X ---------    |
2500 ///                                 -------------> Y  if x.load(Relaxed) == 3 {
2501 ///                                      |-------> B      fence(Acquire);
2506 /// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
2509 /// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
2510 /// and [`Release`] semantics, participates in the global program order of the
2511 /// other [`SeqCst`] operations and/or fences.
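///
/// As a concrete sketch of the release/acquire case in the diagram above (the
/// `DATA` and `FLAG` statics, the stored value, and the thread setup here are
/// illustrative only, not part of this API):
///
/// ```
/// use std::sync::atomic::{fence, AtomicBool, AtomicUsize, Ordering};
/// use std::thread;
///
/// static DATA: AtomicUsize = AtomicUsize::new(0);
/// static FLAG: AtomicBool = AtomicBool::new(false);
///
/// let writer = thread::spawn(|| {
///     DATA.store(3, Ordering::Relaxed);
///     fence(Ordering::Release);            // fence A
///     FLAG.store(true, Ordering::Relaxed); // store X
/// });
///
/// if FLAG.load(Ordering::Relaxed) {        // load Y
///     fence(Ordering::Acquire);            // fence B
///     // If Y observed X, then A synchronizes-with B, so this read sees 3.
///     assert_eq!(DATA.load(Ordering::Relaxed), 3);
/// }
/// writer.join().unwrap();
/// ```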
2513 /// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
2517 /// Panics if `order` is [`Relaxed`].
2522 /// use std::sync::atomic::AtomicBool;
2523 /// use std::sync::atomic::fence;
2524 /// use std::sync::atomic::Ordering;
2526 /// // A mutual exclusion primitive based on a spinlock.
2527 /// pub struct Mutex {
2528 /// flag: AtomicBool,
2532 /// pub fn new() -> Mutex {
2534 /// flag: AtomicBool::new(false),
2538 /// pub fn lock(&self) {
2539 /// // Wait until the old value is `false`.
2540 /// while self.flag.compare_and_swap(false, true, Ordering::Relaxed) {}
2541 /// // This fence synchronizes-with the store in `unlock`.
2542 /// fence(Ordering::Acquire);
2545 /// pub fn unlock(&self) {
2546 /// self.flag.store(false, Ordering::Release);
2551 #[stable(feature = "rust1", since = "1.0.0")]
2552 pub fn fence(order: Ordering) {
2553 // SAFETY: using an atomic fence is safe.
2556 Acquire => intrinsics::atomic_fence_acq(),
2557 Release => intrinsics::atomic_fence_rel(),
2558 AcqRel => intrinsics::atomic_fence_acqrel(),
2559 SeqCst => intrinsics::atomic_fence(),
2560 Relaxed => panic!("there is no such thing as a relaxed fence"),
2565 /// A compiler memory fence.
2567 /// `compiler_fence` does not emit any machine code, but restricts the kinds
2568 /// of memory re-ordering the compiler is allowed to do. Specifically, depending on
2569 /// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
2570 /// or writes from before or after the call to the other side of the call to
2571 /// `compiler_fence`. Note that it does **not** prevent the *hardware*
2572 /// from doing such re-ordering. This is not a problem in a single-threaded
2573 /// execution context, but when other threads may modify memory at the same
2574 /// time, stronger synchronization primitives such as [`fence`] are required.
2576 /// The re-orderings prevented by the different ordering semantics are:
2578 /// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
2579 /// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
2580 /// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
2581 /// - with [`AcqRel`], both of the above rules are enforced.
2583 /// `compiler_fence` is generally only useful for preventing a thread from
2584 /// racing *with itself*. That is, it matters when a given thread is executing
2585 /// one piece of code, is then interrupted, and starts executing code elsewhere
2586 /// (while still in the same thread, and conceptually still on the same
2587 /// core). In traditional programs, this can only occur when a signal
2588 /// handler is registered. In more low-level code, such situations can also
2589 /// arise when handling interrupts, when implementing green threads with
2590 /// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
2591 /// discussion of [memory barriers].
2595 /// Panics if `order` is [`Relaxed`].
2599 /// Without `compiler_fence`, the `assert_eq!` in the following code
2600 /// is *not* guaranteed to succeed, despite everything happening in a single thread.
2601 /// To see why, remember that the compiler is free to swap the stores to
2602 /// `IMPORTANT_VARIABLE` and `IS_READY` since they are both
2603 /// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
2604 /// after `IS_READY` is updated, then the signal handler will see
2605 /// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
2606 /// Using a `compiler_fence` remedies this situation.
2609 /// use std::sync::atomic::{AtomicBool, AtomicUsize};
2610 /// use std::sync::atomic::Ordering;
2611 /// use std::sync::atomic::compiler_fence;
2613 /// static IMPORTANT_VARIABLE: AtomicUsize = AtomicUsize::new(0);
2614 /// static IS_READY: AtomicBool = AtomicBool::new(false);
2617 /// IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
2618 /// // prevent earlier writes from being moved beyond this point
2619 /// compiler_fence(Ordering::Release);
2620 /// IS_READY.store(true, Ordering::Relaxed);
2623 /// fn signal_handler() {
2624 /// if IS_READY.load(Ordering::Relaxed) {
2625 /// assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
2630 /// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
2632 #[stable(feature = "compiler_fences", since = "1.21.0")]
2633 pub fn compiler_fence(order: Ordering) {
2634 // SAFETY: using an atomic fence is safe.
2637 Acquire => intrinsics::atomic_singlethreadfence_acq(),
2638 Release => intrinsics::atomic_singlethreadfence_rel(),
2639 AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
2640 SeqCst => intrinsics::atomic_singlethreadfence(),
2641 Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
2646 #[cfg(target_has_atomic_load_store = "8")]
2647 #[stable(feature = "atomic_debug", since = "1.3.0")]
2648 impl fmt::Debug for AtomicBool {
2649 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2650 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2654 #[cfg(target_has_atomic_load_store = "ptr")]
2655 #[stable(feature = "atomic_debug", since = "1.3.0")]
2656 impl<T> fmt::Debug for AtomicPtr<T> {
2657 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2658 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2662 #[cfg(target_has_atomic_load_store = "ptr")]
2663 #[stable(feature = "atomic_pointer", since = "1.24.0")]
2664 impl<T> fmt::Pointer for AtomicPtr<T> {
2665 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2666 fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)