3 //! Atomic types provide primitive shared-memory communication between
4 //! threads, and are the building blocks of other concurrent
7 //! This module defines atomic versions of a select number of primitive
8 //! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
9 //! [`AtomicI8`], [`AtomicU16`], etc.
10 //! Atomic types present operations that, when used correctly, synchronize
11 //! updates between threads.
13 //! [`AtomicBool`]: struct.AtomicBool.html
14 //! [`AtomicIsize`]: struct.AtomicIsize.html
15 //! [`AtomicUsize`]: struct.AtomicUsize.html
16 //! [`AtomicI8`]: struct.AtomicI8.html
17 //! [`AtomicU16`]: struct.AtomicU16.html
19 //! Each method takes an [`Ordering`] which represents the strength of
20 //! the memory barrier for that operation. These orderings are the
21 //! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2].
23 //! [`Ordering`]: enum.Ordering.html
25 //! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order
26 //! [2]: ../../../nomicon/atomics.html
28 //! Atomic variables are safe to share between threads (they implement [`Sync`])
29 //! but they do not themselves provide the mechanism for sharing and follow the
30 //! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
31 //! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
32 //! atomically-reference-counted shared pointer).
34 //! [`Sync`]: ../../marker/trait.Sync.html
35 //! [arc]: ../../../std/sync/struct.Arc.html
37 //! Atomic types may be stored in static variables, initialized using
38 //! the constant initializers like [`AtomicBool::new`]. Atomic statics
39 //! are often used for lazy global initialization.
41 //! [`AtomicBool::new`]: struct.AtomicBool.html#method.new
45 //! All atomic types in this module are guaranteed to be [lock-free] if they're
46 //! available. This means they don't internally acquire a global mutex. Atomic
47 //! types and operations are not guaranteed to be wait-free. This means that
48 //! operations like `fetch_or` may be implemented with a compare-and-swap loop.
50 //! Atomic operations may be implemented at the instruction layer with
51 //! larger-size atomics. For example some platforms use 4-byte atomic
52 //! instructions to implement `AtomicI8`. Note that this emulation should not
53 //! have an impact on correctness of code, it's just something to be aware of.
55 //! The atomic types in this module may not be available on all platforms. The
56 //! atomic types here are all widely available, however, and can generally be
57 //! relied upon existing. Some notable exceptions are:
59 //! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or
60 //! `AtomicI64` types.
61 //! * ARM platforms like `armv5te` that aren't for Linux do not have any atomics
63 //! * ARM targets with `thumbv6m` do not have atomic operations at all.
65 //! Note that future platforms may be added that also do not have support for
66 //! some atomic operations. Maximally portable code will want to be careful
67 //! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are
68 //! generally the most portable, but even then they're not available everywhere.
69 //! For reference, the `std` library requires pointer-sized atomics, although
72 //! Currently you'll need to use `#[cfg(target_arch)]` primarily to
73 //! conditionally compile in code with atomics. There is an unstable
74 //! `#[cfg(target_has_atomic)]` as well which may be stabilized in the future.
76 //! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm
80 //! A simple spinlock:
83 //! use std::sync::Arc;
84 //! use std::sync::atomic::{AtomicUsize, Ordering};
88 //! let spinlock = Arc::new(AtomicUsize::new(1));
90 //! let spinlock_clone = spinlock.clone();
91 //! let thread = thread::spawn(move|| {
92 //! spinlock_clone.store(0, Ordering::SeqCst);
95 //! // Wait for the other thread to release the lock
96 //! while spinlock.load(Ordering::SeqCst) != 0 {}
98 //! if let Err(panic) = thread.join() {
99 //! println!("Thread had an error: {:?}", panic);
104 //! Keep a global count of live threads:
107 //! use std::sync::atomic::{AtomicUsize, Ordering};
109 //! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0);
111 //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
112 //! println!("live threads: {}", old_thread_count + 1);
115 #![stable(feature = "rust1", since = "1.0.0")]
116 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
117 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))]
119 use self::Ordering::*;
121 use crate::cell::UnsafeCell;
123 use crate::intrinsics;
125 use crate::hint::spin_loop;
127 /// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
129 /// Upon receiving spin-loop signal the processor can optimize its behavior by, for example, saving
130 /// power or switching hyper-threads.
132 /// This function is different from [`std::thread::yield_now`] which directly yields to the
133 /// system's scheduler, whereas `spin_loop_hint` does not interact with the operating system.
135 /// A common use case for `spin_loop_hint` is implementing bounded optimistic spinning in a CAS
136 /// loop in synchronization primitives. To avoid problems like priority inversion, it is strongly
137 /// recommended that the spin loop is terminated after a finite amount of iterations and an
138 /// appropriate blocking syscall is made.
140 /// **Note**: On platforms that do not support receiving spin-loop hints this function does not
141 /// do anything at all.
143 /// [`std::thread::yield_now`]: ../../../std/thread/fn.yield_now.html
144 /// [`std::thread::sleep`]: ../../../std/thread/fn.sleep.html
145 /// [`std::sync::Mutex`]: ../../../std/sync/struct.Mutex.html
// NOTE(review): presumably a thin forwarder to `crate::hint::spin_loop`
// (imported at the top of this module) — confirm against the body.
147 #[stable(feature = "spin_loop_hint", since = "1.24.0")]
148 pub fn spin_loop_hint() {
152 /// A boolean type which can be safely shared between threads.
154 /// This type has the same in-memory representation as a [`bool`].
156 /// **Note**: This type may not be available on some platforms.
158 /// [`bool`]: ../../../std/primitive.bool.html
// Backed by an `UnsafeCell<u8>`: `new` stores `v as u8` and `get_mut` casts the
// cell back to `*mut bool`, relying on the shared layout noted above.
159 #[cfg(target_has_atomic_load_store = "8")]
160 #[stable(feature = "rust1", since = "1.0.0")]
162 pub struct AtomicBool {
166 #[cfg(target_has_atomic_load_store = "8")]
167 #[stable(feature = "rust1", since = "1.0.0")]
168 impl Default for AtomicBool {
169 /// Creates an `AtomicBool` initialized to `false`.
170 fn default() -> Self {
175 // Send is implicitly implemented for AtomicBool.
176 #[cfg(target_has_atomic_load_store = "8")]
177 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: all shared access to the inner u8 goes through the atomic intrinsics
// used by the methods below, so `&AtomicBool` may cross threads soundly.
178 unsafe impl Sync for AtomicBool {}
180 /// A raw pointer type which can be safely shared between threads.
182 /// This type has the same in-memory representation as a `*mut T`.
184 /// **Note**: This type may not be available on some platforms. Its size depends
185 /// on the target pointer's size.
186 #[cfg(target_has_atomic_load_store = "ptr")]
187 #[stable(feature = "rust1", since = "1.0.0")]
// The explicit alignment matches the target pointer width so the cell can be
// accessed with pointer-sized atomic instructions (`load` casts it to *mut usize).
188 #[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
189 #[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
190 #[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
191 pub struct AtomicPtr<T> {
192 p: UnsafeCell<*mut T>,
195 #[cfg(target_has_atomic_load_store = "ptr")]
196 #[stable(feature = "rust1", since = "1.0.0")]
197 impl<T> Default for AtomicPtr<T> {
198 /// Creates a null `AtomicPtr<T>`.
199 fn default() -> AtomicPtr<T> {
200 AtomicPtr::new(crate::ptr::null_mut())
204 #[cfg(target_has_atomic_load_store = "ptr")]
205 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: only the pointer *value* is ever read or written (atomically); the
// pointee `T` is never dereferenced by this type, so no `T: Send`/`T: Sync`
// bound is required on these impls.
206 unsafe impl<T> Send for AtomicPtr<T> {}
207 #[cfg(target_has_atomic_load_store = "ptr")]
208 #[stable(feature = "rust1", since = "1.0.0")]
209 unsafe impl<T> Sync for AtomicPtr<T> {}
211 /// Atomic memory orderings
213 /// Memory orderings specify the way atomic operations synchronize memory.
214 /// In its weakest [`Relaxed`][Ordering::Relaxed], only the memory directly touched by the
215 /// operation is synchronized. On the other hand, a store-load pair of [`SeqCst`][Ordering::SeqCst]
216 /// operations synchronize other memory while additionally preserving a total order of such
217 /// operations across all threads.
219 /// Rust's memory orderings are [the same as those of
220 /// C++20](https://en.cppreference.com/w/cpp/atomic/memory_order).
222 /// For more information see the [nomicon].
224 /// [nomicon]: ../../../nomicon/atomics.html
225 /// [Ordering::Relaxed]: #variant.Relaxed
226 /// [Ordering::SeqCst]: #variant.SeqCst
227 #[stable(feature = "rust1", since = "1.0.0")]
// Fieldless enum; Copy/Clone make it cheap to pass by value, which every
// atomic method in this module does.
228 #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
231 /// No ordering constraints, only atomic operations.
233 /// Corresponds to [`memory_order_relaxed`] in C++20.
235 /// [`memory_order_relaxed`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering
236 #[stable(feature = "rust1", since = "1.0.0")]
238 /// When coupled with a store, all previous operations become ordered
239 /// before any load of this value with [`Acquire`] (or stronger) ordering.
240 /// In particular, all previous writes become visible to all threads
241 /// that perform an [`Acquire`] (or stronger) load of this value.
243 /// Notice that using this ordering for an operation that combines loads
244 /// and stores leads to a [`Relaxed`] load operation!
246 /// This ordering is only applicable for operations that can perform a store.
248 /// Corresponds to [`memory_order_release`] in C++20.
250 /// [`Release`]: #variant.Release
251 /// [`Acquire`]: #variant.Acquire
252 /// [`Relaxed`]: #variant.Relaxed
253 /// [`memory_order_release`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
254 #[stable(feature = "rust1", since = "1.0.0")]
256 /// When coupled with a load, if the loaded value was written by a store operation with
257 /// [`Release`] (or stronger) ordering, then all subsequent operations
258 /// become ordered after that store. In particular, all subsequent loads will see data
259 /// written before the store.
261 /// Notice that using this ordering for an operation that combines loads
262 /// and stores leads to a [`Relaxed`] store operation!
264 /// This ordering is only applicable for operations that can perform a load.
266 /// Corresponds to [`memory_order_acquire`] in C++20.
268 /// [`Acquire`]: #variant.Acquire
269 /// [`Release`]: #variant.Release
270 /// [`Relaxed`]: #variant.Relaxed
271 /// [`memory_order_acquire`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
272 #[stable(feature = "rust1", since = "1.0.0")]
274 /// Has the effects of both [`Acquire`] and [`Release`] together:
275 /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
277 /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
278 /// not performing any store and hence it has just [`Acquire`] ordering. However,
279 /// `AcqRel` will never perform [`Relaxed`] accesses.
281 /// This ordering is only applicable for operations that combine both loads and stores.
283 /// Corresponds to [`memory_order_acq_rel`] in C++20.
285 /// [`memory_order_acq_rel`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
286 /// [`Acquire`]: #variant.Acquire
287 /// [`Release`]: #variant.Release
288 /// [`Relaxed`]: #variant.Relaxed
289 #[stable(feature = "rust1", since = "1.0.0")]
291 /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
292 /// operations, respectively) with the additional guarantee that all threads see all
293 /// sequentially consistent operations in the same order.
295 /// Corresponds to [`memory_order_seq_cst`] in C++20.
297 /// [`memory_order_seq_cst`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering
298 /// [`Acquire`]: #variant.Acquire
299 /// [`Release`]: #variant.Release
300 /// [`AcqRel`]: #variant.AcqRel
301 #[stable(feature = "rust1", since = "1.0.0")]
305 /// An [`AtomicBool`] initialized to `false`.
307 /// [`AtomicBool`]: struct.AtomicBool.html
308 #[cfg(target_has_atomic_load_store = "8")]
309 #[stable(feature = "rust1", since = "1.0.0")]
// Deprecated: kept only for backwards compatibility; new code should use
// `AtomicBool::new(false)` directly (see the suggestion below).
312 reason = "the `new` function is now preferred",
313 suggestion = "AtomicBool::new(false)"
315 pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
317 #[cfg(target_has_atomic_load_store = "8")]
319 /// Creates a new `AtomicBool`.
324 /// use std::sync::atomic::AtomicBool;
326 /// let atomic_true = AtomicBool::new(true);
327 /// let atomic_false = AtomicBool::new(false);
330 #[stable(feature = "rust1", since = "1.0.0")]
331 #[rustc_const_stable(feature = "const_atomic_new", since = "1.32.0")]
332 pub const fn new(v: bool) -> AtomicBool {
// `true`/`false` are stored as 1/0 in the backing u8.
333 AtomicBool { v: UnsafeCell::new(v as u8) }
336 /// Returns a mutable reference to the underlying [`bool`].
338 /// This is safe because the mutable reference guarantees that no other threads are
339 /// concurrently accessing the atomic data.
341 /// [`bool`]: ../../../std/primitive.bool.html
346 /// use std::sync::atomic::{AtomicBool, Ordering};
348 /// let mut some_bool = AtomicBool::new(true);
349 /// assert_eq!(*some_bool.get_mut(), true);
350 /// *some_bool.get_mut() = false;
351 /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
354 #[stable(feature = "atomic_access", since = "1.15.0")]
355 pub fn get_mut(&mut self) -> &mut bool {
356 // SAFETY: the mutable reference guarantees unique ownership.
// The u8 -> bool pointer cast is valid because AtomicBool is documented to
// share `bool`'s in-memory representation.
357 unsafe { &mut *(self.v.get() as *mut bool) }
360 /// Consumes the atomic and returns the contained value.
362 /// This is safe because passing `self` by value guarantees that no other threads are
363 /// concurrently accessing the atomic data.
368 /// use std::sync::atomic::AtomicBool;
370 /// let some_bool = AtomicBool::new(true);
371 /// assert_eq!(some_bool.into_inner(), true);
374 #[stable(feature = "atomic_access", since = "1.15.0")]
375 pub fn into_inner(self) -> bool {
// Any non-zero byte maps back to `true` (mirrors the `v as u8` encoding in `new`).
376 self.v.into_inner() != 0
379 /// Loads a value from the bool.
381 /// `load` takes an [`Ordering`] argument which describes the memory ordering
382 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
386 /// Panics if `order` is [`Release`] or [`AcqRel`].
388 /// [`Ordering`]: enum.Ordering.html
389 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
390 /// [`Release`]: enum.Ordering.html#variant.Release
391 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
392 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
393 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
398 /// use std::sync::atomic::{AtomicBool, Ordering};
400 /// let some_bool = AtomicBool::new(true);
402 /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
405 #[stable(feature = "rust1", since = "1.0.0")]
406 pub fn load(&self, order: Ordering) -> bool {
407 // SAFETY: any data races are prevented by atomic intrinsics and the raw
408 // pointer passed in is valid because we got it from a reference.
// Delegates to the module-wide `atomic_load` helper; the raw u8 is
// reinterpreted as bool via `!= 0`.
409 unsafe { atomic_load(self.v.get(), order) != 0 }
412 /// Stores a value into the bool.
414 /// `store` takes an [`Ordering`] argument which describes the memory ordering
415 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
419 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
421 /// [`Ordering`]: enum.Ordering.html
422 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
423 /// [`Release`]: enum.Ordering.html#variant.Release
424 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
425 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
426 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
431 /// use std::sync::atomic::{AtomicBool, Ordering};
433 /// let some_bool = AtomicBool::new(true);
435 /// some_bool.store(false, Ordering::Relaxed);
436 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
439 #[stable(feature = "rust1", since = "1.0.0")]
440 pub fn store(&self, val: bool, order: Ordering) {
441 // SAFETY: any data races are prevented by atomic intrinsics and the raw
442 // pointer passed in is valid because we got it from a reference.
// The bool is widened to its u8 representation before the atomic write.
444 atomic_store(self.v.get(), val as u8, order);
448 /// Stores a value into the bool, returning the previous value.
450 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
451 /// of this operation. All ordering modes are possible. Note that using
452 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
453 /// using [`Release`] makes the load part [`Relaxed`].
455 /// [`Ordering`]: enum.Ordering.html
456 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
457 /// [`Release`]: enum.Ordering.html#variant.Release
458 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
463 /// use std::sync::atomic::{AtomicBool, Ordering};
465 /// let some_bool = AtomicBool::new(true);
467 /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
468 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
471 /// **Note**: This method may not be available on some platforms.
473 #[stable(feature = "rust1", since = "1.0.0")]
474 #[cfg(target_has_atomic = "8")]
475 pub fn swap(&self, val: bool, order: Ordering) -> bool {
476 // SAFETY: data races are prevented by atomic intrinsics.
// The previous byte is returned, mapped back to bool via `!= 0`.
477 unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
480 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
482 /// The return value is always the previous value. If it is equal to `current`, then the value
485 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
486 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
487 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
488 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
489 /// happens, and using [`Release`] makes the load part [`Relaxed`].
491 /// [`Ordering`]: enum.Ordering.html
492 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
493 /// [`Release`]: enum.Ordering.html#variant.Release
494 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
495 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
496 /// [`bool`]: ../../../std/primitive.bool.html
501 /// use std::sync::atomic::{AtomicBool, Ordering};
503 /// let some_bool = AtomicBool::new(true);
505 /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
506 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
508 /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
509 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
512 /// **Note**: This method may not be available on some platforms.
514 #[stable(feature = "rust1", since = "1.0.0")]
515 #[cfg(target_has_atomic = "8")]
516 pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
// Implemented on top of `compare_exchange`; the failure ordering is derived
// from `order` by `strongest_failure_ordering`.
517 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
523 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
525 /// The return value is a result indicating whether the new value was written and containing
526 /// the previous value. On success this value is guaranteed to be equal to `current`.
528 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
529 /// ordering of this operation. The first describes the required ordering if the
530 /// operation succeeds while the second describes the required ordering when the
531 /// operation fails. Using [`Acquire`] as success ordering makes the store part
532 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
533 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
534 /// and must be equivalent to or weaker than the success ordering.
537 /// [`bool`]: ../../../std/primitive.bool.html
538 /// [`Ordering`]: enum.Ordering.html
539 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
540 /// [`Release`]: enum.Ordering.html#variant.Release
541 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
542 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
547 /// use std::sync::atomic::{AtomicBool, Ordering};
549 /// let some_bool = AtomicBool::new(true);
551 /// assert_eq!(some_bool.compare_exchange(true,
553 /// Ordering::Acquire,
554 /// Ordering::Relaxed),
556 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
558 /// assert_eq!(some_bool.compare_exchange(true, true,
559 /// Ordering::SeqCst,
560 /// Ordering::Acquire),
562 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
565 /// **Note**: This method may not be available on some platforms.
567 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
568 #[cfg(target_has_atomic = "8")]
569 pub fn compare_exchange(
575 ) -> Result<bool, bool> {
576 // SAFETY: data races are prevented by atomic intrinsics.
// Both the Ok and Err payloads carry the raw u8 previous value; each side is
// mapped back to bool via `!= 0`.
578 atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
581 Err(x) => Err(x != 0),
585 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
587 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
588 /// comparison succeeds, which can result in more efficient code on some platforms. The
589 /// return value is a result indicating whether the new value was written and containing the
592 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
593 /// ordering of this operation. The first describes the required ordering if the
594 /// operation succeeds while the second describes the required ordering when the
595 /// operation fails. Using [`Acquire`] as success ordering makes the store part
596 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
597 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
598 /// and must be equivalent to or weaker than the success ordering.
600 /// [`bool`]: ../../../std/primitive.bool.html
601 /// [`compare_exchange`]: #method.compare_exchange
602 /// [`Ordering`]: enum.Ordering.html
603 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
604 /// [`Release`]: enum.Ordering.html#variant.Release
605 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
606 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
611 /// use std::sync::atomic::{AtomicBool, Ordering};
613 /// let val = AtomicBool::new(false);
616 /// let mut old = val.load(Ordering::Relaxed);
618 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
620 /// Err(x) => old = x,
625 /// **Note**: This method may not be available on some platforms.
627 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
628 #[cfg(target_has_atomic = "8")]
629 pub fn compare_exchange_weak(
635 ) -> Result<bool, bool> {
636 // SAFETY: data races are prevented by atomic intrinsics.
// Same shape as `compare_exchange`, but uses the weak intrinsic, which may
// fail spuriously — intended for use inside retry loops (see the example).
638 atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
641 Err(x) => Err(x != 0),
645 /// Logical "and" with a boolean value.
647 /// Performs a logical "and" operation on the current value and the argument `val`, and sets
648 /// the new value to the result.
650 /// Returns the previous value.
652 /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
653 /// of this operation. All ordering modes are possible. Note that using
654 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
655 /// using [`Release`] makes the load part [`Relaxed`].
657 /// [`Ordering`]: enum.Ordering.html
658 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
659 /// [`Release`]: enum.Ordering.html#variant.Release
660 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
665 /// use std::sync::atomic::{AtomicBool, Ordering};
667 /// let foo = AtomicBool::new(true);
668 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
669 /// assert_eq!(foo.load(Ordering::SeqCst), false);
671 /// let foo = AtomicBool::new(true);
672 /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
673 /// assert_eq!(foo.load(Ordering::SeqCst), true);
675 /// let foo = AtomicBool::new(false);
676 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
677 /// assert_eq!(foo.load(Ordering::SeqCst), false);
680 /// **Note**: This method may not be available on some platforms.
682 #[stable(feature = "rust1", since = "1.0.0")]
683 #[cfg(target_has_atomic = "8")]
684 pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
685 // SAFETY: data races are prevented by atomic intrinsics.
// Bitwise AND on the 0/1 byte representation is exactly logical AND.
686 unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
689 /// Logical "nand" with a boolean value.
691 /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
692 /// the new value to the result.
694 /// Returns the previous value.
696 /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
697 /// of this operation. All ordering modes are possible. Note that using
698 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
699 /// using [`Release`] makes the load part [`Relaxed`].
701 /// [`Ordering`]: enum.Ordering.html
702 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
703 /// [`Release`]: enum.Ordering.html#variant.Release
704 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
709 /// use std::sync::atomic::{AtomicBool, Ordering};
711 /// let foo = AtomicBool::new(true);
712 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
713 /// assert_eq!(foo.load(Ordering::SeqCst), true);
715 /// let foo = AtomicBool::new(true);
716 /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
717 /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
718 /// assert_eq!(foo.load(Ordering::SeqCst), false);
720 /// let foo = AtomicBool::new(false);
721 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
722 /// assert_eq!(foo.load(Ordering::SeqCst), true);
725 /// **Note**: This method may not be available on some platforms.
727 #[stable(feature = "rust1", since = "1.0.0")]
728 #[cfg(target_has_atomic = "8")]
729 pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
730 // We can't use atomic_nand here because it can result in a bool with
731 // an invalid value. This happens because the atomic operation is done
732 // with an 8-bit integer internally, which would set the upper 7 bits.
733 // So we just use fetch_xor or swap instead.
// `val == true` arm: NAND with true reduces to NOT — !(x & true) == !x.
736 // We must invert the bool.
737 self.fetch_xor(true, order)
739 // !(x & false) == true
740 // We must set the bool to true.
741 self.swap(true, order)
745 /// Logical "or" with a boolean value.
747 /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
748 /// new value to the result.
750 /// Returns the previous value.
752 /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
753 /// of this operation. All ordering modes are possible. Note that using
754 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
755 /// using [`Release`] makes the load part [`Relaxed`].
757 /// [`Ordering`]: enum.Ordering.html
758 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
759 /// [`Release`]: enum.Ordering.html#variant.Release
760 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
765 /// use std::sync::atomic::{AtomicBool, Ordering};
767 /// let foo = AtomicBool::new(true);
768 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
769 /// assert_eq!(foo.load(Ordering::SeqCst), true);
771 /// let foo = AtomicBool::new(true);
772 /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
773 /// assert_eq!(foo.load(Ordering::SeqCst), true);
775 /// let foo = AtomicBool::new(false);
776 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
777 /// assert_eq!(foo.load(Ordering::SeqCst), false);
780 /// **Note**: This method may not be available on some platforms.
782 #[stable(feature = "rust1", since = "1.0.0")]
783 #[cfg(target_has_atomic = "8")]
784 pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
785 // SAFETY: data races are prevented by atomic intrinsics.
// Bitwise OR on the 0/1 byte representation is exactly logical OR.
786 unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
789 /// Logical "xor" with a boolean value.
791 /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
792 /// the new value to the result.
794 /// Returns the previous value.
796 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
797 /// of this operation. All ordering modes are possible. Note that using
798 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
799 /// using [`Release`] makes the load part [`Relaxed`].
801 /// [`Ordering`]: enum.Ordering.html
802 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
803 /// [`Release`]: enum.Ordering.html#variant.Release
804 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
809 /// use std::sync::atomic::{AtomicBool, Ordering};
811 /// let foo = AtomicBool::new(true);
812 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
813 /// assert_eq!(foo.load(Ordering::SeqCst), true);
815 /// let foo = AtomicBool::new(true);
816 /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
817 /// assert_eq!(foo.load(Ordering::SeqCst), false);
819 /// let foo = AtomicBool::new(false);
820 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
821 /// assert_eq!(foo.load(Ordering::SeqCst), false);
824 /// **Note**: This method may not be available on some platforms.
826 #[stable(feature = "rust1", since = "1.0.0")]
827 #[cfg(target_has_atomic = "8")]
828 pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
829 // SAFETY: data races are prevented by atomic intrinsics.
// Bitwise XOR on the 0/1 byte representation is exactly logical XOR.
830 unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
833 /// Returns a mutable pointer to the underlying [`bool`].
835 /// Doing non-atomic reads and writes on the resulting `bool` can be a data race.
836 /// This method is mostly useful for FFI, where the function signature may use
837 /// `*mut bool` instead of `&AtomicBool`.
839 /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
840 /// atomic types work with interior mutability. All modifications of an atomic change the value
841 /// through a shared reference, and can do so safely as long as they use atomic operations. Any
842 /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
843 /// restriction: operations on it must be atomic.
845 /// [`bool`]: ../../../std/primitive.bool.html
849 /// ```ignore (extern-declaration)
851 /// use std::sync::atomic::AtomicBool;
853 /// fn my_atomic_op(arg: *mut bool);
856 /// let mut atomic = AtomicBool::new(true);
858 /// my_atomic_op(atomic.as_mut_ptr());
863 #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")]
864 pub fn as_mut_ptr(&self) -> *mut bool {
// Producing the pointer is safe; *using* it is the caller's obligation
// (see the safety discussion above).
865 self.v.get() as *mut bool
869 #[cfg(target_has_atomic_load_store = "ptr")]
870 impl<T> AtomicPtr<T> {
871 /// Creates a new `AtomicPtr`.
876 /// use std::sync::atomic::AtomicPtr;
878 /// let ptr = &mut 5;
879 /// let atomic_ptr = AtomicPtr::new(ptr);
882 #[stable(feature = "rust1", since = "1.0.0")]
883 #[rustc_const_stable(feature = "const_atomic_new", since = "1.32.0")]
884 pub const fn new(p: *mut T) -> AtomicPtr<T> {
// Simply wraps the raw pointer value; no validity requirement is imposed here.
885 AtomicPtr { p: UnsafeCell::new(p) }
888 /// Returns a mutable reference to the underlying pointer.
890 /// This is safe because the mutable reference guarantees that no other threads are
891 /// concurrently accessing the atomic data.
896 /// use std::sync::atomic::{AtomicPtr, Ordering};
898 /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
899 /// *atomic_ptr.get_mut() = &mut 5;
900 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
903 #[stable(feature = "atomic_access", since = "1.15.0")]
904 pub fn get_mut(&mut self) -> &mut *mut T {
905 // SAFETY: the mutable reference guarantees unique ownership.
// Unlike `AtomicBool::get_mut`, no cast is needed: the cell already holds `*mut T`.
906 unsafe { &mut *self.p.get() }
909 /// Consumes the atomic and returns the contained value.
911 /// This is safe because passing `self` by value guarantees that no other threads are
912 /// concurrently accessing the atomic data.
917 /// use std::sync::atomic::AtomicPtr;
919 /// let atomic_ptr = AtomicPtr::new(&mut 5);
920 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
923 #[stable(feature = "atomic_access", since = "1.15.0")]
924 pub fn into_inner(self) -> *mut T {
928 /// Loads a value from the pointer.
930 /// `load` takes an [`Ordering`] argument which describes the memory ordering
931 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
935 /// Panics if `order` is [`Release`] or [`AcqRel`].
937 /// [`Ordering`]: enum.Ordering.html
938 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
939 /// [`Release`]: enum.Ordering.html#variant.Release
940 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
941 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
942 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
947 /// use std::sync::atomic::{AtomicPtr, Ordering};
949 /// let ptr = &mut 5;
950 /// let some_ptr = AtomicPtr::new(ptr);
952 /// let value = some_ptr.load(Ordering::Relaxed);
955 #[stable(feature = "rust1", since = "1.0.0")]
956 pub fn load(&self, order: Ordering) -> *mut T {
957 // SAFETY: data races are prevented by atomic intrinsics.
958 unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
961 /// Stores a value into the pointer.
963 /// `store` takes an [`Ordering`] argument which describes the memory ordering
964 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
968 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
970 /// [`Ordering`]: enum.Ordering.html
971 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
972 /// [`Release`]: enum.Ordering.html#variant.Release
973 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
974 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
975 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
980 /// use std::sync::atomic::{AtomicPtr, Ordering};
982 /// let ptr = &mut 5;
983 /// let some_ptr = AtomicPtr::new(ptr);
985 /// let other_ptr = &mut 10;
987 /// some_ptr.store(other_ptr, Ordering::Relaxed);
990 #[stable(feature = "rust1", since = "1.0.0")]
991 pub fn store(&self, ptr: *mut T, order: Ordering) {
992 // SAFETY: data races are prevented by atomic intrinsics.
994 atomic_store(self.p.get() as *mut usize, ptr as usize, order);
998 /// Stores a value into the pointer, returning the previous value.
1000 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
1001 /// of this operation. All ordering modes are possible. Note that using
1002 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1003 /// using [`Release`] makes the load part [`Relaxed`].
1005 /// [`Ordering`]: enum.Ordering.html
1006 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1007 /// [`Release`]: enum.Ordering.html#variant.Release
1008 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1013 /// use std::sync::atomic::{AtomicPtr, Ordering};
1015 /// let ptr = &mut 5;
1016 /// let some_ptr = AtomicPtr::new(ptr);
1018 /// let other_ptr = &mut 10;
1020 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
1023 /// **Note**: This method may not be available on some platforms.
1025 #[stable(feature = "rust1", since = "1.0.0")]
1026 #[cfg(target_has_atomic = "ptr")]
1027 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
1028 // SAFETY: data races are prevented by atomic intrinsics.
1029 unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
1032 /// Stores a value into the pointer if the current value is the same as the `current` value.
1034 /// The return value is always the previous value. If it is equal to `current`, then the value
1037 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1038 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
1039 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1040 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1041 /// happens, and using [`Release`] makes the load part [`Relaxed`].
1043 /// [`Ordering`]: enum.Ordering.html
1044 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1045 /// [`Release`]: enum.Ordering.html#variant.Release
1046 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1047 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1052 /// use std::sync::atomic::{AtomicPtr, Ordering};
1054 /// let ptr = &mut 5;
1055 /// let some_ptr = AtomicPtr::new(ptr);
1057 /// let other_ptr = &mut 10;
1059 /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
1062 /// **Note**: This method may not be available on some platforms.
1064 #[stable(feature = "rust1", since = "1.0.0")]
1065 #[cfg(target_has_atomic = "ptr")]
1066 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
1067 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
1073 /// Stores a value into the pointer if the current value is the same as the `current` value.
1075 /// The return value is a result indicating whether the new value was written and containing
1076 /// the previous value. On success this value is guaranteed to be equal to `current`.
1078 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1079 /// ordering of this operation. The first describes the required ordering if the
1080 /// operation succeeds while the second describes the required ordering when the
1081 /// operation fails. Using [`Acquire`] as success ordering makes the store part
1082 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1083 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1084 /// and must be equivalent to or weaker than the success ordering.
1086 /// [`Ordering`]: enum.Ordering.html
1087 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1088 /// [`Release`]: enum.Ordering.html#variant.Release
1089 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1090 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1095 /// use std::sync::atomic::{AtomicPtr, Ordering};
1097 /// let ptr = &mut 5;
1098 /// let some_ptr = AtomicPtr::new(ptr);
1100 /// let other_ptr = &mut 10;
1102 /// let value = some_ptr.compare_exchange(ptr, other_ptr,
1103 /// Ordering::SeqCst, Ordering::Relaxed);
1106 /// **Note**: This method may not be available on some platforms.
1108 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1109 #[cfg(target_has_atomic = "ptr")]
1110 pub fn compare_exchange(
1116 ) -> Result<*mut T, *mut T> {
1117 // SAFETY: data races are prevented by atomic intrinsics.
1119 let res = atomic_compare_exchange(
1120 self.p.get() as *mut usize,
1127 Ok(x) => Ok(x as *mut T),
1128 Err(x) => Err(x as *mut T),
1133 /// Stores a value into the pointer if the current value is the same as the `current` value.
1135 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
1136 /// comparison succeeds, which can result in more efficient code on some platforms. The
1137 /// return value is a result indicating whether the new value was written and containing the
1140 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1141 /// ordering of this operation. The first describes the required ordering if the
1142 /// operation succeeds while the second describes the required ordering when the
1143 /// operation fails. Using [`Acquire`] as success ordering makes the store part
1144 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1145 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1146 /// and must be equivalent to or weaker than the success ordering.
1148 /// [`compare_exchange`]: #method.compare_exchange
1149 /// [`Ordering`]: enum.Ordering.html
1150 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1151 /// [`Release`]: enum.Ordering.html#variant.Release
1152 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1153 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1158 /// use std::sync::atomic::{AtomicPtr, Ordering};
1160 /// let some_ptr = AtomicPtr::new(&mut 5);
1162 /// let new = &mut 10;
1163 /// let mut old = some_ptr.load(Ordering::Relaxed);
1165 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1167 /// Err(x) => old = x,
1172 /// **Note**: This method may not be available on some platforms.
1174 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1175 #[cfg(target_has_atomic = "ptr")]
1176 pub fn compare_exchange_weak(
1182 ) -> Result<*mut T, *mut T> {
1183 // SAFETY: data races are prevented by atomic intrinsics.
1185 let res = atomic_compare_exchange_weak(
1186 self.p.get() as *mut usize,
1193 Ok(x) => Ok(x as *mut T),
1194 Err(x) => Err(x as *mut T),
1200 #[cfg(target_has_atomic_load_store = "8")]
1201 #[stable(feature = "atomic_bool_from", since = "1.24.0")]
1202 impl From<bool> for AtomicBool {
1203 /// Converts a `bool` into an `AtomicBool`.
1208 /// use std::sync::atomic::AtomicBool;
1209 /// let atomic_bool = AtomicBool::from(true);
1210 /// assert_eq!(format!("{:?}", atomic_bool), "true")
1213 fn from(b: bool) -> Self {
1218 #[cfg(target_has_atomic_load_store = "ptr")]
1219 #[stable(feature = "atomic_from", since = "1.23.0")]
1220 impl<T> From<*mut T> for AtomicPtr<T> {
1222 fn from(p: *mut T) -> Self {
1227 #[cfg(target_has_atomic_load_store = "8")]
1228 macro_rules! atomic_int {
1233 $stable_access:meta,
1237 $stable_init_const:meta,
1238 $s_int_type:expr, $int_ref:expr,
1239 $extra_feature:expr,
1240 $min_fn:ident, $max_fn:ident,
1243 $int_type:ident $atomic_type:ident $atomic_init:ident) => {
1244 /// An integer type which can be safely shared between threads.
1246 /// This type has the same in-memory representation as the underlying
1247 /// integer type, [`
1248 #[doc = $s_int_type]
1251 /// ). For more about the differences between atomic types and
1252 /// non-atomic types as well as information about the portability of
1253 /// this type, please see the [module-level documentation].
1255 /// **Note**: This type may not be available on some platforms.
1257 /// [module-level documentation]: index.html
1259 #[repr(C, align($align))]
1260 pub struct $atomic_type {
1261 v: UnsafeCell<$int_type>,
1264 /// An atomic integer initialized to `0`.
1265 #[$stable_init_const]
1268 reason = "the `new` function is now preferred",
1269 suggestion = $atomic_new,
1271 pub const $atomic_init: $atomic_type = $atomic_type::new(0);
1274 impl Default for $atomic_type {
1275 fn default() -> Self {
1276 Self::new(Default::default())
1281 impl From<$int_type> for $atomic_type {
1284 "Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`."),
1286 fn from(v: $int_type) -> Self { Self::new(v) }
1291 impl fmt::Debug for $atomic_type {
1292 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1293 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
1297 // Send is implicitly implemented.
1299 unsafe impl Sync for $atomic_type {}
1303 concat!("Creates a new atomic integer.
1308 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1310 let atomic_forty_two = ", stringify!($atomic_type), "::new(42);
1315 pub const fn new(v: $int_type) -> Self {
1316 Self {v: UnsafeCell::new(v)}
1321 concat!("Returns a mutable reference to the underlying integer.
1323 This is safe because the mutable reference guarantees that no other threads are
1324 concurrently accessing the atomic data.
1329 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1331 let mut some_var = ", stringify!($atomic_type), "::new(10);
1332 assert_eq!(*some_var.get_mut(), 10);
1333 *some_var.get_mut() = 5;
1334 assert_eq!(some_var.load(Ordering::SeqCst), 5);
1338 pub fn get_mut(&mut self) -> &mut $int_type {
1339 // SAFETY: the mutable reference guarantees unique ownership.
1340 unsafe { &mut *self.v.get() }
1345 concat!("Consumes the atomic and returns the contained value.
1347 This is safe because passing `self` by value guarantees that no other threads are
1348 concurrently accessing the atomic data.
1353 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1355 let some_var = ", stringify!($atomic_type), "::new(5);
1356 assert_eq!(some_var.into_inner(), 5);
1360 pub fn into_inner(self) -> $int_type {
1366 concat!("Loads a value from the atomic integer.
1368 `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1369 Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1373 Panics if `order` is [`Release`] or [`AcqRel`].
1375 [`Ordering`]: enum.Ordering.html
1376 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1377 [`Release`]: enum.Ordering.html#variant.Release
1378 [`Acquire`]: enum.Ordering.html#variant.Acquire
1379 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1380 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1385 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1387 let some_var = ", stringify!($atomic_type), "::new(5);
1389 assert_eq!(some_var.load(Ordering::Relaxed), 5);
1393 pub fn load(&self, order: Ordering) -> $int_type {
1394 // SAFETY: data races are prevented by atomic intrinsics.
1395 unsafe { atomic_load(self.v.get(), order) }
1400 concat!("Stores a value into the atomic integer.
1402 `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1403 Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
1407 Panics if `order` is [`Acquire`] or [`AcqRel`].
1409 [`Ordering`]: enum.Ordering.html
1410 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1411 [`Release`]: enum.Ordering.html#variant.Release
1412 [`Acquire`]: enum.Ordering.html#variant.Acquire
1413 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1414 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1419 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1421 let some_var = ", stringify!($atomic_type), "::new(5);
1423 some_var.store(10, Ordering::Relaxed);
1424 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1428 pub fn store(&self, val: $int_type, order: Ordering) {
1429 // SAFETY: data races are prevented by atomic intrinsics.
1430 unsafe { atomic_store(self.v.get(), val, order); }
1435 concat!("Stores a value into the atomic integer, returning the previous value.
1437 `swap` takes an [`Ordering`] argument which describes the memory ordering
1438 of this operation. All ordering modes are possible. Note that using
1439 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1440 using [`Release`] makes the load part [`Relaxed`].
1442 [`Ordering`]: enum.Ordering.html
1443 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1444 [`Release`]: enum.Ordering.html#variant.Release
1445 [`Acquire`]: enum.Ordering.html#variant.Acquire
1450 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1452 let some_var = ", stringify!($atomic_type), "::new(5);
1454 assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
1457 **Note**: This method may not be available on some platforms."),
1461 pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
1462 // SAFETY: data races are prevented by atomic intrinsics.
1463 unsafe { atomic_swap(self.v.get(), val, order) }
1468 concat!("Stores a value into the atomic integer if the current value is the same as
1469 the `current` value.
1471 The return value is always the previous value. If it is equal to `current`, then the
1474 `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1475 ordering of this operation. Notice that even when using [`AcqRel`], the operation
1476 might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1477 Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1478 happens, and using [`Release`] makes the load part [`Relaxed`].
1480 [`Ordering`]: enum.Ordering.html
1481 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1482 [`Release`]: enum.Ordering.html#variant.Release
1483 [`Acquire`]: enum.Ordering.html#variant.Acquire
1484 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1489 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1491 let some_var = ", stringify!($atomic_type), "::new(5);
1493 assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
1494 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1496 assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
1497 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1500 **Note**: This method may not be available on some platforms."),
1504 pub fn compare_and_swap(&self,
1507 order: Ordering) -> $int_type {
1508 match self.compare_exchange(current,
1511 strongest_failure_ordering(order)) {
1519 concat!("Stores a value into the atomic integer if the current value is the same as
1520 the `current` value.
1522 The return value is a result indicating whether the new value was written and
1523 containing the previous value. On success this value is guaranteed to be equal to
1526 `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1527 ordering of this operation. The first describes the required ordering if the
1528 operation succeeds while the second describes the required ordering when the
1529 operation fails. Using [`Acquire`] as success ordering makes the store part
1530 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1531 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1532 and must be equivalent to or weaker than the success ordering.
1534 [`Ordering`]: enum.Ordering.html
1535 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1536 [`Release`]: enum.Ordering.html#variant.Release
1537 [`Acquire`]: enum.Ordering.html#variant.Acquire
1538 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1543 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1545 let some_var = ", stringify!($atomic_type), "::new(5);
1547 assert_eq!(some_var.compare_exchange(5, 10,
1551 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1553 assert_eq!(some_var.compare_exchange(6, 12,
1557 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1560 **Note**: This method may not be available on some platforms."),
1564 pub fn compare_exchange(&self,
1568 failure: Ordering) -> Result<$int_type, $int_type> {
1569 // SAFETY: data races are prevented by atomic intrinsics.
1570 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
1575 concat!("Stores a value into the atomic integer if the current value is the same as
1576 the `current` value.
1578 Unlike [`compare_exchange`], this function is allowed to spuriously fail even
1579 when the comparison succeeds, which can result in more efficient code on some
1580 platforms. The return value is a result indicating whether the new value was
1581 written and containing the previous value.
1583 `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1584 ordering of this operation. The first describes the required ordering if the
1585 operation succeeds while the second describes the required ordering when the
1586 operation fails. Using [`Acquire`] as success ordering makes the store part
1587 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1588 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1589 and must be equivalent to or weaker than the success ordering.
1591 [`compare_exchange`]: #method.compare_exchange
1592 [`Ordering`]: enum.Ordering.html
1593 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1594 [`Release`]: enum.Ordering.html#variant.Release
1595 [`Acquire`]: enum.Ordering.html#variant.Acquire
1596 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1601 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1603 let val = ", stringify!($atomic_type), "::new(4);
1605 let mut old = val.load(Ordering::Relaxed);
1608 match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1615 **Note**: This method may not be available on some platforms."),
1619 pub fn compare_exchange_weak(&self,
1623 failure: Ordering) -> Result<$int_type, $int_type> {
1624 // SAFETY: data races are prevented by atomic intrinsics.
1626 atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
1632 concat!("Adds to the current value, returning the previous value.
1634 This operation wraps around on overflow.
1636 `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
1637 of this operation. All ordering modes are possible. Note that using
1638 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1639 using [`Release`] makes the load part [`Relaxed`].
1641 [`Ordering`]: enum.Ordering.html
1642 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1643 [`Release`]: enum.Ordering.html#variant.Release
1644 [`Acquire`]: enum.Ordering.html#variant.Acquire
1649 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1651 let foo = ", stringify!($atomic_type), "::new(0);
1652 assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
1653 assert_eq!(foo.load(Ordering::SeqCst), 10);
1656 **Note**: This method may not be available on some platforms."),
1660 pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
1661 // SAFETY: data races are prevented by atomic intrinsics.
1662 unsafe { atomic_add(self.v.get(), val, order) }
1667 concat!("Subtracts from the current value, returning the previous value.
1669 This operation wraps around on overflow.
1671 `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
1672 of this operation. All ordering modes are possible. Note that using
1673 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1674 using [`Release`] makes the load part [`Relaxed`].
1676 [`Ordering`]: enum.Ordering.html
1677 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1678 [`Release`]: enum.Ordering.html#variant.Release
1679 [`Acquire`]: enum.Ordering.html#variant.Acquire
1684 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1686 let foo = ", stringify!($atomic_type), "::new(20);
1687 assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
1688 assert_eq!(foo.load(Ordering::SeqCst), 10);
1691 **Note**: This method may not be available on some platforms."),
1695 pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
1696 // SAFETY: data races are prevented by atomic intrinsics.
1697 unsafe { atomic_sub(self.v.get(), val, order) }
1702 concat!("Bitwise \"and\" with the current value.
1704 Performs a bitwise \"and\" operation on the current value and the argument `val`, and
1705 sets the new value to the result.
1707 Returns the previous value.
1709 `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
1710 of this operation. All ordering modes are possible. Note that using
1711 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1712 using [`Release`] makes the load part [`Relaxed`].
1714 [`Ordering`]: enum.Ordering.html
1715 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1716 [`Release`]: enum.Ordering.html#variant.Release
1717 [`Acquire`]: enum.Ordering.html#variant.Acquire
1722 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1724 let foo = ", stringify!($atomic_type), "::new(0b101101);
1725 assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
1726 assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
1729 **Note**: This method may not be available on some platforms."),
1733 pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
1734 // SAFETY: data races are prevented by atomic intrinsics.
1735 unsafe { atomic_and(self.v.get(), val, order) }
1740 concat!("Bitwise \"nand\" with the current value.
1742 Performs a bitwise \"nand\" operation on the current value and the argument `val`, and
1743 sets the new value to the result.
1745 Returns the previous value.
1747 `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
1748 of this operation. All ordering modes are possible. Note that using
1749 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1750 using [`Release`] makes the load part [`Relaxed`].
1752 [`Ordering`]: enum.Ordering.html
1753 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1754 [`Release`]: enum.Ordering.html#variant.Release
1755 [`Acquire`]: enum.Ordering.html#variant.Acquire
1760 ", $extra_feature, "
1761 use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1763 let foo = ", stringify!($atomic_type), "::new(0x13);
1764 assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
1765 assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
1768 **Note**: This method may not be available on some platforms."),
1772 pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
1773 // SAFETY: data races are prevented by atomic intrinsics.
1774 unsafe { atomic_nand(self.v.get(), val, order) }
1779 concat!("Bitwise \"or\" with the current value.
1781 Performs a bitwise \"or\" operation on the current value and the argument `val`, and
1782 sets the new value to the result.
1784 Returns the previous value.
1786 `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
1787 of this operation. All ordering modes are possible. Note that using
1788 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1789 using [`Release`] makes the load part [`Relaxed`].
1791 [`Ordering`]: enum.Ordering.html
1792 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1793 [`Release`]: enum.Ordering.html#variant.Release
1794 [`Acquire`]: enum.Ordering.html#variant.Acquire
1799 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1801 let foo = ", stringify!($atomic_type), "::new(0b101101);
1802 assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
1803 assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
1806 **Note**: This method may not be available on some platforms."),
1810 pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
1811 // SAFETY: data races are prevented by atomic intrinsics.
1812 unsafe { atomic_or(self.v.get(), val, order) }
1817 concat!("Bitwise \"xor\" with the current value.
1819 Performs a bitwise \"xor\" operation on the current value and the argument `val`, and
1820 sets the new value to the result.
1822 Returns the previous value.
1824 `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
1825 of this operation. All ordering modes are possible. Note that using
1826 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1827 using [`Release`] makes the load part [`Relaxed`].
1829 [`Ordering`]: enum.Ordering.html
1830 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1831 [`Release`]: enum.Ordering.html#variant.Release
1832 [`Acquire`]: enum.Ordering.html#variant.Acquire
1837 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1839 let foo = ", stringify!($atomic_type), "::new(0b101101);
1840 assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
1841 assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
1844 **Note**: This method may not be available on some platforms."),
1848 pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
1849 // SAFETY: data races are prevented by atomic intrinsics.
1850 unsafe { atomic_xor(self.v.get(), val, order) }
1855 concat!("Fetches the value, and applies a function to it that returns an optional
1856 new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
1857 `Err(previous_value)`.
1859 Note: This may call the function multiple times if the value has been changed from other threads in
1860 the meantime, as long as the function returns `Some(_)`, but the function will have been applied
1861 only once to the stored value.
1863 `fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
1864 The first describes the required ordering for when the operation finally succeeds while the second
1865 describes the required ordering for loads. These correspond to the success and failure orderings of
1866 [`compare_exchange`] respectively.
1868 Using [`Acquire`] as success ordering makes the store part
1869 of this operation [`Relaxed`], and using [`Release`] makes the final successful load
1870 [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1871 and must be equivalent to or weaker than the success ordering.
1873 [`bool`]: ../../../std/primitive.bool.html
1874 [`compare_exchange`]: #method.compare_exchange
1875 [`Ordering`]: enum.Ordering.html
1876 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1877 [`Release`]: enum.Ordering.html#variant.Release
1878 [`Acquire`]: enum.Ordering.html#variant.Acquire
1879 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1884 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1886 let x = ", stringify!($atomic_type), "::new(7);
1887 assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7));
1888 assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7));
1889 assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8));
1890 assert_eq!(x.load(Ordering::SeqCst), 9);
1893 **Note**: This method may not be available on some platforms."),
1895 #[stable(feature = "no_more_cas", since = "1.45.0")]
1897 pub fn fetch_update<F>(&self,
1898 set_order: Ordering,
1899 fetch_order: Ordering,
1900 mut f: F) -> Result<$int_type, $int_type>
1901 where F: FnMut($int_type) -> Option<$int_type> {
1902 let mut prev = self.load(fetch_order);
1903 while let Some(next) = f(prev) {
1904 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
1905 x @ Ok(_) => return x,
1906 Err(next_prev) => prev = next_prev
// `fetch_max`: doc text (a `concat!` string literal in the `atomic_int!`
// macro body) followed by the generated method.
1914 concat!("Maximum with the current value.
1916 Finds the maximum of the current value and the argument `val`, and
1917 sets the new value to the result.
1919 Returns the previous value.
1921 `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
1922 of this operation. All ordering modes are possible. Note that using
1923 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1924 using [`Release`] makes the load part [`Relaxed`].
1926 [`Ordering`]: enum.Ordering.html
1927 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1928 [`Release`]: enum.Ordering.html#variant.Release
1929 [`Acquire`]: enum.Ordering.html#variant.Acquire
1934 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1936 let foo = ", stringify!($atomic_type), "::new(23);
1937 assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
1938 assert_eq!(foo.load(Ordering::SeqCst), 42);
1941 If you want to obtain the maximum value in one step, you can use the following:
1944 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1946 let foo = ", stringify!($atomic_type), "::new(23);
1948 let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
1949 assert!(max_foo == 42);
1952 **Note**: This method may not be available on some platforms."),
// Delegates to the intrinsic wrapper bound to `$max_fn` by the macro
// invocation (the invocations below pass `atomic_max` for signed types and
// `atomic_umax` for unsigned ones).
1954 #[stable(feature = "atomic_min_max", since = "1.45.0")]
1956 pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
1957 // SAFETY: data races are prevented by atomic intrinsics.
1958 unsafe { $max_fn(self.v.get(), val, order) }
// `fetch_min`: doc text (a `concat!` string literal in the `atomic_int!`
// macro body) followed by the generated method — the mirror of `fetch_max`.
1963 concat!("Minimum with the current value.
1965 Finds the minimum of the current value and the argument `val`, and
1966 sets the new value to the result.
1968 Returns the previous value.
1970 `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
1971 of this operation. All ordering modes are possible. Note that using
1972 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1973 using [`Release`] makes the load part [`Relaxed`].
1975 [`Ordering`]: enum.Ordering.html
1976 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1977 [`Release`]: enum.Ordering.html#variant.Release
1978 [`Acquire`]: enum.Ordering.html#variant.Acquire
1983 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1985 let foo = ", stringify!($atomic_type), "::new(23);
1986 assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
1987 assert_eq!(foo.load(Ordering::Relaxed), 23);
1988 assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
1989 assert_eq!(foo.load(Ordering::Relaxed), 22);
1992 If you want to obtain the minimum value in one step, you can use the following:
1995 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1997 let foo = ", stringify!($atomic_type), "::new(23);
1999 let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
2000 assert_eq!(min_foo, 12);
2003 **Note**: This method may not be available on some platforms."),
// Delegates to `$min_fn` — `atomic_min` (signed) or `atomic_umin` (unsigned)
// depending on the macro invocation.
2005 #[stable(feature = "atomic_min_max", since = "1.45.0")]
2007 pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
2008 // SAFETY: data races are prevented by atomic intrinsics.
2009 unsafe { $min_fn(self.v.get(), val, order) }
// `as_mut_ptr`: unstable escape hatch exposing the raw pointer to the inner
// integer, primarily for FFI with C atomic APIs.
2014 concat!("Returns a mutable pointer to the underlying integer.
2016 Doing non-atomic reads and writes on the resulting integer can be a data race.
2017 This method is mostly useful for FFI, where the function signature may use
2018 `*mut ", stringify!($int_type), "` instead of `&", stringify!($atomic_type), "`.
2020 Returning an `*mut` pointer from a shared reference to this atomic is safe because the
2021 atomic types work with interior mutability. All modifications of an atomic change the value
2022 through a shared reference, and can do so safely as long as they use atomic operations. Any
2023 use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
2024 restriction: operations on it must be atomic.
2028 ```ignore (extern-declaration)
2030 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
2033 fn my_atomic_op(arg: *mut ", stringify!($int_type), ");
2036 let mut atomic = ", stringify!($atomic_type), "::new(1);
2038 // SAFETY: Safe as long as `my_atomic_op` is atomic.
2040 my_atomic_op(atomic.as_mut_ptr());
2045 #[unstable(feature = "atomic_mut_ptr",
2046 reason = "recently added",
2048 pub fn as_mut_ptr(&self) -> *mut $int_type {
// NOTE(review): the method body is missing from this excerpt — presumably
// it returns the inner `UnsafeCell`'s pointer (`self.v.get()`); confirm
// against the complete source.
// Instantiations of the `atomic_int!` macro, one per atomic integer type.
// Each group supplies: the `cfg(target_has_atomic = ...)` gate, stability
// attributes for the generated items, the primitive's name and doc link,
// the min/max intrinsic pair (signed `atomic_min`/`atomic_max` vs unsigned
// `atomic_umin`/`atomic_umax`), and the primitive/atomic/const-init names.
// NOTE(review): the embedded listing numbers are non-contiguous — the
// `atomic_int! {` / `}` delimiters and some argument lines are missing from
// this excerpt; confirm against the complete source before editing.
// --- AtomicI8 ---
2056 #[cfg(target_has_atomic_load_store = "8")]
2058 cfg(target_has_atomic = "8"),
2059 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2060 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2061 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2062 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2063 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2064 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2065 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2066 unstable(feature = "integer_atomics", issue = "32976"),
2067 "i8", "../../../std/primitive.i8.html",
2069 atomic_min, atomic_max,
2072 i8 AtomicI8 ATOMIC_I8_INIT
// --- AtomicU8 ---
2074 #[cfg(target_has_atomic_load_store = "8")]
2076 cfg(target_has_atomic = "8"),
2077 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2078 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2079 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2080 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2081 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2082 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2083 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2084 unstable(feature = "integer_atomics", issue = "32976"),
2085 "u8", "../../../std/primitive.u8.html",
2087 atomic_umin, atomic_umax,
2090 u8 AtomicU8 ATOMIC_U8_INIT
// --- AtomicI16 ---
2092 #[cfg(target_has_atomic_load_store = "16")]
2094 cfg(target_has_atomic = "16"),
2095 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2096 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2097 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2098 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2099 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2100 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2101 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2102 unstable(feature = "integer_atomics", issue = "32976"),
2103 "i16", "../../../std/primitive.i16.html",
2105 atomic_min, atomic_max,
2107 "AtomicI16::new(0)",
2108 i16 AtomicI16 ATOMIC_I16_INIT
// --- AtomicU16 ---
2110 #[cfg(target_has_atomic_load_store = "16")]
2112 cfg(target_has_atomic = "16"),
2113 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2114 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2115 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2116 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2117 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2118 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2119 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2120 unstable(feature = "integer_atomics", issue = "32976"),
2121 "u16", "../../../std/primitive.u16.html",
2123 atomic_umin, atomic_umax,
2125 "AtomicU16::new(0)",
2126 u16 AtomicU16 ATOMIC_U16_INIT
// --- AtomicI32 ---
2128 #[cfg(target_has_atomic_load_store = "32")]
2130 cfg(target_has_atomic = "32"),
2131 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2132 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2133 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2134 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2135 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2136 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2137 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2138 unstable(feature = "integer_atomics", issue = "32976"),
2139 "i32", "../../../std/primitive.i32.html",
2141 atomic_min, atomic_max,
2143 "AtomicI32::new(0)",
2144 i32 AtomicI32 ATOMIC_I32_INIT
// --- AtomicU32 ---
2146 #[cfg(target_has_atomic_load_store = "32")]
2148 cfg(target_has_atomic = "32"),
2149 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2150 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2151 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2152 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2153 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2154 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2155 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2156 unstable(feature = "integer_atomics", issue = "32976"),
2157 "u32", "../../../std/primitive.u32.html",
2159 atomic_umin, atomic_umax,
2161 "AtomicU32::new(0)",
2162 u32 AtomicU32 ATOMIC_U32_INIT
// --- AtomicI64 ---
2164 #[cfg(target_has_atomic_load_store = "64")]
2166 cfg(target_has_atomic = "64"),
2167 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2168 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2169 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2170 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2171 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2172 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2173 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2174 unstable(feature = "integer_atomics", issue = "32976"),
2175 "i64", "../../../std/primitive.i64.html",
2177 atomic_min, atomic_max,
2179 "AtomicI64::new(0)",
2180 i64 AtomicI64 ATOMIC_I64_INIT
// --- AtomicU64 ---
2182 #[cfg(target_has_atomic_load_store = "64")]
2184 cfg(target_has_atomic = "64"),
2185 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2186 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2187 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2188 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2189 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2190 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2191 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2192 unstable(feature = "integer_atomics", issue = "32976"),
2193 "u64", "../../../std/primitive.u64.html",
2195 atomic_umin, atomic_umax,
2197 "AtomicU64::new(0)",
2198 u64 AtomicU64 ATOMIC_U64_INIT
// --- AtomicI128 (fully unstable; extra `#![feature(integer_atomics)]`
// line is prepended to the generated doc examples) ---
2200 #[cfg(target_has_atomic_load_store = "128")]
2202 cfg(target_has_atomic = "128"),
2203 unstable(feature = "integer_atomics", issue = "32976"),
2204 unstable(feature = "integer_atomics", issue = "32976"),
2205 unstable(feature = "integer_atomics", issue = "32976"),
2206 unstable(feature = "integer_atomics", issue = "32976"),
2207 unstable(feature = "integer_atomics", issue = "32976"),
2208 unstable(feature = "integer_atomics", issue = "32976"),
2209 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2210 unstable(feature = "integer_atomics", issue = "32976"),
2211 "i128", "../../../std/primitive.i128.html",
2212 "#![feature(integer_atomics)]\n\n",
2213 atomic_min, atomic_max,
2215 "AtomicI128::new(0)",
2216 i128 AtomicI128 ATOMIC_I128_INIT
// --- AtomicU128 (fully unstable) ---
2218 #[cfg(target_has_atomic_load_store = "128")]
2220 cfg(target_has_atomic = "128"),
2221 unstable(feature = "integer_atomics", issue = "32976"),
2222 unstable(feature = "integer_atomics", issue = "32976"),
2223 unstable(feature = "integer_atomics", issue = "32976"),
2224 unstable(feature = "integer_atomics", issue = "32976"),
2225 unstable(feature = "integer_atomics", issue = "32976"),
2226 unstable(feature = "integer_atomics", issue = "32976"),
2227 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2228 unstable(feature = "integer_atomics", issue = "32976"),
2229 "u128", "../../../std/primitive.u128.html",
2230 "#![feature(integer_atomics)]\n\n",
2231 atomic_umin, atomic_umax,
2233 "AtomicU128::new(0)",
2234 u128 AtomicU128 ATOMIC_U128_INIT
// Width-selection helper: one `ptr_width!` definition is compiled per
// `target_pointer_width`, so the isize/usize docs can mention the width.
// NOTE(review): the macro bodies are missing from this excerpt.
2236 #[cfg(target_has_atomic_load_store = "ptr")]
2237 #[cfg(target_pointer_width = "16")]
2238 macro_rules! ptr_width {
2243 #[cfg(target_has_atomic_load_store = "ptr")]
2244 #[cfg(target_pointer_width = "32")]
2245 macro_rules! ptr_width {
2250 #[cfg(target_has_atomic_load_store = "ptr")]
2251 #[cfg(target_pointer_width = "64")]
2252 macro_rules! ptr_width {
// --- AtomicIsize (stable since 1.0, unlike the fixed-width atomics) ---
2257 #[cfg(target_has_atomic_load_store = "ptr")]
2259 cfg(target_has_atomic = "ptr"),
2260 stable(feature = "rust1", since = "1.0.0"),
2261 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2262 stable(feature = "atomic_debug", since = "1.3.0"),
2263 stable(feature = "atomic_access", since = "1.15.0"),
2264 stable(feature = "atomic_from", since = "1.23.0"),
2265 stable(feature = "atomic_nand", since = "1.27.0"),
2266 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2267 stable(feature = "rust1", since = "1.0.0"),
2268 "isize", "../../../std/primitive.isize.html",
2270 atomic_min, atomic_max,
2272 "AtomicIsize::new(0)",
2273 isize AtomicIsize ATOMIC_ISIZE_INIT
// --- AtomicUsize (stable since 1.0) ---
2275 #[cfg(target_has_atomic_load_store = "ptr")]
2277 cfg(target_has_atomic = "ptr"),
2278 stable(feature = "rust1", since = "1.0.0"),
2279 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2280 stable(feature = "atomic_debug", since = "1.3.0"),
2281 stable(feature = "atomic_access", since = "1.15.0"),
2282 stable(feature = "atomic_from", since = "1.23.0"),
2283 stable(feature = "atomic_nand", since = "1.27.0"),
2284 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2285 stable(feature = "rust1", since = "1.0.0"),
2286 "usize", "../../../std/primitive.usize.html",
2288 atomic_umin, atomic_umax,
2290 "AtomicUsize::new(0)",
2291 usize AtomicUsize ATOMIC_USIZE_INIT
// Free helper functions wrapping the compiler's atomic intrinsics. Each one
// dispatches on `Ordering` to the matching intrinsic variant and panics on
// orderings that are meaningless for the operation (see the panic arms).
// NOTE(review): the embedded listing numbers are non-contiguous — the
// `match order {` openers and closing braces of these functions are missing
// from this excerpt; only the dispatch arms are visible.
2295 #[cfg(target_has_atomic = "8")]
// Maps a success ordering to the strongest failure ordering that a
// compare-exchange may legally pair with it. Body is missing from this
// excerpt — confirm against the complete source.
2296 fn strongest_failure_ordering(order: Ordering) -> Ordering {
// Atomic store. Acquire/AcqRel are load-side orderings and panic here.
2307 unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {
2309 Release => intrinsics::atomic_store_rel(dst, val),
2310 Relaxed => intrinsics::atomic_store_relaxed(dst, val),
2311 SeqCst => intrinsics::atomic_store(dst, val),
2312 Acquire => panic!("there is no such thing as an acquire store"),
2313 AcqRel => panic!("there is no such thing as an acquire/release store"),
// Atomic load. Release/AcqRel are store-side orderings and panic here.
2318 unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
2320 Acquire => intrinsics::atomic_load_acq(dst),
2321 Relaxed => intrinsics::atomic_load_relaxed(dst),
2322 SeqCst => intrinsics::atomic_load(dst),
2323 Release => panic!("there is no such thing as a release load"),
2324 AcqRel => panic!("there is no such thing as an acquire/release load"),
// Atomic exchange: stores `val` and returns the previously stored value.
2329 #[cfg(target_has_atomic = "8")]
2330 unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2332 Acquire => intrinsics::atomic_xchg_acq(dst, val),
2333 Release => intrinsics::atomic_xchg_rel(dst, val),
2334 AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
2335 Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
2336 SeqCst => intrinsics::atomic_xchg(dst, val),
2340 /// Returns the previous value (like __sync_fetch_and_add).
2342 #[cfg(target_has_atomic = "8")]
2343 unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2345 Acquire => intrinsics::atomic_xadd_acq(dst, val),
2346 Release => intrinsics::atomic_xadd_rel(dst, val),
2347 AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
2348 Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
2349 SeqCst => intrinsics::atomic_xadd(dst, val),
2353 /// Returns the previous value (like __sync_fetch_and_sub).
2355 #[cfg(target_has_atomic = "8")]
2356 unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2358 Acquire => intrinsics::atomic_xsub_acq(dst, val),
2359 Release => intrinsics::atomic_xsub_rel(dst, val),
2360 AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
2361 Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
2362 SeqCst => intrinsics::atomic_xsub(dst, val),
// Strong compare-exchange. Dispatches on the (success, failure) ordering
// pair; invalid pairs (AcqRel/Release failure, or failure stronger than
// success) panic. NOTE(review): the parameter list (dst/old/new/success/
// failure) is missing from this excerpt.
2367 #[cfg(target_has_atomic = "8")]
2368 unsafe fn atomic_compare_exchange<T: Copy>(
2375 let (val, ok) = match (success, failure) {
2376 (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
2377 (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
2378 (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
2379 (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
2380 (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
2381 (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
2382 (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
2383 (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
2384 (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
2385 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
2386 (_, Release) => panic!("there is no such thing as a release failure ordering"),
2387 _ => panic!("a failure ordering can't be stronger than a success ordering"),
2389 if ok { Ok(val) } else { Err(val) }
// Weak compare-exchange (may fail spuriously); same ordering-pair dispatch
// as the strong variant, using the `cxchgweak` intrinsics.
2393 #[cfg(target_has_atomic = "8")]
2394 unsafe fn atomic_compare_exchange_weak<T: Copy>(
2401 let (val, ok) = match (success, failure) {
2402 (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
2403 (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
2404 (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
2405 (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
2406 (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
2407 (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
2408 (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
2409 (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
2410 (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
2411 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
2412 (_, Release) => panic!("there is no such thing as a release failure ordering"),
2413 _ => panic!("a failure ordering can't be stronger than a success ordering"),
2415 if ok { Ok(val) } else { Err(val) }
// Atomic bitwise AND, dispatched on `order`.
2419 #[cfg(target_has_atomic = "8")]
2420 unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2422 Acquire => intrinsics::atomic_and_acq(dst, val),
2423 Release => intrinsics::atomic_and_rel(dst, val),
2424 AcqRel => intrinsics::atomic_and_acqrel(dst, val),
2425 Relaxed => intrinsics::atomic_and_relaxed(dst, val),
2426 SeqCst => intrinsics::atomic_and(dst, val),
// Atomic bitwise NAND, dispatched on `order`.
2431 #[cfg(target_has_atomic = "8")]
2432 unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2434 Acquire => intrinsics::atomic_nand_acq(dst, val),
2435 Release => intrinsics::atomic_nand_rel(dst, val),
2436 AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
2437 Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
2438 SeqCst => intrinsics::atomic_nand(dst, val),
// Atomic bitwise OR, dispatched on `order`.
2443 #[cfg(target_has_atomic = "8")]
2444 unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2446 Acquire => intrinsics::atomic_or_acq(dst, val),
2447 Release => intrinsics::atomic_or_rel(dst, val),
2448 AcqRel => intrinsics::atomic_or_acqrel(dst, val),
2449 Relaxed => intrinsics::atomic_or_relaxed(dst, val),
2450 SeqCst => intrinsics::atomic_or(dst, val),
// Atomic bitwise XOR, dispatched on `order`.
2455 #[cfg(target_has_atomic = "8")]
2456 unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2458 Acquire => intrinsics::atomic_xor_acq(dst, val),
2459 Release => intrinsics::atomic_xor_rel(dst, val),
2460 AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
2461 Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
2462 SeqCst => intrinsics::atomic_xor(dst, val),
2466 /// returns the max value (signed comparison)
2468 #[cfg(target_has_atomic = "8")]
2469 unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2471 Acquire => intrinsics::atomic_max_acq(dst, val),
2472 Release => intrinsics::atomic_max_rel(dst, val),
2473 AcqRel => intrinsics::atomic_max_acqrel(dst, val),
2474 Relaxed => intrinsics::atomic_max_relaxed(dst, val),
2475 SeqCst => intrinsics::atomic_max(dst, val),
2479 /// returns the min value (signed comparison)
2481 #[cfg(target_has_atomic = "8")]
2482 unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2484 Acquire => intrinsics::atomic_min_acq(dst, val),
2485 Release => intrinsics::atomic_min_rel(dst, val),
2486 AcqRel => intrinsics::atomic_min_acqrel(dst, val),
2487 Relaxed => intrinsics::atomic_min_relaxed(dst, val),
2488 SeqCst => intrinsics::atomic_min(dst, val),
2492 /// returns the max value (unsigned comparison)
2494 #[cfg(target_has_atomic = "8")]
2495 unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2497 Acquire => intrinsics::atomic_umax_acq(dst, val),
2498 Release => intrinsics::atomic_umax_rel(dst, val),
2499 AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
2500 Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
2501 SeqCst => intrinsics::atomic_umax(dst, val),
2505 /// returns the min value (unsigned comparison)
2507 #[cfg(target_has_atomic = "8")]
2508 unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2510 Acquire => intrinsics::atomic_umin_acq(dst, val),
2511 Release => intrinsics::atomic_umin_rel(dst, val),
2512 AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
2513 Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
2514 SeqCst => intrinsics::atomic_umin(dst, val),
2518 /// An atomic fence.
2520 /// Depending on the specified order, a fence prevents the compiler and CPU from
2521 /// reordering certain types of memory operations around it.
2522 /// That creates synchronizes-with relationships between it and atomic operations
2523 /// or fences in other threads.
2525 /// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
2526 /// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
2527 /// exist operations X and Y, both operating on some atomic object 'M' such
2528 /// that A is sequenced before X, Y is synchronized before B and Y observes
2529 /// the change to M. This provides a happens-before dependence between A and B.
2532 ///     Thread 1                                          Thread 2
2534 /// fence(Release);      A --------------
2535 /// x.store(3, Relaxed); X ---------    |
2538 ///                                -------------> Y  if x.load(Relaxed) == 3 {
2539 ///                                |-------> B       fence(Acquire);
2544 /// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
2547 /// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
2548 /// and [`Release`] semantics, participates in the global program order of the
2549 /// other [`SeqCst`] operations and/or fences.
2551 /// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
2555 /// Panics if `order` is [`Relaxed`].
2560 /// use std::sync::atomic::AtomicBool;
2561 /// use std::sync::atomic::fence;
2562 /// use std::sync::atomic::Ordering;
2564 /// // A mutual exclusion primitive based on spinlock.
2565 /// pub struct Mutex {
2566 ///     flag: AtomicBool,
2570 ///     pub fn new() -> Mutex {
2572 ///             flag: AtomicBool::new(false),
2576 ///     pub fn lock(&self) {
2577 ///         while !self.flag.compare_and_swap(false, true, Ordering::Relaxed) {}
2578 ///         // This fence synchronizes-with store in `unlock`.
2579 ///         fence(Ordering::Acquire);
2582 ///     pub fn unlock(&self) {
2583 ///         self.flag.store(false, Ordering::Release);
2588 /// [`Ordering`]: enum.Ordering.html
2589 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2590 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2591 /// [`Release`]: enum.Ordering.html#variant.Release
2592 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2593 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2595 #[stable(feature = "rust1", since = "1.0.0")]
// `order` is unused on wasm32 (the fence intrinsics are cfg'd out below),
// hence the targeted `allow(unused_variables)`.
2596 #[cfg_attr(target_arch = "wasm32", allow(unused_variables))]
2597 pub fn fence(order: Ordering) {
2598 // On wasm32 it looks like fences aren't implemented in LLVM yet in that
2599 // they will cause LLVM to abort. The wasm instruction set doesn't have
2600 // fences right now. There's discussion online about the best way for tools
2601 // to conventionally implement fences at
2602 // https://github.com/WebAssembly/tool-conventions/issues/59. We should
2603 // follow that discussion and implement a solution when one comes about!
2604 #[cfg(not(target_arch = "wasm32"))]
2605 // SAFETY: using an atomic fence is safe.
// NOTE(review): the `unsafe { match order {` lines enclosing these arms are
// missing from this excerpt (embedded numbering skips 2606-2607).
2608 Acquire => intrinsics::atomic_fence_acq(),
2609 Release => intrinsics::atomic_fence_rel(),
2610 AcqRel => intrinsics::atomic_fence_acqrel(),
2611 SeqCst => intrinsics::atomic_fence(),
2612 Relaxed => panic!("there is no such thing as a relaxed fence"),
2617 /// A compiler memory fence.
2619 /// `compiler_fence` does not emit any machine code, but restricts the kinds
2620 /// of memory re-ordering the compiler is allowed to do. Specifically, depending on
2621 /// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
2622 /// or writes from before or after the call to the other side of the call to
2623 /// `compiler_fence`. Note that it does **not** prevent the *hardware*
2624 /// from doing such re-ordering. This is not a problem in a single-threaded,
2625 /// execution context, but when other threads may modify memory at the same
2626 /// time, stronger synchronization primitives such as [`fence`] are required.
2628 /// The re-ordering prevented by the different ordering semantics are:
2630 /// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
2631 /// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
2632 /// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
2633 /// - with [`AcqRel`], both of the above rules are enforced.
2635 /// `compiler_fence` is generally only useful for preventing a thread from
2636 /// racing *with itself*. That is, if a given thread is executing one piece
2637 /// of code, and is then interrupted, and starts executing code elsewhere
2638 /// (while still in the same thread, and conceptually still on the same
2639 /// core). In traditional programs, this can only occur when a signal
2640 /// handler is registered. In more low-level code, such situations can also
2641 /// arise when handling interrupts, when implementing green threads with
2642 /// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
2643 /// discussion of [memory barriers].
2647 /// Panics if `order` is [`Relaxed`].
2651 /// Without `compiler_fence`, the `assert_eq!` in following code
2652 /// is *not* guaranteed to succeed, despite everything happening in a single thread.
2653 /// To see why, remember that the compiler is free to swap the stores to
2654 /// `IMPORTANT_VARIABLE` and `IS_READ` since they are both
2655 /// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
2656 /// after `IS_READY` is updated, then the signal handler will see
2657 /// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
2658 /// Using a `compiler_fence` remedies this situation.
2661 /// use std::sync::atomic::{AtomicBool, AtomicUsize};
2662 /// use std::sync::atomic::Ordering;
2663 /// use std::sync::atomic::compiler_fence;
2665 /// static IMPORTANT_VARIABLE: AtomicUsize = AtomicUsize::new(0);
2666 /// static IS_READY: AtomicBool = AtomicBool::new(false);
2669 ///     IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
2670 ///     // prevent earlier writes from being moved beyond this point
2671 ///     compiler_fence(Ordering::Release);
2672 ///     IS_READY.store(true, Ordering::Relaxed);
2675 /// fn signal_handler() {
2676 ///     if IS_READY.load(Ordering::Relaxed) {
2677 ///         assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
2682 /// [`fence`]: fn.fence.html
2683 /// [`Ordering`]: enum.Ordering.html
2684 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2685 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2686 /// [`Release`]: enum.Ordering.html#variant.Release
2687 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2688 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2689 /// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
2691 #[stable(feature = "compiler_fences", since = "1.21.0")]
// Dispatches to the single-thread fence intrinsics, which constrain only
// compiler reordering (no hardware barrier is emitted).
2692 pub fn compiler_fence(order: Ordering) {
2693 // SAFETY: using an atomic fence is safe.
// NOTE(review): the `unsafe { match order {` lines enclosing these arms are
// missing from this excerpt (embedded numbering skips 2694-2695).
2696 Acquire => intrinsics::atomic_singlethreadfence_acq(),
2697 Release => intrinsics::atomic_singlethreadfence_rel(),
2698 AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
2699 SeqCst => intrinsics::atomic_singlethreadfence(),
2700 Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
// `Debug` impls: format the current value, observed with a SeqCst load.
// NOTE(review): the impls' closing braces are missing from this excerpt
// (embedded numbering skips 2710-2712 and 2718-2720).
2705 #[cfg(target_has_atomic_load_store = "8")]
2706 #[stable(feature = "atomic_debug", since = "1.3.0")]
2707 impl fmt::Debug for AtomicBool {
2708 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2709 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2713 #[cfg(target_has_atomic_load_store = "ptr")]
2714 #[stable(feature = "atomic_debug", since = "1.3.0")]
2715 impl<T> fmt::Debug for AtomicPtr<T> {
2716 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2717 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2721 #[cfg(target_has_atomic_load_store = "ptr")]
2722 #[stable(feature = "atomic_pointer", since = "1.24.0")]
2723 impl<T> fmt::Pointer for AtomicPtr<T> {
2724 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2725 fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)