3 //! Atomic types provide primitive shared-memory communication between
4 //! threads, and are the building blocks of other concurrent
7 //! This module defines atomic versions of a select number of primitive
8 //! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
9 //! [`AtomicI8`], [`AtomicU16`], etc.
10 //! Atomic types present operations that, when used correctly, synchronize
11 //! updates between threads.
13 //! [`AtomicBool`]: struct.AtomicBool.html
14 //! [`AtomicIsize`]: struct.AtomicIsize.html
15 //! [`AtomicUsize`]: struct.AtomicUsize.html
16 //! [`AtomicI8`]: struct.AtomicI8.html
17 //! [`AtomicU16`]: struct.AtomicU16.html
19 //! Each method takes an [`Ordering`] which represents the strength of
20 //! the memory barrier for that operation. These orderings are the
21 //! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2].
23 //! [`Ordering`]: enum.Ordering.html
25 //! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order
26 //! [2]: ../../../nomicon/atomics.html
28 //! Atomic variables are safe to share between threads (they implement [`Sync`])
29 //! but they do not themselves provide the mechanism for sharing and follow the
30 //! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
31 //! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
32 //! atomically-reference-counted shared pointer).
34 //! [`Sync`]: ../../marker/trait.Sync.html
35 //! [arc]: ../../../std/sync/struct.Arc.html
37 //! Atomic types may be stored in static variables, initialized using
38 //! the constant initializers like [`AtomicBool::new`]. Atomic statics
39 //! are often used for lazy global initialization.
41 //! [`AtomicBool::new`]: struct.AtomicBool.html#method.new
45 //! All atomic types in this module are guaranteed to be [lock-free] if they're
46 //! available. This means they don't internally acquire a global mutex. Atomic
47 //! types and operations are not guaranteed to be wait-free. This means that
48 //! operations like `fetch_or` may be implemented with a compare-and-swap loop.
50 //! Atomic operations may be implemented at the instruction layer with
51 //! larger-size atomics. For example some platforms use 4-byte atomic
52 //! instructions to implement `AtomicI8`. Note that this emulation should not
//! have an impact on the correctness of code; it's just something to be aware of.
55 //! The atomic types in this module may not be available on all platforms. The
56 //! atomic types here are all widely available, however, and can generally be
//! relied upon to exist. Some notable exceptions are:
59 //! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or
60 //! `AtomicI64` types.
61 //! * ARM platforms like `armv5te` that aren't for Linux do not have any atomics
63 //! * ARM targets with `thumbv6m` do not have atomic operations at all.
65 //! Note that future platforms may be added that also do not have support for
66 //! some atomic operations. Maximally portable code will want to be careful
67 //! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are
68 //! generally the most portable, but even then they're not available everywhere.
69 //! For reference, the `std` library requires pointer-sized atomics, although
72 //! Currently you'll need to use `#[cfg(target_arch)]` primarily to
73 //! conditionally compile in code with atomics. There is an unstable
74 //! `#[cfg(target_has_atomic)]` as well which may be stabilized in the future.
76 //! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm
80 //! A simple spinlock:
83 //! use std::sync::Arc;
84 //! use std::sync::atomic::{AtomicUsize, Ordering};
88 //! let spinlock = Arc::new(AtomicUsize::new(1));
90 //! let spinlock_clone = spinlock.clone();
91 //! let thread = thread::spawn(move|| {
92 //! spinlock_clone.store(0, Ordering::SeqCst);
95 //! // Wait for the other thread to release the lock
96 //! while spinlock.load(Ordering::SeqCst) != 0 {}
98 //! if let Err(panic) = thread.join() {
99 //! println!("Thread had an error: {:?}", panic);
104 //! Keep a global count of live threads:
107 //! use std::sync::atomic::{AtomicUsize, Ordering};
109 //! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0);
111 //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
112 //! println!("live threads: {}", old_thread_count + 1);
115 #![stable(feature = "rust1", since = "1.0.0")]
116 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
117 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))]
119 use self::Ordering::*;
121 use crate::cell::UnsafeCell;
123 use crate::intrinsics;
125 use crate::hint::spin_loop;
/// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
///
/// Upon receiving the spin-loop signal the processor can optimize its behavior by, for example,
/// saving power or switching hyper-threads.
///
/// This function is different from [`std::thread::yield_now`], which directly yields to the
/// system's scheduler, whereas `spin_loop_hint` does not interact with the operating system.
///
/// A common use case for `spin_loop_hint` is implementing bounded optimistic spinning in a CAS
/// loop in synchronization primitives. To avoid problems like priority inversion, it is strongly
/// recommended that the spin loop is terminated after a finite number of iterations and an
/// appropriate blocking syscall is made.
///
/// **Note**: On platforms that do not support receiving spin-loop hints this function does not
/// do anything at all.
///
/// [`std::thread::yield_now`]: ../../../std/thread/fn.yield_now.html
/// [`std::thread::sleep`]: ../../../std/thread/fn.sleep.html
/// [`std::sync::Mutex`]: ../../../std/sync/struct.Mutex.html
#[stable(feature = "spin_loop_hint", since = "1.24.0")]
pub fn spin_loop_hint() {
/// A boolean type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a [`bool`].
///
/// [`bool`]: ../../../std/primitive.bool.html
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
impl Default for AtomicBool {
/// Creates an `AtomicBool` initialized to `false`.
fn default() -> Self {
// Send is implicitly implemented for AtomicBool.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
// SAFETY note: sharing `AtomicBool` across threads is sound because all
// shared mutation of the inner value goes through atomic intrinsics (see
// `load`/`store`/`swap` below), so concurrent access cannot race.
unsafe impl Sync for AtomicBool {}
/// A raw pointer type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a `*mut T`.
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
// Force the alignment to the pointer width so the single `*mut T` field is
// suitably aligned for the `usize`-sized atomic operations used by
// `load`/`store` (which cast the field to `*mut usize`).
#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
pub struct AtomicPtr<T> {
p: UnsafeCell<*mut T>,
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for AtomicPtr<T> {
/// Creates a null `AtomicPtr<T>`.
fn default() -> AtomicPtr<T> {
AtomicPtr::new(crate::ptr::null_mut())
// NOTE(review): `Send`/`Sync` are implemented for any `T` (no `T: Send`
// bound) because `AtomicPtr` only stores and exchanges the raw pointer
// value atomically; it never dereferences it.
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Sync for AtomicPtr<T> {}
/// Atomic memory orderings
///
/// Memory orderings specify the way atomic operations synchronize memory.
/// In its weakest form, [`Relaxed`][Ordering::Relaxed], only the memory directly touched by the
/// operation is synchronized. On the other hand, a store-load pair of [`SeqCst`][Ordering::SeqCst]
/// operations synchronize other memory while additionally preserving a total order of such
/// operations across all threads.
///
/// Rust's memory orderings are [the same as those of
/// C++20](https://en.cppreference.com/w/cpp/atomic/memory_order).
///
/// For more information see the [nomicon].
///
/// [nomicon]: ../../../nomicon/atomics.html
/// [Ordering::Relaxed]: #variant.Relaxed
/// [Ordering::SeqCst]: #variant.SeqCst
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
/// No ordering constraints, only atomic operations.
///
/// Corresponds to [`memory_order_relaxed`] in C++20.
///
/// [`memory_order_relaxed`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering
#[stable(feature = "rust1", since = "1.0.0")]
/// When coupled with a store, all previous operations become ordered
/// before any load of this value with [`Acquire`] (or stronger) ordering.
/// In particular, all previous writes become visible to all threads
/// that perform an [`Acquire`] (or stronger) load of this value.
///
/// Notice that using this ordering for an operation that combines loads
/// and stores leads to a [`Relaxed`] load operation!
///
/// This ordering is only applicable for operations that can perform a store.
///
/// Corresponds to [`memory_order_release`] in C++20.
///
/// [`Release`]: #variant.Release
/// [`Acquire`]: #variant.Acquire
/// [`Relaxed`]: #variant.Relaxed
/// [`memory_order_release`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
#[stable(feature = "rust1", since = "1.0.0")]
/// When coupled with a load, if the loaded value was written by a store operation with
/// [`Release`] (or stronger) ordering, then all subsequent operations
/// become ordered after that store. In particular, all subsequent loads will see data
/// written before the store.
///
/// Notice that using this ordering for an operation that combines loads
/// and stores leads to a [`Relaxed`] store operation!
///
/// This ordering is only applicable for operations that can perform a load.
///
/// Corresponds to [`memory_order_acquire`] in C++20.
///
/// [`Acquire`]: #variant.Acquire
/// [`Release`]: #variant.Release
/// [`Relaxed`]: #variant.Relaxed
/// [`memory_order_acquire`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
#[stable(feature = "rust1", since = "1.0.0")]
/// Has the effects of both [`Acquire`] and [`Release`] together:
/// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
///
/// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
/// not performing any store and hence it has just [`Acquire`] ordering. However,
/// `AcqRel` will never perform [`Relaxed`] accesses.
///
/// This ordering is only applicable for operations that combine both loads and stores.
///
/// Corresponds to [`memory_order_acq_rel`] in C++20.
///
/// [`memory_order_acq_rel`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
/// [`Acquire`]: #variant.Acquire
/// [`Release`]: #variant.Release
/// [`Relaxed`]: #variant.Relaxed
#[stable(feature = "rust1", since = "1.0.0")]
/// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
/// operations, respectively) with the additional guarantee that all threads see all
/// sequentially consistent operations in the same order.
///
/// Corresponds to [`memory_order_seq_cst`] in C++20.
///
/// [`memory_order_seq_cst`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering
/// [`Acquire`]: #variant.Acquire
/// [`Release`]: #variant.Release
/// [`AcqRel`]: #variant.AcqRel
#[stable(feature = "rust1", since = "1.0.0")]
/// An [`AtomicBool`] initialized to `false`.
///
/// [`AtomicBool`]: struct.AtomicBool.html
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
reason = "the `new` function is now preferred",
suggestion = "AtomicBool::new(false)"
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
#[cfg(target_has_atomic_load_store = "8")]
/// Creates a new `AtomicBool`.
///
/// # Examples
///
/// ```
/// use std::sync::atomic::AtomicBool;
///
/// let atomic_true = AtomicBool::new(true);
/// let atomic_false = AtomicBool::new(false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_atomic_new", since = "1.32.0")]
pub const fn new(v: bool) -> AtomicBool {
// The `bool` is stored as a `u8` (`false` -> 0, `true` -> 1) so that the
// 8-bit atomic intrinsics can operate on it.
AtomicBool { v: UnsafeCell::new(v as u8) }
/// Returns a mutable reference to the underlying [`bool`].
///
/// This is safe because the mutable reference guarantees that no other threads are
/// concurrently accessing the atomic data.
///
/// [`bool`]: ../../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let mut some_bool = AtomicBool::new(true);
/// assert_eq!(*some_bool.get_mut(), true);
/// *some_bool.get_mut() = false;
/// assert_eq!(some_bool.load(Ordering::SeqCst), false);
/// ```
#[stable(feature = "atomic_access", since = "1.15.0")]
pub fn get_mut(&mut self) -> &mut bool {
// SAFETY: the mutable reference guarantees unique ownership, and the
// stored byte is always a valid `bool` (0 or 1, see `new`), so
// reinterpreting it as `&mut bool` is sound.
unsafe { &mut *(self.v.get() as *mut bool) }
/// Consumes the atomic and returns the contained value.
///
/// This is safe because passing `self` by value guarantees that no other threads are
/// concurrently accessing the atomic data.
///
/// # Examples
///
/// ```
/// use std::sync::atomic::AtomicBool;
///
/// let some_bool = AtomicBool::new(true);
/// assert_eq!(some_bool.into_inner(), true);
/// ```
#[stable(feature = "atomic_access", since = "1.15.0")]
pub fn into_inner(self) -> bool {
// A non-zero byte means `true`; exclusive ownership means no
// synchronization is needed here.
self.v.into_inner() != 0
/// Loads a value from the bool.
///
/// `load` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
///
/// # Panics
///
/// Panics if `order` is [`Release`] or [`AcqRel`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let some_bool = AtomicBool::new(true);
///
/// assert_eq!(some_bool.load(Ordering::Relaxed), true);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn load(&self, order: Ordering) -> bool {
// SAFETY: data races are prevented by atomic intrinsics
unsafe { atomic_load(self.v.get(), order) != 0 }
/// Stores a value into the bool.
///
/// `store` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
///
/// # Panics
///
/// Panics if `order` is [`Acquire`] or [`AcqRel`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let some_bool = AtomicBool::new(true);
///
/// some_bool.store(false, Ordering::Relaxed);
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn store(&self, val: bool, order: Ordering) {
// SAFETY: data races are prevented by atomic intrinsics
atomic_store(self.v.get(), val as u8, order);
/// Stores a value into the bool, returning the previous value.
///
/// `swap` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let some_bool = AtomicBool::new(true);
///
/// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "8")]
pub fn swap(&self, val: bool, order: Ordering) -> bool {
// SAFETY: data races are prevented by atomic intrinsics
unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
/// Stores a value into the [`bool`] if the current value is the same as the `current` value.
///
/// The return value is always the previous value. If it is equal to `current`, then the value
/// was updated.
///
/// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
/// ordering of this operation. Notice that even when using [`AcqRel`], the operation
/// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
/// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
/// happens, and using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
/// [`bool`]: ../../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let some_bool = AtomicBool::new(true);
///
/// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
///
/// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "8")]
pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
// Delegate to `compare_exchange`, deriving the strongest failure ordering
// that is legal for the given success ordering.
match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
/// Stores a value into the [`bool`] if the current value is the same as the `current` value.
///
/// The return value is a result indicating whether the new value was written and containing
/// the previous value. On success this value is guaranteed to be equal to `current`.
///
/// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
/// ordering of this operation. The first describes the required ordering if the
/// operation succeeds while the second describes the required ordering when the
/// operation fails. Using [`Acquire`] as success ordering makes the store part
/// of this operation [`Relaxed`], and using [`Release`] makes the successful load
/// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
/// and must be equivalent to or weaker than the success ordering.
///
/// [`bool`]: ../../../std/primitive.bool.html
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
///
/// # Examples
///
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let some_bool = AtomicBool::new(true);
///
/// assert_eq!(some_bool.compare_exchange(true,
/// Ordering::Acquire,
/// Ordering::Relaxed),
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
///
/// assert_eq!(some_bool.compare_exchange(true, true,
/// Ordering::SeqCst,
/// Ordering::Acquire),
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
#[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
#[cfg(target_has_atomic = "8")]
pub fn compare_exchange(
) -> Result<bool, bool> {
// SAFETY: data races are prevented by atomic intrinsics
atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
// Map the raw `u8` result back to `bool`.
Err(x) => Err(x != 0),
/// Stores a value into the [`bool`] if the current value is the same as the `current` value.
///
/// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
/// comparison succeeds, which can result in more efficient code on some platforms. The
/// return value is a result indicating whether the new value was written and containing the
/// previous value.
///
/// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
/// ordering of this operation. The first describes the required ordering if the
/// operation succeeds while the second describes the required ordering when the
/// operation fails. Using [`Acquire`] as success ordering makes the store part
/// of this operation [`Relaxed`], and using [`Release`] makes the successful load
/// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
/// and must be equivalent to or weaker than the success ordering.
///
/// [`bool`]: ../../../std/primitive.bool.html
/// [`compare_exchange`]: #method.compare_exchange
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
///
/// # Examples
///
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let val = AtomicBool::new(false);
///
/// let mut old = val.load(Ordering::Relaxed);
///
/// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
/// Err(x) => old = x,
#[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
#[cfg(target_has_atomic = "8")]
pub fn compare_exchange_weak(
) -> Result<bool, bool> {
// SAFETY: data races are prevented by atomic intrinsics
atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
// Map the raw `u8` result back to `bool`.
Err(x) => Err(x != 0),
/// Logical "and" with a boolean value.
///
/// Performs a logical "and" operation on the current value and the argument `val`, and sets
/// the new value to the result.
///
/// Returns the previous value.
///
/// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(false);
/// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "8")]
pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
// SAFETY: data races are prevented by atomic intrinsics
unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
/// Logical "nand" with a boolean value.
///
/// Performs a logical "nand" operation on the current value and the argument `val`, and sets
/// the new value to the result.
///
/// Returns the previous value.
///
/// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
///
/// let foo = AtomicBool::new(false);
/// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "8")]
pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
// We can't use atomic_nand here because it can result in a bool with
// an invalid value. This happens because the atomic operation is done
// with an 8-bit integer internally, which would set the upper 7 bits.
// So we just use fetch_xor or swap instead.
// !(x & true) == !x
// We must invert the bool.
self.fetch_xor(true, order)
// !(x & false) == true
// We must set the bool to true.
self.swap(true, order)
/// Logical "or" with a boolean value.
///
/// Performs a logical "or" operation on the current value and the argument `val`, and sets the
/// new value to the result.
///
/// Returns the previous value.
///
/// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(false);
/// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "8")]
pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
// SAFETY: data races are prevented by atomic intrinsics
unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
/// Logical "xor" with a boolean value.
///
/// Performs a logical "xor" operation on the current value and the argument `val`, and sets
/// the new value to the result.
///
/// Returns the previous value.
///
/// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
///
/// let foo = AtomicBool::new(false);
/// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "8")]
pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
// SAFETY: data races are prevented by atomic intrinsics
unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
/// Returns a mutable pointer to the underlying [`bool`].
///
/// Doing non-atomic reads and writes on the resulting `bool` can be a data race.
/// This method is mostly useful for FFI, where the function signature may use
/// `*mut bool` instead of `&AtomicBool`.
///
/// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
/// atomic types work with interior mutability. All modifications of an atomic change the value
/// through a shared reference, and can do so safely as long as they use atomic operations. Any
/// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
/// restriction: operations on it must be atomic.
///
/// [`bool`]: ../../../std/primitive.bool.html
///
/// # Examples
///
/// ```ignore (extern-declaration)
/// use std::sync::atomic::AtomicBool;
///
/// fn my_atomic_op(arg: *mut bool);
///
/// let mut atomic = AtomicBool::new(true);
/// my_atomic_op(atomic.as_mut_ptr());
/// ```
#[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")]
pub fn as_mut_ptr(&self) -> *mut bool {
// The inner byte always holds a valid `bool` value (0 or 1), so exposing
// it as `*mut bool` is sound for callers that access it atomically.
self.v.get() as *mut bool
#[cfg(target_has_atomic_load_store = "ptr")]
impl<T> AtomicPtr<T> {
/// Creates a new `AtomicPtr`.
///
/// # Examples
///
/// ```
/// use std::sync::atomic::AtomicPtr;
///
/// let ptr = &mut 5;
/// let atomic_ptr = AtomicPtr::new(ptr);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_atomic_new", since = "1.32.0")]
pub const fn new(p: *mut T) -> AtomicPtr<T> {
AtomicPtr { p: UnsafeCell::new(p) }
865 /// Returns a mutable reference to the underlying pointer.
867 /// This is safe because the mutable reference guarantees that no other threads are
868 /// concurrently accessing the atomic data.
873 /// use std::sync::atomic::{AtomicPtr, Ordering};
875 /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
876 /// *atomic_ptr.get_mut() = &mut 5;
877 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
880 #[stable(feature = "atomic_access", since = "1.15.0")]
881 pub fn get_mut(&mut self) -> &mut *mut T {
882 // SAFETY: the mutable reference guarantees unique ownership
883 unsafe { &mut *self.p.get() }
886 /// Consumes the atomic and returns the contained value.
888 /// This is safe because passing `self` by value guarantees that no other threads are
889 /// concurrently accessing the atomic data.
894 /// use std::sync::atomic::AtomicPtr;
896 /// let atomic_ptr = AtomicPtr::new(&mut 5);
897 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
900 #[stable(feature = "atomic_access", since = "1.15.0")]
901 pub fn into_inner(self) -> *mut T {
905 /// Loads a value from the pointer.
907 /// `load` takes an [`Ordering`] argument which describes the memory ordering
908 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
912 /// Panics if `order` is [`Release`] or [`AcqRel`].
914 /// [`Ordering`]: enum.Ordering.html
915 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
916 /// [`Release`]: enum.Ordering.html#variant.Release
917 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
918 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
919 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
924 /// use std::sync::atomic::{AtomicPtr, Ordering};
926 /// let ptr = &mut 5;
927 /// let some_ptr = AtomicPtr::new(ptr);
929 /// let value = some_ptr.load(Ordering::Relaxed);
932 #[stable(feature = "rust1", since = "1.0.0")]
933 pub fn load(&self, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics. The pointer is
// round-tripped through `usize` because the intrinsic operates on
// integers; a thin `*mut T` is pointer-sized, so both `as` casts are
// lossless.
935 unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
938 /// Stores a value into the pointer.
940 /// `store` takes an [`Ordering`] argument which describes the memory ordering
941 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
945 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
947 /// [`Ordering`]: enum.Ordering.html
948 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
949 /// [`Release`]: enum.Ordering.html#variant.Release
950 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
951 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
952 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
957 /// use std::sync::atomic::{AtomicPtr, Ordering};
959 /// let ptr = &mut 5;
960 /// let some_ptr = AtomicPtr::new(ptr);
962 /// let other_ptr = &mut 10;
964 /// some_ptr.store(other_ptr, Ordering::Relaxed);
967 #[stable(feature = "rust1", since = "1.0.0")]
968 pub fn store(&self, ptr: *mut T, order: Ordering) {
// SAFETY: data races are prevented by atomic intrinsics. The pointer is
// cast to `usize` so the integer-based intrinsic can store it; this is
// lossless for a thin `*mut T`.
// NOTE(review): the `unsafe {` opener and the closing braces for this
// call fall on lines elided from this excerpt — confirm against the
// full source file.
971 atomic_store(self.p.get() as *mut usize, ptr as usize, order);
975 /// Stores a value into the pointer, returning the previous value.
977 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
978 /// of this operation. All ordering modes are possible. Note that using
979 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
980 /// using [`Release`] makes the load part [`Relaxed`].
982 /// [`Ordering`]: enum.Ordering.html
983 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
984 /// [`Release`]: enum.Ordering.html#variant.Release
985 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
990 /// use std::sync::atomic::{AtomicPtr, Ordering};
992 /// let ptr = &mut 5;
993 /// let some_ptr = AtomicPtr::new(ptr);
995 /// let other_ptr = &mut 10;
997 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
1000 #[stable(feature = "rust1", since = "1.0.0")]
1001 #[cfg(target_has_atomic = "ptr")]
1002 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics. The casts to
// and from `usize` adapt the pointer to the integer-based intrinsic and
// are lossless for a thin `*mut T`.
1004 unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
1007 /// Stores a value into the pointer if the current value is the same as the `current` value.
1009 /// The return value is always the previous value. If it is equal to `current`, then the value
1012 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1013 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
1014 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1015 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1016 /// happens, and using [`Release`] makes the load part [`Relaxed`].
1018 /// [`Ordering`]: enum.Ordering.html
1019 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1020 /// [`Release`]: enum.Ordering.html#variant.Release
1021 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1022 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1027 /// use std::sync::atomic::{AtomicPtr, Ordering};
1029 /// let ptr = &mut 5;
1030 /// let some_ptr = AtomicPtr::new(ptr);
1032 /// let other_ptr = &mut 10;
1034 /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
1037 #[stable(feature = "rust1", since = "1.0.0")]
1038 #[cfg(target_has_atomic = "ptr")]
1039 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
1040 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
1046 /// Stores a value into the pointer if the current value is the same as the `current` value.
1048 /// The return value is a result indicating whether the new value was written and containing
1049 /// the previous value. On success this value is guaranteed to be equal to `current`.
1051 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1052 /// ordering of this operation. The first describes the required ordering if the
1053 /// operation succeeds while the second describes the required ordering when the
1054 /// operation fails. Using [`Acquire`] as success ordering makes the store part
1055 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1056 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1057 /// and must be equivalent to or weaker than the success ordering.
1059 /// [`Ordering`]: enum.Ordering.html
1060 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1061 /// [`Release`]: enum.Ordering.html#variant.Release
1062 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1063 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1068 /// use std::sync::atomic::{AtomicPtr, Ordering};
1070 /// let ptr = &mut 5;
1071 /// let some_ptr = AtomicPtr::new(ptr);
1073 /// let other_ptr = &mut 10;
1075 /// let value = some_ptr.compare_exchange(ptr, other_ptr,
1076 /// Ordering::SeqCst, Ordering::Relaxed);
1079 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1080 #[cfg(target_has_atomic = "ptr")]
1081 pub fn compare_exchange(
1087 ) -> Result<*mut T, *mut T> {
1088 // SAFETY: data races are prevented by atomic intrinsics
1090 let res = atomic_compare_exchange(
1091 self.p.get() as *mut usize,
// NOTE(review): the remaining intrinsic arguments and the `match res`
// opener fall on lines elided from this excerpt — confirm against the
// full source file.
// Translate the integer-based result back into raw pointers on both
// branches; the carried value is always the previously stored pointer.
1098 Ok(x) => Ok(x as *mut T),
1099 Err(x) => Err(x as *mut T),
1104 /// Stores a value into the pointer if the current value is the same as the `current` value.
1106 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
1107 /// comparison succeeds, which can result in more efficient code on some platforms. The
1108 /// return value is a result indicating whether the new value was written and containing the
1111 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1112 /// ordering of this operation. The first describes the required ordering if the
1113 /// operation succeeds while the second describes the required ordering when the
1114 /// operation fails. Using [`Acquire`] as success ordering makes the store part
1115 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1116 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1117 /// and must be equivalent to or weaker than the success ordering.
1119 /// [`compare_exchange`]: #method.compare_exchange
1120 /// [`Ordering`]: enum.Ordering.html
1121 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1122 /// [`Release`]: enum.Ordering.html#variant.Release
1123 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1124 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1129 /// use std::sync::atomic::{AtomicPtr, Ordering};
1131 /// let some_ptr = AtomicPtr::new(&mut 5);
1133 /// let new = &mut 10;
1134 /// let mut old = some_ptr.load(Ordering::Relaxed);
1136 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1138 /// Err(x) => old = x,
1143 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1144 #[cfg(target_has_atomic = "ptr")]
1145 pub fn compare_exchange_weak(
1151 ) -> Result<*mut T, *mut T> {
1152 // SAFETY: data races are prevented by atomic intrinsics
1154 let res = atomic_compare_exchange_weak(
1155 self.p.get() as *mut usize,
1162 Ok(x) => Ok(x as *mut T),
1163 Err(x) => Err(x as *mut T),
1169 #[cfg(target_has_atomic_load_store = "8")]
1170 #[stable(feature = "atomic_bool_from", since = "1.24.0")]
1171 impl From<bool> for AtomicBool {
1172 /// Converts a `bool` into an `AtomicBool`.
1177 /// use std::sync::atomic::AtomicBool;
1178 /// let atomic_bool = AtomicBool::from(true);
1179 /// assert_eq!(format!("{:?}", atomic_bool), "true")
1182 fn from(b: bool) -> Self {
1187 #[cfg(target_has_atomic_load_store = "ptr")]
1188 #[stable(feature = "atomic_from", since = "1.23.0")]
1189 impl<T> From<*mut T> for AtomicPtr<T> {
1191 fn from(p: *mut T) -> Self {
1196 #[cfg(target_has_atomic_load_store = "8")]
1197 macro_rules! atomic_int {
1202 $stable_access:meta,
1206 $stable_init_const:meta,
1207 $s_int_type:expr, $int_ref:expr,
1208 $extra_feature:expr,
1209 $min_fn:ident, $max_fn:ident,
1212 $int_type:ident $atomic_type:ident $atomic_init:ident) => {
1213 /// An integer type which can be safely shared between threads.
1215 /// This type has the same in-memory representation as the underlying
1216 /// integer type, [`
1217 #[doc = $s_int_type]
1220 /// ). For more about the differences between atomic types and
1221 /// non-atomic types as well as information about the portability of
1222 /// this type, please see the [module-level documentation].
1224 /// [module-level documentation]: index.html
1226 #[repr(C, align($align))]
1227 pub struct $atomic_type {
1228 v: UnsafeCell<$int_type>,
1231 /// An atomic integer initialized to `0`.
1232 #[$stable_init_const]
1235 reason = "the `new` function is now preferred",
1236 suggestion = $atomic_new,
1238 pub const $atomic_init: $atomic_type = $atomic_type::new(0);
1241 impl Default for $atomic_type {
1242 fn default() -> Self {
1243 Self::new(Default::default())
1248 impl From<$int_type> for $atomic_type {
1251 "Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`."),
1253 fn from(v: $int_type) -> Self { Self::new(v) }
1258 impl fmt::Debug for $atomic_type {
1259 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1260 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
1264 // Send is implicitly implemented.
1266 unsafe impl Sync for $atomic_type {}
1270 concat!("Creates a new atomic integer.
1275 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1277 let atomic_forty_two = ", stringify!($atomic_type), "::new(42);
1282 pub const fn new(v: $int_type) -> Self {
1283 Self {v: UnsafeCell::new(v)}
1288 concat!("Returns a mutable reference to the underlying integer.
1290 This is safe because the mutable reference guarantees that no other threads are
1291 concurrently accessing the atomic data.
1296 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1298 let mut some_var = ", stringify!($atomic_type), "::new(10);
1299 assert_eq!(*some_var.get_mut(), 10);
1300 *some_var.get_mut() = 5;
1301 assert_eq!(some_var.load(Ordering::SeqCst), 5);
1305 pub fn get_mut(&mut self) -> &mut $int_type {
1306 // SAFETY: the mutable reference guarantees unique ownership
1307 unsafe { &mut *self.v.get() }
1312 concat!("Consumes the atomic and returns the contained value.
1314 This is safe because passing `self` by value guarantees that no other threads are
1315 concurrently accessing the atomic data.
1320 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1322 let some_var = ", stringify!($atomic_type), "::new(5);
1323 assert_eq!(some_var.into_inner(), 5);
1327 pub fn into_inner(self) -> $int_type {
1333 concat!("Loads a value from the atomic integer.
1335 `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1336 Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1340 Panics if `order` is [`Release`] or [`AcqRel`].
1342 [`Ordering`]: enum.Ordering.html
1343 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1344 [`Release`]: enum.Ordering.html#variant.Release
1345 [`Acquire`]: enum.Ordering.html#variant.Acquire
1346 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1347 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1352 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1354 let some_var = ", stringify!($atomic_type), "::new(5);
1356 assert_eq!(some_var.load(Ordering::Relaxed), 5);
1360 pub fn load(&self, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics; `self.v.get()`
// yields a valid pointer to the contained integer, properly aligned via
// the type's `#[repr(C, align($align))]` attribute.
1362 unsafe { atomic_load(self.v.get(), order) }
1367 concat!("Stores a value into the atomic integer.
1369 `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1370 Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
1374 Panics if `order` is [`Acquire`] or [`AcqRel`].
1376 [`Ordering`]: enum.Ordering.html
1377 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1378 [`Release`]: enum.Ordering.html#variant.Release
1379 [`Acquire`]: enum.Ordering.html#variant.Acquire
1380 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1381 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1386 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1388 let some_var = ", stringify!($atomic_type), "::new(5);
1390 some_var.store(10, Ordering::Relaxed);
1391 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1395 pub fn store(&self, val: $int_type, order: Ordering) {
1396 // SAFETY: data races are prevented by atomic intrinsics
1397 unsafe { atomic_store(self.v.get(), val, order); }
1402 concat!("Stores a value into the atomic integer, returning the previous value.
1404 `swap` takes an [`Ordering`] argument which describes the memory ordering
1405 of this operation. All ordering modes are possible. Note that using
1406 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1407 using [`Release`] makes the load part [`Relaxed`].
1409 [`Ordering`]: enum.Ordering.html
1410 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1411 [`Release`]: enum.Ordering.html#variant.Release
1412 [`Acquire`]: enum.Ordering.html#variant.Acquire
1417 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1419 let some_var = ", stringify!($atomic_type), "::new(5);
1421 assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
1426 pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
1427 // SAFETY: data races are prevented by atomic intrinsics
1428 unsafe { atomic_swap(self.v.get(), val, order) }
1433 concat!("Stores a value into the atomic integer if the current value is the same as
1434 the `current` value.
1436 The return value is always the previous value. If it is equal to `current`, then the
1439 `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1440 ordering of this operation. Notice that even when using [`AcqRel`], the operation
1441 might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1442 Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1443 happens, and using [`Release`] makes the load part [`Relaxed`].
1445 [`Ordering`]: enum.Ordering.html
1446 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1447 [`Release`]: enum.Ordering.html#variant.Release
1448 [`Acquire`]: enum.Ordering.html#variant.Acquire
1449 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1454 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1456 let some_var = ", stringify!($atomic_type), "::new(5);
1458 assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
1459 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1461 assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
1462 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1467 pub fn compare_and_swap(&self,
1470 order: Ordering) -> $int_type {
1471 match self.compare_exchange(current,
1474 strongest_failure_ordering(order)) {
1482 concat!("Stores a value into the atomic integer if the current value is the same as
1483 the `current` value.
1485 The return value is a result indicating whether the new value was written and
1486 containing the previous value. On success this value is guaranteed to be equal to
1489 `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1490 ordering of this operation. The first describes the required ordering if the
1491 operation succeeds while the second describes the required ordering when the
1492 operation fails. Using [`Acquire`] as success ordering makes the store part
1493 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1494 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1495 and must be equivalent to or weaker than the success ordering.
1497 [`Ordering`]: enum.Ordering.html
1498 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1499 [`Release`]: enum.Ordering.html#variant.Release
1500 [`Acquire`]: enum.Ordering.html#variant.Acquire
1501 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1506 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1508 let some_var = ", stringify!($atomic_type), "::new(5);
1510 assert_eq!(some_var.compare_exchange(5, 10,
1514 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1516 assert_eq!(some_var.compare_exchange(6, 12,
1520 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1525 pub fn compare_exchange(&self,
1529 failure: Ordering) -> Result<$int_type, $int_type> {
1530 // SAFETY: data races are prevented by atomic intrinsics
1531 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
1536 concat!("Stores a value into the atomic integer if the current value is the same as
1537 the `current` value.
1539 Unlike [`compare_exchange`], this function is allowed to spuriously fail even
1540 when the comparison succeeds, which can result in more efficient code on some
1541 platforms. The return value is a result indicating whether the new value was
1542 written and containing the previous value.
1544 `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1545 ordering of this operation. The first describes the required ordering if the
1546 operation succeeds while the second describes the required ordering when the
1547 operation fails. Using [`Acquire`] as success ordering makes the store part
1548 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1549 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1550 and must be equivalent to or weaker than the success ordering.
1552 [`compare_exchange`]: #method.compare_exchange
1553 [`Ordering`]: enum.Ordering.html
1554 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1555 [`Release`]: enum.Ordering.html#variant.Release
1556 [`Acquire`]: enum.Ordering.html#variant.Acquire
1557 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1562 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1564 let val = ", stringify!($atomic_type), "::new(4);
1566 let mut old = val.load(Ordering::Relaxed);
1569 match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1578 pub fn compare_exchange_weak(&self,
1582 failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics. The weak
// variant may fail spuriously even when the comparison succeeds (per
// the doc comment above), which is why callers use it in retry loops.
// NOTE(review): the `unsafe {` opener for this call falls on a line
// elided from this excerpt — confirm against the full source file.
1585 atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
1591 concat!("Adds to the current value, returning the previous value.
1593 This operation wraps around on overflow.
1595 `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
1596 of this operation. All ordering modes are possible. Note that using
1597 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1598 using [`Release`] makes the load part [`Relaxed`].
1600 [`Ordering`]: enum.Ordering.html
1601 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1602 [`Release`]: enum.Ordering.html#variant.Release
1603 [`Acquire`]: enum.Ordering.html#variant.Acquire
1608 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1610 let foo = ", stringify!($atomic_type), "::new(0);
1611 assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
1612 assert_eq!(foo.load(Ordering::SeqCst), 10);
1617 pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
1618 // SAFETY: data races are prevented by atomic intrinsics
1619 unsafe { atomic_add(self.v.get(), val, order) }
1624 concat!("Subtracts from the current value, returning the previous value.
1626 This operation wraps around on overflow.
1628 `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
1629 of this operation. All ordering modes are possible. Note that using
1630 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1631 using [`Release`] makes the load part [`Relaxed`].
1633 [`Ordering`]: enum.Ordering.html
1634 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1635 [`Release`]: enum.Ordering.html#variant.Release
1636 [`Acquire`]: enum.Ordering.html#variant.Acquire
1641 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1643 let foo = ", stringify!($atomic_type), "::new(20);
1644 assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
1645 assert_eq!(foo.load(Ordering::SeqCst), 10);
1650 pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
1651 // SAFETY: data races are prevented by atomic intrinsics
1652 unsafe { atomic_sub(self.v.get(), val, order) }
1657 concat!("Bitwise \"and\" with the current value.
1659 Performs a bitwise \"and\" operation on the current value and the argument `val`, and
1660 sets the new value to the result.
1662 Returns the previous value.
1664 `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
1665 of this operation. All ordering modes are possible. Note that using
1666 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1667 using [`Release`] makes the load part [`Relaxed`].
1669 [`Ordering`]: enum.Ordering.html
1670 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1671 [`Release`]: enum.Ordering.html#variant.Release
1672 [`Acquire`]: enum.Ordering.html#variant.Acquire
1677 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1679 let foo = ", stringify!($atomic_type), "::new(0b101101);
1680 assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
1681 assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
1686 pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
1687 // SAFETY: data races are prevented by atomic intrinsics
1688 unsafe { atomic_and(self.v.get(), val, order) }
1693 concat!("Bitwise \"nand\" with the current value.
1695 Performs a bitwise \"nand\" operation on the current value and the argument `val`, and
1696 sets the new value to the result.
1698 Returns the previous value.
1700 `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
1701 of this operation. All ordering modes are possible. Note that using
1702 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1703 using [`Release`] makes the load part [`Relaxed`].
1705 [`Ordering`]: enum.Ordering.html
1706 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1707 [`Release`]: enum.Ordering.html#variant.Release
1708 [`Acquire`]: enum.Ordering.html#variant.Acquire
1713 ", $extra_feature, "
1714 use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1716 let foo = ", stringify!($atomic_type), "::new(0x13);
1717 assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
1718 assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
1723 pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
1724 // SAFETY: data races are prevented by atomic intrinsics
1725 unsafe { atomic_nand(self.v.get(), val, order) }
1730 concat!("Bitwise \"or\" with the current value.
1732 Performs a bitwise \"or\" operation on the current value and the argument `val`, and
1733 sets the new value to the result.
1735 Returns the previous value.
1737 `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
1738 of this operation. All ordering modes are possible. Note that using
1739 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1740 using [`Release`] makes the load part [`Relaxed`].
1742 [`Ordering`]: enum.Ordering.html
1743 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1744 [`Release`]: enum.Ordering.html#variant.Release
1745 [`Acquire`]: enum.Ordering.html#variant.Acquire
1750 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1752 let foo = ", stringify!($atomic_type), "::new(0b101101);
1753 assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
1754 assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
1759 pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
1760 // SAFETY: data races are prevented by atomic intrinsics
1761 unsafe { atomic_or(self.v.get(), val, order) }
1766 concat!("Bitwise \"xor\" with the current value.
1768 Performs a bitwise \"xor\" operation on the current value and the argument `val`, and
1769 sets the new value to the result.
1771 Returns the previous value.
1773 `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
1774 of this operation. All ordering modes are possible. Note that using
1775 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1776 using [`Release`] makes the load part [`Relaxed`].
1778 [`Ordering`]: enum.Ordering.html
1779 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1780 [`Release`]: enum.Ordering.html#variant.Release
1781 [`Acquire`]: enum.Ordering.html#variant.Acquire
1786 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1788 let foo = ", stringify!($atomic_type), "::new(0b101101);
1789 assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
1790 assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
1795 pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
1796 // SAFETY: data races are prevented by atomic intrinsics
1797 unsafe { atomic_xor(self.v.get(), val, order) }
1802 concat!("Fetches the value, and applies a function to it that returns an optional
1803 new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
1804 `Err(previous_value)`.
1806 Note: This may call the function multiple times if the value has been changed from other threads in
1807 the meantime, as long as the function returns `Some(_)`, but the function will have been applied
1808 only once to the stored value.
1810 `fetch_update` takes two [`Ordering`] arguments to describe the memory
1811 ordering of this operation. The first describes the required ordering for loads
1812 and failed updates while the second describes the required ordering when the
1813 operation finally succeeds. Beware that this is different from the two
1814 modes in [`compare_exchange`]!
1816 Using [`Acquire`] as success ordering makes the store part
1817 of this operation [`Relaxed`], and using [`Release`] makes the final successful load
1818 [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1819 and must be equivalent to or weaker than the success ordering.
1821 [`bool`]: ../../../std/primitive.bool.html
1822 [`compare_exchange`]: #method.compare_exchange
1823 [`Ordering`]: enum.Ordering.html
1824 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1825 [`Release`]: enum.Ordering.html#variant.Release
1826 [`Acquire`]: enum.Ordering.html#variant.Acquire
1827 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1832 #![feature(no_more_cas)]
1833 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1835 let x = ", stringify!($atomic_type), "::new(7);
1836 assert_eq!(x.fetch_update(|_| None, Ordering::SeqCst, Ordering::SeqCst), Err(7));
1837 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(7));
1838 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(8));
1839 assert_eq!(x.load(Ordering::SeqCst), 9);
1842 #[unstable(feature = "no_more_cas",
1843 reason = "no more CAS loops in user code",
1846 pub fn fetch_update<F>(&self,
1848 fetch_order: Ordering,
1849 set_order: Ordering) -> Result<$int_type, $int_type>
1850 where F: FnMut($int_type) -> Option<$int_type> {
// NOTE(review): the closure parameter line (presumably `mut f: F`) is
// elided from this excerpt — confirm against the full source file.
// Classic CAS loop: read the current value, let `f` propose a
// replacement, and try to publish it. `compare_exchange_weak` is used
// because a spurious failure here just costs one cheap retry.
1851 let mut prev = self.load(fetch_order);
1852 while let Some(next) = f(prev) {
1853 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
// Success: return the value that was replaced (the "previous" value).
1854 x @ Ok(_) => return x,
// Another thread won the race; retry with the freshly observed value.
1855 Err(next_prev) => prev = next_prev
1863 concat!("Maximum with the current value.
1865 Finds the maximum of the current value and the argument `val`, and
1866 sets the new value to the result.
1868 Returns the previous value.
1870 `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
1871 of this operation. All ordering modes are possible. Note that using
1872 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1873 using [`Release`] makes the load part [`Relaxed`].
1875 [`Ordering`]: enum.Ordering.html
1876 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1877 [`Release`]: enum.Ordering.html#variant.Release
1878 [`Acquire`]: enum.Ordering.html#variant.Acquire
1883 #![feature(atomic_min_max)]
1884 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1886 let foo = ", stringify!($atomic_type), "::new(23);
1887 assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
1888 assert_eq!(foo.load(Ordering::SeqCst), 42);
1891 If you want to obtain the maximum value in one step, you can use the following:
1894 #![feature(atomic_min_max)]
1895 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1897 let foo = ", stringify!($atomic_type), "::new(23);
1899 let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
1900 assert!(max_foo == 42);
1903 #[unstable(feature = "atomic_min_max",
1904 reason = "easier and faster min/max than writing manual CAS loop",
1907 pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
1908 // SAFETY: data races are prevented by atomic intrinsics
1909 unsafe { $max_fn(self.v.get(), val, order) }
1914 concat!("Minimum with the current value.
1916 Finds the minimum of the current value and the argument `val`, and
1917 sets the new value to the result.
1919 Returns the previous value.
1921 `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
1922 of this operation. All ordering modes are possible. Note that using
1923 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1924 using [`Release`] makes the load part [`Relaxed`].
1926 [`Ordering`]: enum.Ordering.html
1927 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1928 [`Release`]: enum.Ordering.html#variant.Release
1929 [`Acquire`]: enum.Ordering.html#variant.Acquire
1934 #![feature(atomic_min_max)]
1935 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1937 let foo = ", stringify!($atomic_type), "::new(23);
1938 assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
1939 assert_eq!(foo.load(Ordering::Relaxed), 23);
1940 assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
1941 assert_eq!(foo.load(Ordering::Relaxed), 22);
1944 If you want to obtain the minimum value in one step, you can use the following:
1947 #![feature(atomic_min_max)]
1948 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1950 let foo = ", stringify!($atomic_type), "::new(23);
1952 let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
1953 assert_eq!(min_foo, 12);
1956 #[unstable(feature = "atomic_min_max",
1957 reason = "easier and faster min/max than writing manual CAS loop",
1960 pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
1961 // SAFETY: data races are prevented by atomic intrinsics
1962 unsafe { $min_fn(self.v.get(), val, order) }
1967 concat!("Returns a mutable pointer to the underlying integer.
1969 Doing non-atomic reads and writes on the resulting integer can be a data race.
1970 This method is mostly useful for FFI, where the function signature may use
1971 `*mut ", stringify!($int_type), "` instead of `&", stringify!($atomic_type), "`.
1973 Returning an `*mut` pointer from a shared reference to this atomic is safe because the
1974 atomic types work with interior mutability. All modifications of an atomic change the value
1975 through a shared reference, and can do so safely as long as they use atomic operations. Any
1976 use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
1977 restriction: operations on it must be atomic.
1981 ```ignore (extern-declaration)
1983 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1986 fn my_atomic_op(arg: *mut ", stringify!($int_type), ");
1989 let mut atomic = ", stringify!($atomic_type), "::new(1);
1991 my_atomic_op(atomic.as_mut_ptr());
1996 #[unstable(feature = "atomic_mut_ptr",
1997 reason = "recently added",
1999 pub fn as_mut_ptr(&self) -> *mut $int_type {
2007 #[cfg(target_has_atomic_load_store = "8")]
2009 cfg(target_has_atomic = "8"),
2010 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2011 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2012 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2013 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2014 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2015 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2016 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2017 unstable(feature = "integer_atomics", issue = "32976"),
2018 "i8", "../../../std/primitive.i8.html",
2020 atomic_min, atomic_max,
2023 i8 AtomicI8 ATOMIC_I8_INIT
2025 #[cfg(target_has_atomic_load_store = "8")]
2027 cfg(target_has_atomic = "8"),
2028 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2029 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2030 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2031 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2032 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2033 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2034 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2035 unstable(feature = "integer_atomics", issue = "32976"),
2036 "u8", "../../../std/primitive.u8.html",
2038 atomic_umin, atomic_umax,
2041 u8 AtomicU8 ATOMIC_U8_INIT
2043 #[cfg(target_has_atomic_load_store = "16")]
2045 cfg(target_has_atomic = "16"),
2046 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2047 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2048 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2049 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2050 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2051 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2052 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2053 unstable(feature = "integer_atomics", issue = "32976"),
2054 "i16", "../../../std/primitive.i16.html",
2056 atomic_min, atomic_max,
2058 "AtomicI16::new(0)",
2059 i16 AtomicI16 ATOMIC_I16_INIT
2061 #[cfg(target_has_atomic_load_store = "16")]
2063 cfg(target_has_atomic = "16"),
2064 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2065 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2066 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2067 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2068 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2069 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2070 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2071 unstable(feature = "integer_atomics", issue = "32976"),
2072 "u16", "../../../std/primitive.u16.html",
2074 atomic_umin, atomic_umax,
2076 "AtomicU16::new(0)",
2077 u16 AtomicU16 ATOMIC_U16_INIT
2079 #[cfg(target_has_atomic_load_store = "32")]
2081 cfg(target_has_atomic = "32"),
2082 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2083 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2084 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2085 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2086 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2087 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2088 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2089 unstable(feature = "integer_atomics", issue = "32976"),
2090 "i32", "../../../std/primitive.i32.html",
2092 atomic_min, atomic_max,
2094 "AtomicI32::new(0)",
2095 i32 AtomicI32 ATOMIC_I32_INIT
2097 #[cfg(target_has_atomic_load_store = "32")]
2099 cfg(target_has_atomic = "32"),
2100 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2101 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2102 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2103 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2104 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2105 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2106 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2107 unstable(feature = "integer_atomics", issue = "32976"),
2108 "u32", "../../../std/primitive.u32.html",
2110 atomic_umin, atomic_umax,
2112 "AtomicU32::new(0)",
2113 u32 AtomicU32 ATOMIC_U32_INIT
2115 #[cfg(target_has_atomic_load_store = "64")]
2117 cfg(target_has_atomic = "64"),
2118 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2119 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2120 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2121 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2122 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2123 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2124 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2125 unstable(feature = "integer_atomics", issue = "32976"),
2126 "i64", "../../../std/primitive.i64.html",
2128 atomic_min, atomic_max,
2130 "AtomicI64::new(0)",
2131 i64 AtomicI64 ATOMIC_I64_INIT
2133 #[cfg(target_has_atomic_load_store = "64")]
2135 cfg(target_has_atomic = "64"),
2136 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2137 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2138 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2139 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2140 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2141 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2142 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2143 unstable(feature = "integer_atomics", issue = "32976"),
2144 "u64", "../../../std/primitive.u64.html",
2146 atomic_umin, atomic_umax,
2148 "AtomicU64::new(0)",
2149 u64 AtomicU64 ATOMIC_U64_INIT
2151 #[cfg(target_has_atomic_load_store = "128")]
2153 cfg(target_has_atomic = "128"),
2154 unstable(feature = "integer_atomics", issue = "32976"),
2155 unstable(feature = "integer_atomics", issue = "32976"),
2156 unstable(feature = "integer_atomics", issue = "32976"),
2157 unstable(feature = "integer_atomics", issue = "32976"),
2158 unstable(feature = "integer_atomics", issue = "32976"),
2159 unstable(feature = "integer_atomics", issue = "32976"),
2160 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2161 unstable(feature = "integer_atomics", issue = "32976"),
2162 "i128", "../../../std/primitive.i128.html",
2163 "#![feature(integer_atomics)]\n\n",
2164 atomic_min, atomic_max,
2166 "AtomicI128::new(0)",
2167 i128 AtomicI128 ATOMIC_I128_INIT
2169 #[cfg(target_has_atomic_load_store = "128")]
2171 cfg(target_has_atomic = "128"),
2172 unstable(feature = "integer_atomics", issue = "32976"),
2173 unstable(feature = "integer_atomics", issue = "32976"),
2174 unstable(feature = "integer_atomics", issue = "32976"),
2175 unstable(feature = "integer_atomics", issue = "32976"),
2176 unstable(feature = "integer_atomics", issue = "32976"),
2177 unstable(feature = "integer_atomics", issue = "32976"),
2178 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2179 unstable(feature = "integer_atomics", issue = "32976"),
2180 "u128", "../../../std/primitive.u128.html",
2181 "#![feature(integer_atomics)]\n\n",
2182 atomic_umin, atomic_umax,
2184 "AtomicU128::new(0)",
2185 u128 AtomicU128 ATOMIC_U128_INIT
2187 #[cfg(target_has_atomic_load_store = "ptr")]
2188 #[cfg(target_pointer_width = "16")]
2189 macro_rules! ptr_width {
2194 #[cfg(target_has_atomic_load_store = "ptr")]
2195 #[cfg(target_pointer_width = "32")]
2196 macro_rules! ptr_width {
2201 #[cfg(target_has_atomic_load_store = "ptr")]
2202 #[cfg(target_pointer_width = "64")]
2203 macro_rules! ptr_width {
2208 #[cfg(target_has_atomic_load_store = "ptr")]
2210 cfg(target_has_atomic = "ptr"),
2211 stable(feature = "rust1", since = "1.0.0"),
2212 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2213 stable(feature = "atomic_debug", since = "1.3.0"),
2214 stable(feature = "atomic_access", since = "1.15.0"),
2215 stable(feature = "atomic_from", since = "1.23.0"),
2216 stable(feature = "atomic_nand", since = "1.27.0"),
2217 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2218 stable(feature = "rust1", since = "1.0.0"),
2219 "isize", "../../../std/primitive.isize.html",
2221 atomic_min, atomic_max,
2223 "AtomicIsize::new(0)",
2224 isize AtomicIsize ATOMIC_ISIZE_INIT
2226 #[cfg(target_has_atomic_load_store = "ptr")]
2228 cfg(target_has_atomic = "ptr"),
2229 stable(feature = "rust1", since = "1.0.0"),
2230 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2231 stable(feature = "atomic_debug", since = "1.3.0"),
2232 stable(feature = "atomic_access", since = "1.15.0"),
2233 stable(feature = "atomic_from", since = "1.23.0"),
2234 stable(feature = "atomic_nand", since = "1.27.0"),
2235 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2236 stable(feature = "rust1", since = "1.0.0"),
2237 "usize", "../../../std/primitive.usize.html",
2239 atomic_umin, atomic_umax,
2241 "AtomicUsize::new(0)",
2242 usize AtomicUsize ATOMIC_USIZE_INIT
2246 #[cfg(target_has_atomic = "8")]
2247 fn strongest_failure_ordering(order: Ordering) -> Ordering {
2258 unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
2260 Release => intrinsics::atomic_store_rel(dst, val),
2261 Relaxed => intrinsics::atomic_store_relaxed(dst, val),
2262 SeqCst => intrinsics::atomic_store(dst, val),
2263 Acquire => panic!("there is no such thing as an acquire store"),
2264 AcqRel => panic!("there is no such thing as an acquire/release store"),
2269 unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
2271 Acquire => intrinsics::atomic_load_acq(dst),
2272 Relaxed => intrinsics::atomic_load_relaxed(dst),
2273 SeqCst => intrinsics::atomic_load(dst),
2274 Release => panic!("there is no such thing as a release load"),
2275 AcqRel => panic!("there is no such thing as an acquire/release load"),
2280 #[cfg(target_has_atomic = "8")]
2281 unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
2283 Acquire => intrinsics::atomic_xchg_acq(dst, val),
2284 Release => intrinsics::atomic_xchg_rel(dst, val),
2285 AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
2286 Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
2287 SeqCst => intrinsics::atomic_xchg(dst, val),
2291 /// Returns the previous value (like __sync_fetch_and_add).
2293 #[cfg(target_has_atomic = "8")]
2294 unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
2296 Acquire => intrinsics::atomic_xadd_acq(dst, val),
2297 Release => intrinsics::atomic_xadd_rel(dst, val),
2298 AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
2299 Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
2300 SeqCst => intrinsics::atomic_xadd(dst, val),
2304 /// Returns the previous value (like __sync_fetch_and_sub).
2306 #[cfg(target_has_atomic = "8")]
2307 unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
2309 Acquire => intrinsics::atomic_xsub_acq(dst, val),
2310 Release => intrinsics::atomic_xsub_rel(dst, val),
2311 AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
2312 Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
2313 SeqCst => intrinsics::atomic_xsub(dst, val),
2318 #[cfg(target_has_atomic = "8")]
2319 unsafe fn atomic_compare_exchange<T>(
2326 let (val, ok) = match (success, failure) {
2327 (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
2328 (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
2329 (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
2330 (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
2331 (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
2332 (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
2333 (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
2334 (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
2335 (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
2336 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
2337 (_, Release) => panic!("there is no such thing as a release failure ordering"),
2338 _ => panic!("a failure ordering can't be stronger than a success ordering"),
2340 if ok { Ok(val) } else { Err(val) }
2344 #[cfg(target_has_atomic = "8")]
2345 unsafe fn atomic_compare_exchange_weak<T>(
2352 let (val, ok) = match (success, failure) {
2353 (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
2354 (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
2355 (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
2356 (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
2357 (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
2358 (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
2359 (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
2360 (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
2361 (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
2362 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
2363 (_, Release) => panic!("there is no such thing as a release failure ordering"),
2364 _ => panic!("a failure ordering can't be stronger than a success ordering"),
2366 if ok { Ok(val) } else { Err(val) }
2370 #[cfg(target_has_atomic = "8")]
2371 unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
2373 Acquire => intrinsics::atomic_and_acq(dst, val),
2374 Release => intrinsics::atomic_and_rel(dst, val),
2375 AcqRel => intrinsics::atomic_and_acqrel(dst, val),
2376 Relaxed => intrinsics::atomic_and_relaxed(dst, val),
2377 SeqCst => intrinsics::atomic_and(dst, val),
2382 #[cfg(target_has_atomic = "8")]
2383 unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
2385 Acquire => intrinsics::atomic_nand_acq(dst, val),
2386 Release => intrinsics::atomic_nand_rel(dst, val),
2387 AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
2388 Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
2389 SeqCst => intrinsics::atomic_nand(dst, val),
2394 #[cfg(target_has_atomic = "8")]
2395 unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
2397 Acquire => intrinsics::atomic_or_acq(dst, val),
2398 Release => intrinsics::atomic_or_rel(dst, val),
2399 AcqRel => intrinsics::atomic_or_acqrel(dst, val),
2400 Relaxed => intrinsics::atomic_or_relaxed(dst, val),
2401 SeqCst => intrinsics::atomic_or(dst, val),
2406 #[cfg(target_has_atomic = "8")]
2407 unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
2409 Acquire => intrinsics::atomic_xor_acq(dst, val),
2410 Release => intrinsics::atomic_xor_rel(dst, val),
2411 AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
2412 Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
2413 SeqCst => intrinsics::atomic_xor(dst, val),
2417 /// returns the max value (signed comparison)
2419 #[cfg(target_has_atomic = "8")]
2420 unsafe fn atomic_max<T>(dst: *mut T, val: T, order: Ordering) -> T {
2422 Acquire => intrinsics::atomic_max_acq(dst, val),
2423 Release => intrinsics::atomic_max_rel(dst, val),
2424 AcqRel => intrinsics::atomic_max_acqrel(dst, val),
2425 Relaxed => intrinsics::atomic_max_relaxed(dst, val),
2426 SeqCst => intrinsics::atomic_max(dst, val),
2430 /// returns the min value (signed comparison)
2432 #[cfg(target_has_atomic = "8")]
2433 unsafe fn atomic_min<T>(dst: *mut T, val: T, order: Ordering) -> T {
2435 Acquire => intrinsics::atomic_min_acq(dst, val),
2436 Release => intrinsics::atomic_min_rel(dst, val),
2437 AcqRel => intrinsics::atomic_min_acqrel(dst, val),
2438 Relaxed => intrinsics::atomic_min_relaxed(dst, val),
2439 SeqCst => intrinsics::atomic_min(dst, val),
2443 /// returns the max value (unsigned comparison)
2445 #[cfg(target_has_atomic = "8")]
2446 unsafe fn atomic_umax<T>(dst: *mut T, val: T, order: Ordering) -> T {
2448 Acquire => intrinsics::atomic_umax_acq(dst, val),
2449 Release => intrinsics::atomic_umax_rel(dst, val),
2450 AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
2451 Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
2452 SeqCst => intrinsics::atomic_umax(dst, val),
2456 /// returns the min value (unsigned comparison)
2458 #[cfg(target_has_atomic = "8")]
2459 unsafe fn atomic_umin<T>(dst: *mut T, val: T, order: Ordering) -> T {
2461 Acquire => intrinsics::atomic_umin_acq(dst, val),
2462 Release => intrinsics::atomic_umin_rel(dst, val),
2463 AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
2464 Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
2465 SeqCst => intrinsics::atomic_umin(dst, val),
2469 /// An atomic fence.
2471 /// Depending on the specified order, a fence prevents the compiler and CPU from
2472 /// reordering certain types of memory operations around it.
2473 /// That creates synchronizes-with relationships between it and atomic operations
2474 /// or fences in other threads.
2476 /// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
2477 /// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
2478 /// exist operations X and Y, both operating on some atomic object 'M' such
2479 /// that A is sequenced before X, Y is sequenced before B and Y observes
2480 /// the change to M. This provides a happens-before dependence between A and B.
2483 /// Thread 1 Thread 2
2485 /// fence(Release); A --------------
2486 /// x.store(3, Relaxed); X --------- |
2489 /// -------------> Y if x.load(Relaxed) == 3 {
2490 /// |-------> B fence(Acquire);
2495 /// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
2498 /// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
2499 /// and [`Release`] semantics, participates in the global program order of the
2500 /// other [`SeqCst`] operations and/or fences.
2502 /// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
2506 /// Panics if `order` is [`Relaxed`].
2511 /// use std::sync::atomic::AtomicBool;
2512 /// use std::sync::atomic::fence;
2513 /// use std::sync::atomic::Ordering;
2515 /// // A mutual exclusion primitive based on spinlock.
2516 /// pub struct Mutex {
2517 /// flag: AtomicBool,
2521 /// pub fn new() -> Mutex {
2523 /// flag: AtomicBool::new(false),
2527 /// pub fn lock(&self) {
2528 /// while !self.flag.compare_and_swap(false, true, Ordering::Relaxed) {}
2529 /// // This fence synchronizes-with store in `unlock`.
2530 /// fence(Ordering::Acquire);
2533 /// pub fn unlock(&self) {
2534 /// self.flag.store(false, Ordering::Release);
2539 /// [`Ordering`]: enum.Ordering.html
2540 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2541 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2542 /// [`Release`]: enum.Ordering.html#variant.Release
2543 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2544 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2546 #[stable(feature = "rust1", since = "1.0.0")]
2547 #[cfg_attr(target_arch = "wasm32", allow(unused_variables))]
2548 pub fn fence(order: Ordering) {
2549 // On wasm32 it looks like fences aren't implemented in LLVM yet in that
2550 // they will cause LLVM to abort. The wasm instruction set doesn't have
2551 // fences right now. There's discussion online about the best way for tools
2552 // to conventionally implement fences at
2553 // https://github.com/WebAssembly/tool-conventions/issues/59. We should
2554 // follow that discussion and implement a solution when one comes about!
2555 #[cfg(not(target_arch = "wasm32"))]
2556 // SAFETY: using an atomic fence is safe
2559 Acquire => intrinsics::atomic_fence_acq(),
2560 Release => intrinsics::atomic_fence_rel(),
2561 AcqRel => intrinsics::atomic_fence_acqrel(),
2562 SeqCst => intrinsics::atomic_fence(),
2563 Relaxed => panic!("there is no such thing as a relaxed fence"),
2568 /// A compiler memory fence.
2570 /// `compiler_fence` does not emit any machine code, but restricts the kinds
2571 /// of memory re-ordering the compiler is allowed to do. Specifically, depending on
2572 /// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
2573 /// or writes from before or after the call to the other side of the call to
2574 /// `compiler_fence`. Note that it does **not** prevent the *hardware*
2575 /// from doing such re-ordering. This is not a problem in a single-threaded
2576 /// execution context, but when other threads may modify memory at the same
2577 /// time, stronger synchronization primitives such as [`fence`] are required.
2579 /// The re-orderings prevented by the different ordering semantics are:
2581 /// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
2582 /// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
2583 /// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
2584 /// - with [`AcqRel`], both of the above rules are enforced.
2586 /// `compiler_fence` is generally only useful for preventing a thread from
2587 /// racing *with itself*. That is, if a given thread is executing one piece
2588 /// of code, and is then interrupted, and starts executing code elsewhere
2589 /// (while still in the same thread, and conceptually still on the same
2590 /// core). In traditional programs, this can only occur when a signal
2591 /// handler is registered. In more low-level code, such situations can also
2592 /// arise when handling interrupts, when implementing green threads with
2593 /// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
2594 /// discussion of [memory barriers].
2598 /// Panics if `order` is [`Relaxed`].
2602 /// Without `compiler_fence`, the `assert_eq!` in following code
2603 /// is *not* guaranteed to succeed, despite everything happening in a single thread.
2604 /// To see why, remember that the compiler is free to swap the stores to
2605 /// `IMPORTANT_VARIABLE` and `IS_READY` since they are both
2606 /// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
2607 /// after `IS_READY` is updated, then the signal handler will see
2608 /// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
2609 /// Using a `compiler_fence` remedies this situation.
2612 /// use std::sync::atomic::{AtomicBool, AtomicUsize};
2613 /// use std::sync::atomic::Ordering;
2614 /// use std::sync::atomic::compiler_fence;
2616 /// static IMPORTANT_VARIABLE: AtomicUsize = AtomicUsize::new(0);
2617 /// static IS_READY: AtomicBool = AtomicBool::new(false);
2620 /// IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
2621 /// // prevent earlier writes from being moved beyond this point
2622 /// compiler_fence(Ordering::Release);
2623 /// IS_READY.store(true, Ordering::Relaxed);
2626 /// fn signal_handler() {
2627 /// if IS_READY.load(Ordering::Relaxed) {
2628 /// assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
2633 /// [`fence`]: fn.fence.html
2634 /// [`Ordering`]: enum.Ordering.html
2635 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2636 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2637 /// [`Release`]: enum.Ordering.html#variant.Release
2638 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2639 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2640 /// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
2642 #[stable(feature = "compiler_fences", since = "1.21.0")]
2643 pub fn compiler_fence(order: Ordering) {
2644 // SAFETY: doesn't compile to machine code
2647 Acquire => intrinsics::atomic_singlethreadfence_acq(),
2648 Release => intrinsics::atomic_singlethreadfence_rel(),
2649 AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
2650 SeqCst => intrinsics::atomic_singlethreadfence(),
2651 Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
2656 #[cfg(target_has_atomic_load_store = "8")]
2657 #[stable(feature = "atomic_debug", since = "1.3.0")]
2658 impl fmt::Debug for AtomicBool {
2659 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2660 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2664 #[cfg(target_has_atomic_load_store = "ptr")]
2665 #[stable(feature = "atomic_debug", since = "1.3.0")]
2666 impl<T> fmt::Debug for AtomicPtr<T> {
2667 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2668 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2672 #[cfg(target_has_atomic_load_store = "ptr")]
2673 #[stable(feature = "atomic_pointer", since = "1.24.0")]
2674 impl<T> fmt::Pointer for AtomicPtr<T> {
2675 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2676 fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)