3 //! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent types.
7 //! This module defines atomic versions of a select number of primitive
8 //! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
9 //! [`AtomicI8`], [`AtomicU16`], etc.
10 //! Atomic types present operations that, when used correctly, synchronize
11 //! updates between threads.
13 //! [`AtomicBool`]: struct.AtomicBool.html
14 //! [`AtomicIsize`]: struct.AtomicIsize.html
15 //! [`AtomicUsize`]: struct.AtomicUsize.html
16 //! [`AtomicI8`]: struct.AtomicI8.html
17 //! [`AtomicU16`]: struct.AtomicU16.html
19 //! Each method takes an [`Ordering`] which represents the strength of
20 //! the memory barrier for that operation. These orderings are the
21 //! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2].
23 //! [`Ordering`]: enum.Ordering.html
25 //! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order
26 //! [2]: ../../../nomicon/atomics.html
28 //! Atomic variables are safe to share between threads (they implement [`Sync`])
29 //! but they do not themselves provide the mechanism for sharing and follow the
30 //! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
31 //! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
32 //! atomically-reference-counted shared pointer).
34 //! [`Sync`]: ../../marker/trait.Sync.html
35 //! [arc]: ../../../std/sync/struct.Arc.html
37 //! Atomic types may be stored in static variables, initialized using
38 //! the constant initializers like [`AtomicBool::new`]. Atomic statics
39 //! are often used for lazy global initialization.
41 //! [`AtomicBool::new`]: struct.AtomicBool.html#method.new
45 //! All atomic types in this module are guaranteed to be [lock-free] if they're
46 //! available. This means they don't internally acquire a global mutex. Atomic
47 //! types and operations are not guaranteed to be wait-free. This means that
48 //! operations like `fetch_or` may be implemented with a compare-and-swap loop.
50 //! Atomic operations may be implemented at the instruction layer with
51 //! larger-size atomics. For example some platforms use 4-byte atomic
52 //! instructions to implement `AtomicI8`. Note that this emulation should not
53 //! have an impact on correctness of code, it's just something to be aware of.
55 //! The atomic types in this module may not be available on all platforms. The
56 //! atomic types here are all widely available, however, and can generally be
//! relied upon to exist. Some notable exceptions are:
59 //! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or
60 //! `AtomicI64` types.
61 //! * ARM platforms like `armv5te` that aren't for Linux do not have any atomics
63 //! * ARM targets with `thumbv6m` do not have atomic operations at all.
65 //! Note that future platforms may be added that also do not have support for
66 //! some atomic operations. Maximally portable code will want to be careful
67 //! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are
68 //! generally the most portable, but even then they're not available everywhere.
//! For reference, the `std` library requires pointer-sized atomics, although `core` does not.
72 //! Currently you'll need to use `#[cfg(target_arch)]` primarily to
73 //! conditionally compile in code with atomics. There is an unstable
74 //! `#[cfg(target_has_atomic)]` as well which may be stabilized in the future.
76 //! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm
80 //! A simple spinlock:
83 //! use std::sync::Arc;
84 //! use std::sync::atomic::{AtomicUsize, Ordering};
88 //! let spinlock = Arc::new(AtomicUsize::new(1));
90 //! let spinlock_clone = spinlock.clone();
91 //! let thread = thread::spawn(move|| {
92 //! spinlock_clone.store(0, Ordering::SeqCst);
95 //! // Wait for the other thread to release the lock
96 //! while spinlock.load(Ordering::SeqCst) != 0 {}
98 //! if let Err(panic) = thread.join() {
99 //! println!("Thread had an error: {:?}", panic);
104 //! Keep a global count of live threads:
107 //! use std::sync::atomic::{AtomicUsize, Ordering};
109 //! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0);
111 //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
112 //! println!("live threads: {}", old_thread_count + 1);
115 #![stable(feature = "rust1", since = "1.0.0")]
116 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
117 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))]
119 use self::Ordering::*;
121 use crate::cell::UnsafeCell;
123 use crate::intrinsics;
125 use crate::hint::spin_loop;
/// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
///
/// Upon receiving the spin-loop signal the processor can optimize its behavior by, for example, saving
/// power or switching hyper-threads.
///
/// This function is different from [`std::thread::yield_now`], which directly yields to the
/// system's scheduler, whereas `spin_loop_hint` does not interact with the operating system.
///
/// A common use case for `spin_loop_hint` is implementing bounded optimistic spinning in a CAS
/// loop in synchronization primitives. To avoid problems like priority inversion, it is strongly
/// recommended that the spin loop is terminated after a finite amount of iterations and an
/// appropriate blocking syscall is made.
///
/// **Note**: On platforms that do not support receiving spin-loop hints this function does not
/// do anything at all.
///
/// [`std::thread::yield_now`]: ../../../std/thread/fn.yield_now.html
/// [`std::thread::sleep`]: ../../../std/thread/fn.sleep.html
/// [`std::sync::Mutex`]: ../../../std/sync/struct.Mutex.html
#[stable(feature = "spin_loop_hint", since = "1.24.0")]
// NOTE(review): presumably delegates to `crate::hint::spin_loop` (imported above) — confirm.
pub fn spin_loop_hint() {
/// A boolean type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a [`bool`].
///
/// [`bool`]: ../../../std/primitive.bool.html
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
impl Default for AtomicBool {
/// Creates an `AtomicBool` initialized to `false`.
fn default() -> Self {
// Send is implicitly implemented for AtomicBool.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
// SAFETY(review): sharing `&AtomicBool` across threads is sound because every
// mutation of the inner value goes through atomic intrinsics (see the methods
// below); no non-atomic access is exposed through a shared reference.
unsafe impl Sync for AtomicBool {}
/// A raw pointer type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a `*mut T`.
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
// Alignment is forced to the pointer width on each target; `repr(C)` keeps the
// layout identical to a plain `*mut T` — presumably so hardware atomic
// instructions can operate on the cell directly (NOTE(review): confirm intent).
#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
pub struct AtomicPtr<T> {
p: UnsafeCell<*mut T>,
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for AtomicPtr<T> {
/// Creates a null `AtomicPtr<T>`.
fn default() -> AtomicPtr<T> {
AtomicPtr::new(crate::ptr::null_mut())
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
// SAFETY(review): `Send`/`Sync` hold for *any* `T` because only the pointer
// value itself is ever read or written (atomically); the pointee is never
// accessed by this type.
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Sync for AtomicPtr<T> {}
/// Atomic memory orderings
///
/// Memory orderings specify the way atomic operations synchronize memory.
/// In its weakest [`Relaxed`][Ordering::Relaxed], only the memory directly touched by the
/// operation is synchronized. On the other hand, a store-load pair of [`SeqCst`][Ordering::SeqCst]
/// operations synchronize other memory while additionally preserving a total order of such
/// operations across all threads.
///
/// Rust's memory orderings are [the same as those of
/// C++20](https://en.cppreference.com/w/cpp/atomic/memory_order).
///
/// For more information see the [nomicon].
///
/// [nomicon]: ../../../nomicon/atomics.html
/// [Ordering::Relaxed]: #variant.Relaxed
/// [Ordering::SeqCst]: #variant.SeqCst
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
/// No ordering constraints, only atomic operations.
///
/// Corresponds to [`memory_order_relaxed`] in C++20.
///
/// [`memory_order_relaxed`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering
#[stable(feature = "rust1", since = "1.0.0")]
/// When coupled with a store, all previous operations become ordered
/// before any load of this value with [`Acquire`] (or stronger) ordering.
/// In particular, all previous writes become visible to all threads
/// that perform an [`Acquire`] (or stronger) load of this value.
///
/// Notice that using this ordering for an operation that combines loads
/// and stores leads to a [`Relaxed`] load operation!
///
/// This ordering is only applicable for operations that can perform a store.
///
/// Corresponds to [`memory_order_release`] in C++20.
///
/// [`Release`]: #variant.Release
/// [`Acquire`]: #variant.Acquire
/// [`Relaxed`]: #variant.Relaxed
/// [`memory_order_release`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
#[stable(feature = "rust1", since = "1.0.0")]
/// When coupled with a load, if the loaded value was written by a store operation with
/// [`Release`] (or stronger) ordering, then all subsequent operations
/// become ordered after that store. In particular, all subsequent loads will see data
/// written before the store.
///
/// Notice that using this ordering for an operation that combines loads
/// and stores leads to a [`Relaxed`] store operation!
///
/// This ordering is only applicable for operations that can perform a load.
///
/// Corresponds to [`memory_order_acquire`] in C++20.
///
/// [`Acquire`]: #variant.Acquire
/// [`Release`]: #variant.Release
/// [`Relaxed`]: #variant.Relaxed
/// [`memory_order_acquire`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
#[stable(feature = "rust1", since = "1.0.0")]
/// Has the effects of both [`Acquire`] and [`Release`] together:
/// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
///
/// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
/// not performing any store and hence it has just [`Acquire`] ordering. However,
/// `AcqRel` will never perform [`Relaxed`] accesses.
///
/// This ordering is only applicable for operations that combine both loads and stores.
///
/// Corresponds to [`memory_order_acq_rel`] in C++20.
///
/// [`memory_order_acq_rel`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
/// [`Acquire`]: #variant.Acquire
/// [`Release`]: #variant.Release
/// [`Relaxed`]: #variant.Relaxed
#[stable(feature = "rust1", since = "1.0.0")]
/// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
/// operations, respectively) with the additional guarantee that all threads see all
/// sequentially consistent operations in the same order.
///
/// Corresponds to [`memory_order_seq_cst`] in C++20.
///
/// [`memory_order_seq_cst`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering
/// [`Acquire`]: #variant.Acquire
/// [`Release`]: #variant.Release
/// [`AcqRel`]: #variant.AcqRel
#[stable(feature = "rust1", since = "1.0.0")]
/// An [`AtomicBool`] initialized to `false`.
///
/// [`AtomicBool`]: struct.AtomicBool.html
// NOTE(review): deprecated in favor of `AtomicBool::new(false)` — see the
// `reason`/`suggestion` of the deprecation attribute below.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
reason = "the `new` function is now preferred",
suggestion = "AtomicBool::new(false)"
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
#[cfg(target_has_atomic_load_store = "8")]
/// Creates a new `AtomicBool`.
///
/// # Examples
///
/// ```
/// use std::sync::atomic::AtomicBool;
///
/// let atomic_true = AtomicBool::new(true);
/// let atomic_false = AtomicBool::new(false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_atomic_new", since = "1.32.0")]
pub const fn new(v: bool) -> AtomicBool {
// The bool is stored as a `u8` (0 or 1); all atomic ops act on that byte.
AtomicBool { v: UnsafeCell::new(v as u8) }
/// Returns a mutable reference to the underlying [`bool`].
///
/// This is safe because the mutable reference guarantees that no other threads are
/// concurrently accessing the atomic data.
///
/// [`bool`]: ../../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let mut some_bool = AtomicBool::new(true);
/// assert_eq!(*some_bool.get_mut(), true);
/// *some_bool.get_mut() = false;
/// assert_eq!(some_bool.load(Ordering::SeqCst), false);
/// ```
#[stable(feature = "atomic_access", since = "1.15.0")]
pub fn get_mut(&mut self) -> &mut bool {
// SAFETY: the mutable reference guarantees unique ownership.
unsafe { &mut *(self.v.get() as *mut bool) }
/// Consumes the atomic and returns the contained value.
///
/// This is safe because passing `self` by value guarantees that no other threads are
/// concurrently accessing the atomic data.
///
/// # Examples
///
/// ```
/// use std::sync::atomic::AtomicBool;
///
/// let some_bool = AtomicBool::new(true);
/// assert_eq!(some_bool.into_inner(), true);
/// ```
#[stable(feature = "atomic_access", since = "1.15.0")]
pub fn into_inner(self) -> bool {
// Any non-zero byte means `true`.
self.v.into_inner() != 0
/// Loads a value from the bool.
///
/// `load` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
///
/// # Panics
///
/// Panics if `order` is [`Release`] or [`AcqRel`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let some_bool = AtomicBool::new(true);
///
/// assert_eq!(some_bool.load(Ordering::Relaxed), true);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn load(&self, order: Ordering) -> bool {
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe { atomic_load(self.v.get(), order) != 0 }
/// Stores a value into the bool.
///
/// `store` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
///
/// # Panics
///
/// Panics if `order` is [`Acquire`] or [`AcqRel`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let some_bool = AtomicBool::new(true);
///
/// some_bool.store(false, Ordering::Relaxed);
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn store(&self, val: bool, order: Ordering) {
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
atomic_store(self.v.get(), val as u8, order);
/// Stores a value into the bool, returning the previous value.
///
/// `swap` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let some_bool = AtomicBool::new(true);
///
/// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "8")]
pub fn swap(&self, val: bool, order: Ordering) -> bool {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
/// Stores a value into the [`bool`] if the current value is the same as the `current` value.
///
/// The return value is always the previous value. If it is equal to `current`, then the value
/// was updated.
///
/// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
/// ordering of this operation. Notice that even when using [`AcqRel`], the operation
/// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
/// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
/// happens, and using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
/// [`bool`]: ../../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let some_bool = AtomicBool::new(true);
///
/// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
///
/// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "8")]
pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
// Implemented on top of `compare_exchange`, mapping the single `order`
// to the strongest failure ordering it permits.
match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
/// Stores a value into the [`bool`] if the current value is the same as the `current` value.
///
/// The return value is a result indicating whether the new value was written and containing
/// the previous value. On success this value is guaranteed to be equal to `current`.
///
/// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
/// ordering of this operation. The first describes the required ordering if the
/// operation succeeds while the second describes the required ordering when the
/// operation fails. Using [`Acquire`] as success ordering makes the store part
/// of this operation [`Relaxed`], and using [`Release`] makes the successful load
/// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
/// and must be equivalent to or weaker than the success ordering.
///
/// [`bool`]: ../../../std/primitive.bool.html
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let some_bool = AtomicBool::new(true);
///
/// assert_eq!(some_bool.compare_exchange(true,
/// Ordering::Acquire,
/// Ordering::Relaxed),
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
///
/// assert_eq!(some_bool.compare_exchange(true, true,
/// Ordering::SeqCst,
/// Ordering::Acquire),
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
/// ```
#[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
#[cfg(target_has_atomic = "8")]
pub fn compare_exchange(
) -> Result<bool, bool> {
// SAFETY: data races are prevented by atomic intrinsics.
atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
Err(x) => Err(x != 0),
/// Stores a value into the [`bool`] if the current value is the same as the `current` value.
///
/// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
/// comparison succeeds, which can result in more efficient code on some platforms. The
/// return value is a result indicating whether the new value was written and containing the
/// previous value.
///
/// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
/// ordering of this operation. The first describes the required ordering if the
/// operation succeeds while the second describes the required ordering when the
/// operation fails. Using [`Acquire`] as success ordering makes the store part
/// of this operation [`Relaxed`], and using [`Release`] makes the successful load
/// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
/// and must be equivalent to or weaker than the success ordering.
///
/// [`bool`]: ../../../std/primitive.bool.html
/// [`compare_exchange`]: #method.compare_exchange
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let val = AtomicBool::new(false);
///
/// let mut old = val.load(Ordering::Relaxed);
///
/// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
/// Err(x) => old = x,
/// ```
#[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
#[cfg(target_has_atomic = "8")]
pub fn compare_exchange_weak(
) -> Result<bool, bool> {
// SAFETY: data races are prevented by atomic intrinsics.
atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
Err(x) => Err(x != 0),
/// Logical "and" with a boolean value.
///
/// Performs a logical "and" operation on the current value and the argument `val`, and sets
/// the new value to the result.
///
/// Returns the previous value.
///
/// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(false);
/// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "8")]
pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
/// Logical "nand" with a boolean value.
///
/// Performs a logical "nand" operation on the current value and the argument `val`, and sets
/// the new value to the result.
///
/// Returns the previous value.
///
/// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
///
/// let foo = AtomicBool::new(false);
/// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "8")]
pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
// We can't use atomic_nand here because it can result in a bool with
// an invalid value. This happens because the atomic operation is done
// with an 8-bit integer internally, which would set the upper 7 bits.
// So we just use fetch_xor or swap instead.
// !(x & true) == !x
// We must invert the bool.
self.fetch_xor(true, order)
// !(x & false) == true
// We must set the bool to true.
self.swap(true, order)
/// Logical "or" with a boolean value.
///
/// Performs a logical "or" operation on the current value and the argument `val`, and sets the
/// new value to the result.
///
/// Returns the previous value.
///
/// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(false);
/// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "8")]
pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
/// Logical "xor" with a boolean value.
///
/// Performs a logical "xor" operation on the current value and the argument `val`, and sets
/// the new value to the result.
///
/// Returns the previous value.
///
/// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
///
/// let foo = AtomicBool::new(false);
/// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "8")]
pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
/// Returns a mutable pointer to the underlying [`bool`].
///
/// Doing non-atomic reads and writes on the resulting value can be a data race.
/// This method is mostly useful for FFI, where the function signature may use
/// `*mut bool` instead of `&AtomicBool`.
///
/// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
/// atomic types work with interior mutability. All modifications of an atomic change the value
/// through a shared reference, and can do so safely as long as they use atomic operations. Any
/// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
/// restriction: operations on it must be atomic.
///
/// [`bool`]: ../../../std/primitive.bool.html
///
/// # Examples
///
/// ```ignore (extern-declaration)
/// use std::sync::atomic::AtomicBool;
///
/// fn my_atomic_op(arg: *mut bool);
///
/// let mut atomic = AtomicBool::new(true);
///
/// my_atomic_op(atomic.as_mut_ptr());
/// ```
#[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")]
pub fn as_mut_ptr(&self) -> *mut bool {
// `v` is a `u8` cell holding 0 or 1, so reinterpreting as `*mut bool` is valid.
self.v.get() as *mut bool
#[cfg(target_has_atomic_load_store = "ptr")]
impl<T> AtomicPtr<T> {
/// Creates a new `AtomicPtr`.
///
/// # Examples
///
/// ```
/// use std::sync::atomic::AtomicPtr;
///
/// let ptr = &mut 5;
/// let atomic_ptr = AtomicPtr::new(ptr);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_atomic_new", since = "1.32.0")]
pub const fn new(p: *mut T) -> AtomicPtr<T> {
AtomicPtr { p: UnsafeCell::new(p) }
/// Returns a mutable reference to the underlying pointer.
///
/// This is safe because the mutable reference guarantees that no other threads are
/// concurrently accessing the atomic data.
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let mut atomic_ptr = AtomicPtr::new(&mut 10);
/// *atomic_ptr.get_mut() = &mut 5;
/// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
/// ```
#[stable(feature = "atomic_access", since = "1.15.0")]
pub fn get_mut(&mut self) -> &mut *mut T {
// SAFETY: the mutable reference guarantees unique ownership.
unsafe { &mut *self.p.get() }
/// Consumes the atomic and returns the contained value.
///
/// This is safe because passing `self` by value guarantees that no other threads are
/// concurrently accessing the atomic data.
///
/// # Examples
///
/// ```
/// use std::sync::atomic::AtomicPtr;
///
/// let atomic_ptr = AtomicPtr::new(&mut 5);
/// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
/// ```
#[stable(feature = "atomic_access", since = "1.15.0")]
pub fn into_inner(self) -> *mut T {
907 /// Loads a value from the pointer.
909 /// `load` takes an [`Ordering`] argument which describes the memory ordering
910 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
914 /// Panics if `order` is [`Release`] or [`AcqRel`].
916 /// [`Ordering`]: enum.Ordering.html
917 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
918 /// [`Release`]: enum.Ordering.html#variant.Release
919 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
920 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
921 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
926 /// use std::sync::atomic::{AtomicPtr, Ordering};
928 /// let ptr = &mut 5;
929 /// let some_ptr = AtomicPtr::new(ptr);
931 /// let value = some_ptr.load(Ordering::Relaxed);
934 #[stable(feature = "rust1", since = "1.0.0")]
935 pub fn load(&self, order: Ordering) -> *mut T {
936 // SAFETY: data races are prevented by atomic intrinsics.
// The pointer is loaded through a usize-typed atomic and cast back to
// `*mut T`; this relies on `*mut T` and `usize` sharing size/representation.
937 unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
940 /// Stores a value into the pointer.
942 /// `store` takes an [`Ordering`] argument which describes the memory ordering
943 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
947 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
949 /// [`Ordering`]: enum.Ordering.html
950 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
951 /// [`Release`]: enum.Ordering.html#variant.Release
952 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
953 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
954 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
959 /// use std::sync::atomic::{AtomicPtr, Ordering};
961 /// let ptr = &mut 5;
962 /// let some_ptr = AtomicPtr::new(ptr);
964 /// let other_ptr = &mut 10;
966 /// some_ptr.store(other_ptr, Ordering::Relaxed);
969 #[stable(feature = "rust1", since = "1.0.0")]
970 pub fn store(&self, ptr: *mut T, order: Ordering) {
971 // SAFETY: data races are prevented by atomic intrinsics.
// The `*mut T` is round-tripped through `usize` to reuse the integer-typed
// atomic intrinsic.
973 atomic_store(self.p.get() as *mut usize, ptr as usize, order);
977 /// Stores a value into the pointer, returning the previous value.
979 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
980 /// of this operation. All ordering modes are possible. Note that using
981 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
982 /// using [`Release`] makes the load part [`Relaxed`].
984 /// [`Ordering`]: enum.Ordering.html
985 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
986 /// [`Release`]: enum.Ordering.html#variant.Release
987 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
992 /// use std::sync::atomic::{AtomicPtr, Ordering};
994 /// let ptr = &mut 5;
995 /// let some_ptr = AtomicPtr::new(ptr);
997 /// let other_ptr = &mut 10;
999 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
1002 #[stable(feature = "rust1", since = "1.0.0")]
1003 #[cfg(target_has_atomic = "ptr")]
1004 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
1005 // SAFETY: data races are prevented by atomic intrinsics.
// Both the new value and the returned old value are cast through `usize`.
1006 unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
1009 /// Stores a value into the pointer if the current value is the same as the `current` value.
1011 /// The return value is always the previous value. If it is equal to `current`, then the value
1014 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1015 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
1016 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1017 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1018 /// happens, and using [`Release`] makes the load part [`Relaxed`].
1020 /// [`Ordering`]: enum.Ordering.html
1021 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1022 /// [`Release`]: enum.Ordering.html#variant.Release
1023 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1024 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1029 /// use std::sync::atomic::{AtomicPtr, Ordering};
1031 /// let ptr = &mut 5;
1032 /// let some_ptr = AtomicPtr::new(ptr);
1034 /// let other_ptr = &mut 10;
1036 /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
1039 #[stable(feature = "rust1", since = "1.0.0")]
1040 #[cfg(target_has_atomic = "ptr")]
1041 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
// Single-ordering CAS: the failure ordering is derived from `order` via
// `strongest_failure_ordering`, then the call is forwarded to `compare_exchange`.
1042 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
1048 /// Stores a value into the pointer if the current value is the same as the `current` value.
1050 /// The return value is a result indicating whether the new value was written and containing
1051 /// the previous value. On success this value is guaranteed to be equal to `current`.
1053 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1054 /// ordering of this operation. The first describes the required ordering if the
1055 /// operation succeeds while the second describes the required ordering when the
1056 /// operation fails. Using [`Acquire`] as success ordering makes the store part
1057 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1058 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1059 /// and must be equivalent to or weaker than the success ordering.
1061 /// [`Ordering`]: enum.Ordering.html
1062 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1063 /// [`Release`]: enum.Ordering.html#variant.Release
1064 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1065 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1070 /// use std::sync::atomic::{AtomicPtr, Ordering};
1072 /// let ptr = &mut 5;
1073 /// let some_ptr = AtomicPtr::new(ptr);
1075 /// let other_ptr = &mut 10;
1077 /// let value = some_ptr.compare_exchange(ptr, other_ptr,
1078 /// Ordering::SeqCst, Ordering::Relaxed);
1081 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1082 #[cfg(target_has_atomic = "ptr")]
1083 pub fn compare_exchange(
1089 ) -> Result<*mut T, *mut T> {
1090 // SAFETY: data races are prevented by atomic intrinsics.
// The CAS is performed on the usize representation of the pointer.
1092 let res = atomic_compare_exchange(
1093 self.p.get() as *mut usize,
// Map the usize-based result back to typed pointers on both arms.
1100 Ok(x) => Ok(x as *mut T),
1101 Err(x) => Err(x as *mut T),
1106 /// Stores a value into the pointer if the current value is the same as the `current` value.
1108 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
1109 /// comparison succeeds, which can result in more efficient code on some platforms. The
1110 /// return value is a result indicating whether the new value was written and containing the
1113 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1114 /// ordering of this operation. The first describes the required ordering if the
1115 /// operation succeeds while the second describes the required ordering when the
1116 /// operation fails. Using [`Acquire`] as success ordering makes the store part
1117 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1118 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1119 /// and must be equivalent to or weaker than the success ordering.
1121 /// [`compare_exchange`]: #method.compare_exchange
1122 /// [`Ordering`]: enum.Ordering.html
1123 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1124 /// [`Release`]: enum.Ordering.html#variant.Release
1125 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1126 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1131 /// use std::sync::atomic::{AtomicPtr, Ordering};
1133 /// let some_ptr = AtomicPtr::new(&mut 5);
1135 /// let new = &mut 10;
1136 /// let mut old = some_ptr.load(Ordering::Relaxed);
1138 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1140 /// Err(x) => old = x,
1145 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1146 #[cfg(target_has_atomic = "ptr")]
1147 pub fn compare_exchange_weak(
1153 ) -> Result<*mut T, *mut T> {
1154 // SAFETY: data races are prevented by atomic intrinsics.
// Weak CAS on the usize representation; spurious failure is permitted.
1156 let res = atomic_compare_exchange_weak(
1157 self.p.get() as *mut usize,
// Cast the usize result back to typed pointers on both arms.
1164 Ok(x) => Ok(x as *mut T),
1165 Err(x) => Err(x as *mut T),
1171 #[cfg(target_has_atomic_load_store = "8")]
1172 #[stable(feature = "atomic_bool_from", since = "1.24.0")]
1173 impl From<bool> for AtomicBool {
1174 /// Converts a `bool` into an `AtomicBool`.
1179 /// use std::sync::atomic::AtomicBool;
1180 /// let atomic_bool = AtomicBool::from(true);
1181 /// assert_eq!(format!("{:?}", atomic_bool), "true")
1184 fn from(b: bool) -> Self {
1189 #[cfg(target_has_atomic_load_store = "ptr")]
1190 #[stable(feature = "atomic_from", since = "1.23.0")]
1191 impl<T> From<*mut T> for AtomicPtr<T> {
1193 fn from(p: *mut T) -> Self {
1198 #[cfg(target_has_atomic_load_store = "8")]
1199 macro_rules! atomic_int {
1204 $stable_access:meta,
1208 $stable_init_const:meta,
1209 $s_int_type:expr, $int_ref:expr,
1210 $extra_feature:expr,
1211 $min_fn:ident, $max_fn:ident,
1214 $int_type:ident $atomic_type:ident $atomic_init:ident) => {
1215 /// An integer type which can be safely shared between threads.
1217 /// This type has the same in-memory representation as the underlying
1218 /// integer type, [`
1219 #[doc = $s_int_type]
1222 /// ). For more about the differences between atomic types and
1223 /// non-atomic types as well as information about the portability of
1224 /// this type, please see the [module-level documentation].
1226 /// [module-level documentation]: index.html
// `repr(C, align(...))` pins the layout so the atomic type has the same
// in-memory representation as the underlying integer (as stated in the
// type-level docs above).
1228 #[repr(C, align($align))]
1229 pub struct $atomic_type {
// Interior mutability: all mutation through a shared reference goes via
// atomic operations on this cell.
1230 v: UnsafeCell<$int_type>,
1233 /// An atomic integer initialized to `0`.
1234 #[$stable_init_const]
1237 reason = "the `new` function is now preferred",
1238 suggestion = $atomic_new,
1240 pub const $atomic_init: $atomic_type = $atomic_type::new(0);
1243 impl Default for $atomic_type {
1244 fn default() -> Self {
// Delegates to the integer's `Default` (zero for the primitive ints),
// matching the `ATOMIC_*_INIT` constants which are `new(0)`.
1245 Self::new(Default::default())
1250 impl From<$int_type> for $atomic_type {
1253 "Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`."),
1255 fn from(v: $int_type) -> Self { Self::new(v) }
1260 impl fmt::Debug for $atomic_type {
1261 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1262 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
1266 // Send is implicitly implemented.
// SAFETY(review): sharing across threads appears sound because every
// shared-reference access goes through atomic operations — TODO confirm
// against the full file's invariants.
1268 unsafe impl Sync for $atomic_type {}
1272 concat!("Creates a new atomic integer.
1277 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1279 let atomic_forty_two = ", stringify!($atomic_type), "::new(42);
1284 pub const fn new(v: $int_type) -> Self {
1285 Self {v: UnsafeCell::new(v)}
1290 concat!("Returns a mutable reference to the underlying integer.
1292 This is safe because the mutable reference guarantees that no other threads are
1293 concurrently accessing the atomic data.
1298 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1300 let mut some_var = ", stringify!($atomic_type), "::new(10);
1301 assert_eq!(*some_var.get_mut(), 10);
1302 *some_var.get_mut() = 5;
1303 assert_eq!(some_var.load(Ordering::SeqCst), 5);
1307 pub fn get_mut(&mut self) -> &mut $int_type {
1308 // SAFETY: the mutable reference guarantees unique ownership.
1309 unsafe { &mut *self.v.get() }
1314 concat!("Consumes the atomic and returns the contained value.
1316 This is safe because passing `self` by value guarantees that no other threads are
1317 concurrently accessing the atomic data.
1322 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1324 let some_var = ", stringify!($atomic_type), "::new(5);
1325 assert_eq!(some_var.into_inner(), 5);
1329 pub fn into_inner(self) -> $int_type {
1335 concat!("Loads a value from the atomic integer.
1337 `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1338 Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1342 Panics if `order` is [`Release`] or [`AcqRel`].
1344 [`Ordering`]: enum.Ordering.html
1345 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1346 [`Release`]: enum.Ordering.html#variant.Release
1347 [`Acquire`]: enum.Ordering.html#variant.Acquire
1348 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1349 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1354 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1356 let some_var = ", stringify!($atomic_type), "::new(5);
1358 assert_eq!(some_var.load(Ordering::Relaxed), 5);
1362 pub fn load(&self, order: Ordering) -> $int_type {
1363 // SAFETY: data races are prevented by atomic intrinsics.
1364 unsafe { atomic_load(self.v.get(), order) }
1369 concat!("Stores a value into the atomic integer.
1371 `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1372 Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
1376 Panics if `order` is [`Acquire`] or [`AcqRel`].
1378 [`Ordering`]: enum.Ordering.html
1379 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1380 [`Release`]: enum.Ordering.html#variant.Release
1381 [`Acquire`]: enum.Ordering.html#variant.Acquire
1382 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1383 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1388 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1390 let some_var = ", stringify!($atomic_type), "::new(5);
1392 some_var.store(10, Ordering::Relaxed);
1393 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1397 pub fn store(&self, val: $int_type, order: Ordering) {
1398 // SAFETY: data races are prevented by atomic intrinsics.
1399 unsafe { atomic_store(self.v.get(), val, order); }
1404 concat!("Stores a value into the atomic integer, returning the previous value.
1406 `swap` takes an [`Ordering`] argument which describes the memory ordering
1407 of this operation. All ordering modes are possible. Note that using
1408 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1409 using [`Release`] makes the load part [`Relaxed`].
1411 [`Ordering`]: enum.Ordering.html
1412 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1413 [`Release`]: enum.Ordering.html#variant.Release
1414 [`Acquire`]: enum.Ordering.html#variant.Acquire
1419 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1421 let some_var = ", stringify!($atomic_type), "::new(5);
1423 assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
1428 pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
1429 // SAFETY: data races are prevented by atomic intrinsics.
1430 unsafe { atomic_swap(self.v.get(), val, order) }
1435 concat!("Stores a value into the atomic integer if the current value is the same as
1436 the `current` value.
1438 The return value is always the previous value. If it is equal to `current`, then the
1441 `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1442 ordering of this operation. Notice that even when using [`AcqRel`], the operation
1443 might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1444 Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1445 happens, and using [`Release`] makes the load part [`Relaxed`].
1447 [`Ordering`]: enum.Ordering.html
1448 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1449 [`Release`]: enum.Ordering.html#variant.Release
1450 [`Acquire`]: enum.Ordering.html#variant.Acquire
1451 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1456 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1458 let some_var = ", stringify!($atomic_type), "::new(5);
1460 assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
1461 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1463 assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
1464 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1469 pub fn compare_and_swap(&self,
1472 order: Ordering) -> $int_type {
1473 match self.compare_exchange(current,
1476 strongest_failure_ordering(order)) {
1484 concat!("Stores a value into the atomic integer if the current value is the same as
1485 the `current` value.
1487 The return value is a result indicating whether the new value was written and
1488 containing the previous value. On success this value is guaranteed to be equal to
1491 `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1492 ordering of this operation. The first describes the required ordering if the
1493 operation succeeds while the second describes the required ordering when the
1494 operation fails. Using [`Acquire`] as success ordering makes the store part
1495 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1496 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1497 and must be equivalent to or weaker than the success ordering.
1499 [`Ordering`]: enum.Ordering.html
1500 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1501 [`Release`]: enum.Ordering.html#variant.Release
1502 [`Acquire`]: enum.Ordering.html#variant.Acquire
1503 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1508 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1510 let some_var = ", stringify!($atomic_type), "::new(5);
1512 assert_eq!(some_var.compare_exchange(5, 10,
1516 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1518 assert_eq!(some_var.compare_exchange(6, 12,
1522 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1527 pub fn compare_exchange(&self,
1531 failure: Ordering) -> Result<$int_type, $int_type> {
1532 // SAFETY: data races are prevented by atomic intrinsics.
1533 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
1538 concat!("Stores a value into the atomic integer if the current value is the same as
1539 the `current` value.
1541 Unlike [`compare_exchange`], this function is allowed to spuriously fail even
1542 when the comparison succeeds, which can result in more efficient code on some
1543 platforms. The return value is a result indicating whether the new value was
1544 written and containing the previous value.
1546 `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1547 ordering of this operation. The first describes the required ordering if the
1548 operation succeeds while the second describes the required ordering when the
1549 operation fails. Using [`Acquire`] as success ordering makes the store part
1550 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1551 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1552 and must be equivalent to or weaker than the success ordering.
1554 [`compare_exchange`]: #method.compare_exchange
1555 [`Ordering`]: enum.Ordering.html
1556 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1557 [`Release`]: enum.Ordering.html#variant.Release
1558 [`Acquire`]: enum.Ordering.html#variant.Acquire
1559 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1564 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1566 let val = ", stringify!($atomic_type), "::new(4);
1568 let mut old = val.load(Ordering::Relaxed);
1571 match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1580 pub fn compare_exchange_weak(&self,
1584 failure: Ordering) -> Result<$int_type, $int_type> {
1585 // SAFETY: data races are prevented by atomic intrinsics.
1587 atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
1593 concat!("Adds to the current value, returning the previous value.
1595 This operation wraps around on overflow.
1597 `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
1598 of this operation. All ordering modes are possible. Note that using
1599 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1600 using [`Release`] makes the load part [`Relaxed`].
1602 [`Ordering`]: enum.Ordering.html
1603 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1604 [`Release`]: enum.Ordering.html#variant.Release
1605 [`Acquire`]: enum.Ordering.html#variant.Acquire
1610 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1612 let foo = ", stringify!($atomic_type), "::new(0);
1613 assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
1614 assert_eq!(foo.load(Ordering::SeqCst), 10);
1619 pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
1620 // SAFETY: data races are prevented by atomic intrinsics.
1621 unsafe { atomic_add(self.v.get(), val, order) }
1626 concat!("Subtracts from the current value, returning the previous value.
1628 This operation wraps around on overflow.
1630 `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
1631 of this operation. All ordering modes are possible. Note that using
1632 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1633 using [`Release`] makes the load part [`Relaxed`].
1635 [`Ordering`]: enum.Ordering.html
1636 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1637 [`Release`]: enum.Ordering.html#variant.Release
1638 [`Acquire`]: enum.Ordering.html#variant.Acquire
1643 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1645 let foo = ", stringify!($atomic_type), "::new(20);
1646 assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
1647 assert_eq!(foo.load(Ordering::SeqCst), 10);
1652 pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
1653 // SAFETY: data races are prevented by atomic intrinsics.
1654 unsafe { atomic_sub(self.v.get(), val, order) }
1659 concat!("Bitwise \"and\" with the current value.
1661 Performs a bitwise \"and\" operation on the current value and the argument `val`, and
1662 sets the new value to the result.
1664 Returns the previous value.
1666 `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
1667 of this operation. All ordering modes are possible. Note that using
1668 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1669 using [`Release`] makes the load part [`Relaxed`].
1671 [`Ordering`]: enum.Ordering.html
1672 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1673 [`Release`]: enum.Ordering.html#variant.Release
1674 [`Acquire`]: enum.Ordering.html#variant.Acquire
1679 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1681 let foo = ", stringify!($atomic_type), "::new(0b101101);
1682 assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
1683 assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
1688 pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
1689 // SAFETY: data races are prevented by atomic intrinsics.
1690 unsafe { atomic_and(self.v.get(), val, order) }
1695 concat!("Bitwise \"nand\" with the current value.
1697 Performs a bitwise \"nand\" operation on the current value and the argument `val`, and
1698 sets the new value to the result.
1700 Returns the previous value.
1702 `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
1703 of this operation. All ordering modes are possible. Note that using
1704 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1705 using [`Release`] makes the load part [`Relaxed`].
1707 [`Ordering`]: enum.Ordering.html
1708 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1709 [`Release`]: enum.Ordering.html#variant.Release
1710 [`Acquire`]: enum.Ordering.html#variant.Acquire
1715 ", $extra_feature, "
1716 use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1718 let foo = ", stringify!($atomic_type), "::new(0x13);
1719 assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
1720 assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
1725 pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
1726 // SAFETY: data races are prevented by atomic intrinsics.
1727 unsafe { atomic_nand(self.v.get(), val, order) }
1732 concat!("Bitwise \"or\" with the current value.
1734 Performs a bitwise \"or\" operation on the current value and the argument `val`, and
1735 sets the new value to the result.
1737 Returns the previous value.
1739 `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
1740 of this operation. All ordering modes are possible. Note that using
1741 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1742 using [`Release`] makes the load part [`Relaxed`].
1744 [`Ordering`]: enum.Ordering.html
1745 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1746 [`Release`]: enum.Ordering.html#variant.Release
1747 [`Acquire`]: enum.Ordering.html#variant.Acquire
1752 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1754 let foo = ", stringify!($atomic_type), "::new(0b101101);
1755 assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
1756 assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
1761 pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
1762 // SAFETY: data races are prevented by atomic intrinsics.
1763 unsafe { atomic_or(self.v.get(), val, order) }
1768 concat!("Bitwise \"xor\" with the current value.
1770 Performs a bitwise \"xor\" operation on the current value and the argument `val`, and
1771 sets the new value to the result.
1773 Returns the previous value.
1775 `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
1776 of this operation. All ordering modes are possible. Note that using
1777 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1778 using [`Release`] makes the load part [`Relaxed`].
1780 [`Ordering`]: enum.Ordering.html
1781 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1782 [`Release`]: enum.Ordering.html#variant.Release
1783 [`Acquire`]: enum.Ordering.html#variant.Acquire
1788 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1790 let foo = ", stringify!($atomic_type), "::new(0b101101);
1791 assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
1792 assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
1797 pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
1798 // SAFETY: data races are prevented by atomic intrinsics.
1799 unsafe { atomic_xor(self.v.get(), val, order) }
1804 concat!("Fetches the value, and applies a function to it that returns an optional
1805 new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
1806 `Err(previous_value)`.
1808 Note: This may call the function multiple times if the value has been changed by other threads in
1809 the meantime, as long as the function returns `Some(_)`, but the function will have been applied
1810 only once to the stored value.
1812 `fetch_update` takes two [`Ordering`] arguments to describe the memory
1813 ordering of this operation. The first describes the required ordering for loads
1814 and failed updates while the second describes the required ordering when the
1815 operation finally succeeds. Beware that this is different from the two
1816 modes in [`compare_exchange`]!
1818 Using [`Acquire`] as success ordering makes the store part
1819 of this operation [`Relaxed`], and using [`Release`] makes the final successful load
1820 [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1821 and must be equivalent to or weaker than the success ordering.
1823 [`bool`]: ../../../std/primitive.bool.html
1824 [`compare_exchange`]: #method.compare_exchange
1825 [`Ordering`]: enum.Ordering.html
1826 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1827 [`Release`]: enum.Ordering.html#variant.Release
1828 [`Acquire`]: enum.Ordering.html#variant.Acquire
1829 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1834 #![feature(no_more_cas)]
1835 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1837 let x = ", stringify!($atomic_type), "::new(7);
1838 assert_eq!(x.fetch_update(|_| None, Ordering::SeqCst, Ordering::SeqCst), Err(7));
1839 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(7));
1840 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(8));
1841 assert_eq!(x.load(Ordering::SeqCst), 9);
1844 #[unstable(feature = "no_more_cas",
1845 reason = "no more CAS loops in user code",
1848 pub fn fetch_update<F>(&self,
1850 fetch_order: Ordering,
1851 set_order: Ordering) -> Result<$int_type, $int_type>
1852 where F: FnMut($int_type) -> Option<$int_type> {
// Classic CAS loop: re-run `f` on the freshly observed value after every
// failed weak compare-exchange (which may also fail spuriously), until the
// exchange succeeds or `f` returns `None`.
1853 let mut prev = self.load(fetch_order);
1854 while let Some(next) = f(prev) {
1855 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
// Success: return the previous value wrapped in `Ok`.
1856 x @ Ok(_) => return x,
// Failure hands back the value currently stored; retry `f` on it.
1857 Err(next_prev) => prev = next_prev
1865 concat!("Maximum with the current value.
1867 Finds the maximum of the current value and the argument `val`, and
1868 sets the new value to the result.
1870 Returns the previous value.
1872 `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
1873 of this operation. All ordering modes are possible. Note that using
1874 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1875 using [`Release`] makes the load part [`Relaxed`].
1877 [`Ordering`]: enum.Ordering.html
1878 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1879 [`Release`]: enum.Ordering.html#variant.Release
1880 [`Acquire`]: enum.Ordering.html#variant.Acquire
1885 #![feature(atomic_min_max)]
1886 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1888 let foo = ", stringify!($atomic_type), "::new(23);
1889 assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
1890 assert_eq!(foo.load(Ordering::SeqCst), 42);
1893 If you want to obtain the maximum value in one step, you can use the following:
1896 #![feature(atomic_min_max)]
1897 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1899 let foo = ", stringify!($atomic_type), "::new(23);
1901 let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
1902 assert!(max_foo == 42);
1905 #[unstable(feature = "atomic_min_max",
1906 reason = "easier and faster min/max than writing manual CAS loop",
1909 pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
1910 // SAFETY: data races are prevented by atomic intrinsics.
1911 unsafe { $max_fn(self.v.get(), val, order) }
1916 concat!("Minimum with the current value.
1918 Finds the minimum of the current value and the argument `val`, and
1919 sets the new value to the result.
1921 Returns the previous value.
1923 `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
1924 of this operation. All ordering modes are possible. Note that using
1925 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1926 using [`Release`] makes the load part [`Relaxed`].
1928 [`Ordering`]: enum.Ordering.html
1929 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1930 [`Release`]: enum.Ordering.html#variant.Release
1931 [`Acquire`]: enum.Ordering.html#variant.Acquire
1936 #![feature(atomic_min_max)]
1937 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1939 let foo = ", stringify!($atomic_type), "::new(23);
1940 assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
1941 assert_eq!(foo.load(Ordering::Relaxed), 23);
1942 assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
1943 assert_eq!(foo.load(Ordering::Relaxed), 22);
1946 If you want to obtain the minimum value in one step, you can use the following:
1949 #![feature(atomic_min_max)]
1950 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1952 let foo = ", stringify!($atomic_type), "::new(23);
1954 let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
1955 assert_eq!(min_foo, 12);
1958 #[unstable(feature = "atomic_min_max",
1959 reason = "easier and faster min/max than writing manual CAS loop",
1962 pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
1963 // SAFETY: data races are prevented by atomic intrinsics.
1964 unsafe { $min_fn(self.v.get(), val, order) }
1969 concat!("Returns a mutable pointer to the underlying integer.
1971 Doing non-atomic reads and writes on the resulting integer can be a data race.
1972 This method is mostly useful for FFI, where the function signature may use
1973 `*mut ", stringify!($int_type), "` instead of `&", stringify!($atomic_type), "`.
1975 Returning an `*mut` pointer from a shared reference to this atomic is safe because the
1976 atomic types work with interior mutability. All modifications of an atomic change the value
1977 through a shared reference, and can do so safely as long as they use atomic operations. Any
1978 use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
1979 restriction: operations on it must be atomic.
1983 ```ignore (extern-declaration)
1985 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1988 fn my_atomic_op(arg: *mut ", stringify!($int_type), ");
1991 let mut atomic = ", stringify!($atomic_type), "::new(1);
1993 // SAFETY: Safe as long as `my_atomic_op` is atomic.
1995 my_atomic_op(atomic.as_mut_ptr());
2000 #[unstable(feature = "atomic_mut_ptr",
2001 reason = "recently added",
2003 pub fn as_mut_ptr(&self) -> *mut $int_type {
2011 #[cfg(target_has_atomic_load_store = "8")]
2013 cfg(target_has_atomic = "8"),
2014 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2015 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2016 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2017 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2018 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2019 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2020 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2021 unstable(feature = "integer_atomics", issue = "32976"),
2022 "i8", "../../../std/primitive.i8.html",
2024 atomic_min, atomic_max,
2027 i8 AtomicI8 ATOMIC_I8_INIT
2029 #[cfg(target_has_atomic_load_store = "8")]
2031 cfg(target_has_atomic = "8"),
2032 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2033 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2034 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2035 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2036 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2037 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2038 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2039 unstable(feature = "integer_atomics", issue = "32976"),
2040 "u8", "../../../std/primitive.u8.html",
2042 atomic_umin, atomic_umax,
2045 u8 AtomicU8 ATOMIC_U8_INIT
2047 #[cfg(target_has_atomic_load_store = "16")]
2049 cfg(target_has_atomic = "16"),
2050 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2051 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2052 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2053 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2054 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2055 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2056 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2057 unstable(feature = "integer_atomics", issue = "32976"),
2058 "i16", "../../../std/primitive.i16.html",
2060 atomic_min, atomic_max,
2062 "AtomicI16::new(0)",
2063 i16 AtomicI16 ATOMIC_I16_INIT
2065 #[cfg(target_has_atomic_load_store = "16")]
2067 cfg(target_has_atomic = "16"),
2068 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2069 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2070 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2071 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2072 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2073 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2074 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2075 unstable(feature = "integer_atomics", issue = "32976"),
2076 "u16", "../../../std/primitive.u16.html",
2078 atomic_umin, atomic_umax,
2080 "AtomicU16::new(0)",
2081 u16 AtomicU16 ATOMIC_U16_INIT
2083 #[cfg(target_has_atomic_load_store = "32")]
2085 cfg(target_has_atomic = "32"),
2086 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2087 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2088 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2089 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2090 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2091 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2092 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2093 unstable(feature = "integer_atomics", issue = "32976"),
2094 "i32", "../../../std/primitive.i32.html",
2096 atomic_min, atomic_max,
2098 "AtomicI32::new(0)",
2099 i32 AtomicI32 ATOMIC_I32_INIT
2101 #[cfg(target_has_atomic_load_store = "32")]
2103 cfg(target_has_atomic = "32"),
2104 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2105 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2106 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2107 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2108 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2109 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2110 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2111 unstable(feature = "integer_atomics", issue = "32976"),
2112 "u32", "../../../std/primitive.u32.html",
2114 atomic_umin, atomic_umax,
2116 "AtomicU32::new(0)",
2117 u32 AtomicU32 ATOMIC_U32_INIT
2119 #[cfg(target_has_atomic_load_store = "64")]
2121 cfg(target_has_atomic = "64"),
2122 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2123 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2124 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2125 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2126 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2127 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2128 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2129 unstable(feature = "integer_atomics", issue = "32976"),
2130 "i64", "../../../std/primitive.i64.html",
2132 atomic_min, atomic_max,
2134 "AtomicI64::new(0)",
2135 i64 AtomicI64 ATOMIC_I64_INIT
2137 #[cfg(target_has_atomic_load_store = "64")]
2139 cfg(target_has_atomic = "64"),
2140 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2141 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2142 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2143 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2144 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2145 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2146 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2147 unstable(feature = "integer_atomics", issue = "32976"),
2148 "u64", "../../../std/primitive.u64.html",
2150 atomic_umin, atomic_umax,
2152 "AtomicU64::new(0)",
2153 u64 AtomicU64 ATOMIC_U64_INIT
2155 #[cfg(target_has_atomic_load_store = "128")]
2157 cfg(target_has_atomic = "128"),
2158 unstable(feature = "integer_atomics", issue = "32976"),
2159 unstable(feature = "integer_atomics", issue = "32976"),
2160 unstable(feature = "integer_atomics", issue = "32976"),
2161 unstable(feature = "integer_atomics", issue = "32976"),
2162 unstable(feature = "integer_atomics", issue = "32976"),
2163 unstable(feature = "integer_atomics", issue = "32976"),
2164 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2165 unstable(feature = "integer_atomics", issue = "32976"),
2166 "i128", "../../../std/primitive.i128.html",
2167 "#![feature(integer_atomics)]\n\n",
2168 atomic_min, atomic_max,
2170 "AtomicI128::new(0)",
2171 i128 AtomicI128 ATOMIC_I128_INIT
2173 #[cfg(target_has_atomic_load_store = "128")]
2175 cfg(target_has_atomic = "128"),
2176 unstable(feature = "integer_atomics", issue = "32976"),
2177 unstable(feature = "integer_atomics", issue = "32976"),
2178 unstable(feature = "integer_atomics", issue = "32976"),
2179 unstable(feature = "integer_atomics", issue = "32976"),
2180 unstable(feature = "integer_atomics", issue = "32976"),
2181 unstable(feature = "integer_atomics", issue = "32976"),
2182 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2183 unstable(feature = "integer_atomics", issue = "32976"),
2184 "u128", "../../../std/primitive.u128.html",
2185 "#![feature(integer_atomics)]\n\n",
2186 atomic_umin, atomic_umax,
2188 "AtomicU128::new(0)",
2189 u128 AtomicU128 ATOMIC_U128_INIT
2191 #[cfg(target_has_atomic_load_store = "ptr")]
2192 #[cfg(target_pointer_width = "16")]
2193 macro_rules! ptr_width {
2198 #[cfg(target_has_atomic_load_store = "ptr")]
2199 #[cfg(target_pointer_width = "32")]
2200 macro_rules! ptr_width {
2205 #[cfg(target_has_atomic_load_store = "ptr")]
2206 #[cfg(target_pointer_width = "64")]
2207 macro_rules! ptr_width {
2212 #[cfg(target_has_atomic_load_store = "ptr")]
2214 cfg(target_has_atomic = "ptr"),
2215 stable(feature = "rust1", since = "1.0.0"),
2216 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2217 stable(feature = "atomic_debug", since = "1.3.0"),
2218 stable(feature = "atomic_access", since = "1.15.0"),
2219 stable(feature = "atomic_from", since = "1.23.0"),
2220 stable(feature = "atomic_nand", since = "1.27.0"),
2221 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2222 stable(feature = "rust1", since = "1.0.0"),
2223 "isize", "../../../std/primitive.isize.html",
2225 atomic_min, atomic_max,
2227 "AtomicIsize::new(0)",
2228 isize AtomicIsize ATOMIC_ISIZE_INIT
2230 #[cfg(target_has_atomic_load_store = "ptr")]
2232 cfg(target_has_atomic = "ptr"),
2233 stable(feature = "rust1", since = "1.0.0"),
2234 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2235 stable(feature = "atomic_debug", since = "1.3.0"),
2236 stable(feature = "atomic_access", since = "1.15.0"),
2237 stable(feature = "atomic_from", since = "1.23.0"),
2238 stable(feature = "atomic_nand", since = "1.27.0"),
2239 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2240 stable(feature = "rust1", since = "1.0.0"),
2241 "usize", "../../../std/primitive.usize.html",
2243 atomic_umin, atomic_umax,
2245 "AtomicUsize::new(0)",
2246 usize AtomicUsize ATOMIC_USIZE_INIT
2250 #[cfg(target_has_atomic = "8")]
2251 fn strongest_failure_ordering(order: Ordering) -> Ordering {
2262 unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {
2264 Release => intrinsics::atomic_store_rel(dst, val),
2265 Relaxed => intrinsics::atomic_store_relaxed(dst, val),
2266 SeqCst => intrinsics::atomic_store(dst, val),
2267 Acquire => panic!("there is no such thing as an acquire store"),
2268 AcqRel => panic!("there is no such thing as an acquire/release store"),
2273 unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
2275 Acquire => intrinsics::atomic_load_acq(dst),
2276 Relaxed => intrinsics::atomic_load_relaxed(dst),
2277 SeqCst => intrinsics::atomic_load(dst),
2278 Release => panic!("there is no such thing as a release load"),
2279 AcqRel => panic!("there is no such thing as an acquire/release load"),
2284 #[cfg(target_has_atomic = "8")]
2285 unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2287 Acquire => intrinsics::atomic_xchg_acq(dst, val),
2288 Release => intrinsics::atomic_xchg_rel(dst, val),
2289 AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
2290 Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
2291 SeqCst => intrinsics::atomic_xchg(dst, val),
2295 /// Returns the previous value (like __sync_fetch_and_add).
2297 #[cfg(target_has_atomic = "8")]
2298 unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2300 Acquire => intrinsics::atomic_xadd_acq(dst, val),
2301 Release => intrinsics::atomic_xadd_rel(dst, val),
2302 AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
2303 Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
2304 SeqCst => intrinsics::atomic_xadd(dst, val),
2308 /// Returns the previous value (like __sync_fetch_and_sub).
2310 #[cfg(target_has_atomic = "8")]
2311 unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2313 Acquire => intrinsics::atomic_xsub_acq(dst, val),
2314 Release => intrinsics::atomic_xsub_rel(dst, val),
2315 AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
2316 Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
2317 SeqCst => intrinsics::atomic_xsub(dst, val),
2322 #[cfg(target_has_atomic = "8")]
2323 unsafe fn atomic_compare_exchange<T: Copy>(
2330 let (val, ok) = match (success, failure) {
2331 (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
2332 (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
2333 (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
2334 (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
2335 (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
2336 (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
2337 (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
2338 (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
2339 (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
2340 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
2341 (_, Release) => panic!("there is no such thing as a release failure ordering"),
2342 _ => panic!("a failure ordering can't be stronger than a success ordering"),
2344 if ok { Ok(val) } else { Err(val) }
2348 #[cfg(target_has_atomic = "8")]
2349 unsafe fn atomic_compare_exchange_weak<T: Copy>(
2356 let (val, ok) = match (success, failure) {
2357 (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
2358 (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
2359 (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
2360 (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
2361 (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
2362 (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
2363 (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
2364 (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
2365 (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
2366 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
2367 (_, Release) => panic!("there is no such thing as a release failure ordering"),
2368 _ => panic!("a failure ordering can't be stronger than a success ordering"),
2370 if ok { Ok(val) } else { Err(val) }
2374 #[cfg(target_has_atomic = "8")]
2375 unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2377 Acquire => intrinsics::atomic_and_acq(dst, val),
2378 Release => intrinsics::atomic_and_rel(dst, val),
2379 AcqRel => intrinsics::atomic_and_acqrel(dst, val),
2380 Relaxed => intrinsics::atomic_and_relaxed(dst, val),
2381 SeqCst => intrinsics::atomic_and(dst, val),
2386 #[cfg(target_has_atomic = "8")]
2387 unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2389 Acquire => intrinsics::atomic_nand_acq(dst, val),
2390 Release => intrinsics::atomic_nand_rel(dst, val),
2391 AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
2392 Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
2393 SeqCst => intrinsics::atomic_nand(dst, val),
2398 #[cfg(target_has_atomic = "8")]
2399 unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2401 Acquire => intrinsics::atomic_or_acq(dst, val),
2402 Release => intrinsics::atomic_or_rel(dst, val),
2403 AcqRel => intrinsics::atomic_or_acqrel(dst, val),
2404 Relaxed => intrinsics::atomic_or_relaxed(dst, val),
2405 SeqCst => intrinsics::atomic_or(dst, val),
2410 #[cfg(target_has_atomic = "8")]
2411 unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2413 Acquire => intrinsics::atomic_xor_acq(dst, val),
2414 Release => intrinsics::atomic_xor_rel(dst, val),
2415 AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
2416 Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
2417 SeqCst => intrinsics::atomic_xor(dst, val),
2421 /// returns the max value (signed comparison)
2423 #[cfg(target_has_atomic = "8")]
2424 unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2426 Acquire => intrinsics::atomic_max_acq(dst, val),
2427 Release => intrinsics::atomic_max_rel(dst, val),
2428 AcqRel => intrinsics::atomic_max_acqrel(dst, val),
2429 Relaxed => intrinsics::atomic_max_relaxed(dst, val),
2430 SeqCst => intrinsics::atomic_max(dst, val),
2434 /// returns the min value (signed comparison)
2436 #[cfg(target_has_atomic = "8")]
2437 unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2439 Acquire => intrinsics::atomic_min_acq(dst, val),
2440 Release => intrinsics::atomic_min_rel(dst, val),
2441 AcqRel => intrinsics::atomic_min_acqrel(dst, val),
2442 Relaxed => intrinsics::atomic_min_relaxed(dst, val),
2443 SeqCst => intrinsics::atomic_min(dst, val),
2447 /// returns the max value (unsigned comparison)
2449 #[cfg(target_has_atomic = "8")]
2450 unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2452 Acquire => intrinsics::atomic_umax_acq(dst, val),
2453 Release => intrinsics::atomic_umax_rel(dst, val),
2454 AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
2455 Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
2456 SeqCst => intrinsics::atomic_umax(dst, val),
2460 /// returns the min value (unsigned comparison)
2462 #[cfg(target_has_atomic = "8")]
2463 unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
2465 Acquire => intrinsics::atomic_umin_acq(dst, val),
2466 Release => intrinsics::atomic_umin_rel(dst, val),
2467 AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
2468 Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
2469 SeqCst => intrinsics::atomic_umin(dst, val),
2473 /// An atomic fence.
2475 /// Depending on the specified order, a fence prevents the compiler and CPU from
2476 /// reordering certain types of memory operations around it.
2477 /// That creates synchronizes-with relationships between it and atomic operations
2478 /// or fences in other threads.
2480 /// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
2481 /// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
2482 /// exist operations X and Y, both operating on some atomic object 'M' such
2483 /// that A is sequenced before X, Y is synchronized before B and Y observes
2484 /// the change to M. This provides a happens-before dependence between A and B.
2487 /// Thread 1 Thread 2
2489 /// fence(Release); A --------------
2490 /// x.store(3, Relaxed); X --------- |
2493 /// -------------> Y if x.load(Relaxed) == 3 {
2494 /// |-------> B fence(Acquire);
2499 /// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
2502 /// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
2503 /// and [`Release`] semantics, participates in the global program order of the
2504 /// other [`SeqCst`] operations and/or fences.
2506 /// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
2510 /// Panics if `order` is [`Relaxed`].
2515 /// use std::sync::atomic::AtomicBool;
2516 /// use std::sync::atomic::fence;
2517 /// use std::sync::atomic::Ordering;
2519 /// // A mutual exclusion primitive based on spinlock.
2520 /// pub struct Mutex {
2521 /// flag: AtomicBool,
2525 /// pub fn new() -> Mutex {
2527 /// flag: AtomicBool::new(false),
2531 /// pub fn lock(&self) {
2532 /// while !self.flag.compare_and_swap(false, true, Ordering::Relaxed) {}
2533 /// // This fence synchronizes-with store in `unlock`.
2534 /// fence(Ordering::Acquire);
2537 /// pub fn unlock(&self) {
2538 /// self.flag.store(false, Ordering::Release);
2543 /// [`Ordering`]: enum.Ordering.html
2544 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2545 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2546 /// [`Release`]: enum.Ordering.html#variant.Release
2547 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2548 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2550 #[stable(feature = "rust1", since = "1.0.0")]
2551 #[cfg_attr(target_arch = "wasm32", allow(unused_variables))]
2552 pub fn fence(order: Ordering) {
2553 // On wasm32 it looks like fences aren't implemented in LLVM yet in that
2554 // they will cause LLVM to abort. The wasm instruction set doesn't have
2555 // fences right now. There's discussion online about the best way for tools
2556 // to conventionally implement fences at
2557 // https://github.com/WebAssembly/tool-conventions/issues/59. We should
2558 // follow that discussion and implement a solution when one comes about!
2559 #[cfg(not(target_arch = "wasm32"))]
2560 // SAFETY: using an atomic fence is safe.
2563 Acquire => intrinsics::atomic_fence_acq(),
2564 Release => intrinsics::atomic_fence_rel(),
2565 AcqRel => intrinsics::atomic_fence_acqrel(),
2566 SeqCst => intrinsics::atomic_fence(),
2567 Relaxed => panic!("there is no such thing as a relaxed fence"),
2572 /// A compiler memory fence.
2574 /// `compiler_fence` does not emit any machine code, but restricts the kinds
2575 /// of memory re-ordering the compiler is allowed to do. Specifically, depending on
2576 /// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
2577 /// or writes from before or after the call to the other side of the call to
2578 /// `compiler_fence`. Note that it does **not** prevent the *hardware*
2579 /// from doing such re-ordering. This is not a problem in a single-threaded,
2580 /// execution context, but when other threads may modify memory at the same
2581 /// time, stronger synchronization primitives such as [`fence`] are required.
2583 /// The re-ordering prevented by the different ordering semantics are:
2585 /// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
2586 /// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
2587 /// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
2588 /// - with [`AcqRel`], both of the above rules are enforced.
2590 /// `compiler_fence` is generally only useful for preventing a thread from
2591 /// racing *with itself*. That is, if a given thread is executing one piece
2592 /// of code, and is then interrupted, and starts executing code elsewhere
2593 /// (while still in the same thread, and conceptually still on the same
2594 /// core). In traditional programs, this can only occur when a signal
2595 /// handler is registered. In more low-level code, such situations can also
2596 /// arise when handling interrupts, when implementing green threads with
2597 /// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
2598 /// discussion of [memory barriers].
2602 /// Panics if `order` is [`Relaxed`].
2606 /// Without `compiler_fence`, the `assert_eq!` in following code
2607 /// is *not* guaranteed to succeed, despite everything happening in a single thread.
2608 /// To see why, remember that the compiler is free to swap the stores to
2609 /// `IMPORTANT_VARIABLE` and `IS_READ` since they are both
2610 /// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
2611 /// after `IS_READY` is updated, then the signal handler will see
2612 /// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
2613 /// Using a `compiler_fence` remedies this situation.
2616 /// use std::sync::atomic::{AtomicBool, AtomicUsize};
2617 /// use std::sync::atomic::Ordering;
2618 /// use std::sync::atomic::compiler_fence;
2620 /// static IMPORTANT_VARIABLE: AtomicUsize = AtomicUsize::new(0);
2621 /// static IS_READY: AtomicBool = AtomicBool::new(false);
2624 /// IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
2625 /// // prevent earlier writes from being moved beyond this point
2626 /// compiler_fence(Ordering::Release);
2627 /// IS_READY.store(true, Ordering::Relaxed);
2630 /// fn signal_handler() {
2631 /// if IS_READY.load(Ordering::Relaxed) {
2632 /// assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
2637 /// [`fence`]: fn.fence.html
2638 /// [`Ordering`]: enum.Ordering.html
2639 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2640 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2641 /// [`Release`]: enum.Ordering.html#variant.Release
2642 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2643 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2644 /// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
2646 #[stable(feature = "compiler_fences", since = "1.21.0")]
2647 pub fn compiler_fence(order: Ordering) {
2648 // SAFETY: using an atomic fence is safe.
2651 Acquire => intrinsics::atomic_singlethreadfence_acq(),
2652 Release => intrinsics::atomic_singlethreadfence_rel(),
2653 AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
2654 SeqCst => intrinsics::atomic_singlethreadfence(),
2655 Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
2660 #[cfg(target_has_atomic_load_store = "8")]
2661 #[stable(feature = "atomic_debug", since = "1.3.0")]
2662 impl fmt::Debug for AtomicBool {
2663 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2664 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2668 #[cfg(target_has_atomic_load_store = "ptr")]
2669 #[stable(feature = "atomic_debug", since = "1.3.0")]
2670 impl<T> fmt::Debug for AtomicPtr<T> {
2671 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2672 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2676 #[cfg(target_has_atomic_load_store = "ptr")]
2677 #[stable(feature = "atomic_pointer", since = "1.24.0")]
2678 impl<T> fmt::Pointer for AtomicPtr<T> {
2679 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2680 fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)