3 //! Atomic types provide primitive shared-memory communication between
4 //! threads, and are the building blocks of other concurrent
7 //! This module defines atomic versions of a select number of primitive
8 //! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
9 //! [`AtomicI8`], [`AtomicU16`], etc.
10 //! Atomic types present operations that, when used correctly, synchronize
11 //! updates between threads.
13 //! [`AtomicBool`]: struct.AtomicBool.html
14 //! [`AtomicIsize`]: struct.AtomicIsize.html
15 //! [`AtomicUsize`]: struct.AtomicUsize.html
16 //! [`AtomicI8`]: struct.AtomicI8.html
17 //! [`AtomicU16`]: struct.AtomicU16.html
19 //! Each method takes an [`Ordering`] which represents the strength of
20 //! the memory barrier for that operation. These orderings are the
21 //! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2].
23 //! [`Ordering`]: enum.Ordering.html
25 //! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order
26 //! [2]: ../../../nomicon/atomics.html
28 //! Atomic variables are safe to share between threads (they implement [`Sync`])
29 //! but they do not themselves provide the mechanism for sharing and follow the
30 //! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
31 //! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
32 //! atomically-reference-counted shared pointer).
34 //! [`Sync`]: ../../marker/trait.Sync.html
35 //! [arc]: ../../../std/sync/struct.Arc.html
37 //! Atomic types may be stored in static variables, initialized using
38 //! the constant initializers like [`AtomicBool::new`]. Atomic statics
39 //! are often used for lazy global initialization.
41 //! [`AtomicBool::new`]: struct.AtomicBool.html#method.new
45 //! All atomic types in this module are guaranteed to be [lock-free] if they're
46 //! available. This means they don't internally acquire a global mutex. Atomic
47 //! types and operations are not guaranteed to be wait-free. This means that
48 //! operations like `fetch_or` may be implemented with a compare-and-swap loop.
50 //! Atomic operations may be implemented at the instruction layer with
51 //! larger-size atomics. For example some platforms use 4-byte atomic
52 //! instructions to implement `AtomicI8`. Note that this emulation should not
53 //! have an impact on correctness of code, it's just something to be aware of.
55 //! The atomic types in this module may not be available on all platforms. The
56 //! atomic types here are all widely available, however, and can generally be
//! relied upon to exist. Some notable exceptions are:
59 //! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or
60 //! `AtomicI64` types.
//! * ARM platforms like `armv5te` that aren't for Linux do not have any atomics at all.
63 //! * ARM targets with `thumbv6m` do not have atomic operations at all.
65 //! Note that future platforms may be added that also do not have support for
66 //! some atomic operations. Maximally portable code will want to be careful
67 //! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are
68 //! generally the most portable, but even then they're not available everywhere.
69 //! For reference, the `std` library requires pointer-sized atomics, although
72 //! Currently you'll need to use `#[cfg(target_arch)]` primarily to
73 //! conditionally compile in code with atomics. There is an unstable
74 //! `#[cfg(target_has_atomic)]` as well which may be stabilized in the future.
76 //! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm
80 //! A simple spinlock:
83 //! use std::sync::Arc;
84 //! use std::sync::atomic::{AtomicUsize, Ordering};
88 //! let spinlock = Arc::new(AtomicUsize::new(1));
90 //! let spinlock_clone = spinlock.clone();
//! let thread = thread::spawn(move || {
92 //! spinlock_clone.store(0, Ordering::SeqCst);
95 //! // Wait for the other thread to release the lock
96 //! while spinlock.load(Ordering::SeqCst) != 0 {}
98 //! if let Err(panic) = thread.join() {
99 //! println!("Thread had an error: {:?}", panic);
104 //! Keep a global count of live threads:
107 //! use std::sync::atomic::{AtomicUsize, Ordering};
109 //! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0);
111 //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
112 //! println!("live threads: {}", old_thread_count + 1);
115 // ignore-tidy-undocumented-unsafe
117 #![stable(feature = "rust1", since = "1.0.0")]
118 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
119 #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))]
121 use self::Ordering::*;
123 use crate::cell::UnsafeCell;
125 use crate::intrinsics;
127 use crate::hint::spin_loop;
129 /// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
131 /// Upon receiving spin-loop signal the processor can optimize its behavior by, for example, saving
132 /// power or switching hyper-threads.
134 /// This function is different from [`std::thread::yield_now`] which directly yields to the
135 /// system's scheduler, whereas `spin_loop_hint` does not interact with the operating system.
137 /// Spin locks can be very efficient for short lock durations because they do not involve context
138 /// switches or interaction with the operating system. For long lock durations they become wasteful
139 /// however because they use CPU cycles for the entire lock duration, and using a
140 /// [`std::sync::Mutex`] is likely the better approach. If actively spinning for a long time is
141 /// required, e.g. because code polls a non-blocking API, calling [`std::thread::yield_now`]
142 /// or [`std::thread::sleep`] may be the best option.
144 /// **Note**: Spin locks are based on the underlying assumption that another thread will release
145 /// the lock 'soon'. In order for this to work, that other thread must run on a different CPU or
146 /// core (at least potentially). Spin locks do not work efficiently on single CPU / core platforms.
148 /// **Note**: On platforms that do not support receiving spin-loop hints this function does not
149 /// do anything at all.
151 /// [`std::thread::yield_now`]: ../../../std/thread/fn.yield_now.html
152 /// [`std::thread::sleep`]: ../../../std/thread/fn.sleep.html
153 /// [`std::sync::Mutex`]: ../../../std/sync/struct.Mutex.html
155 #[stable(feature = "spin_loop_hint", since = "1.24.0")]
156 pub fn spin_loop_hint() {
160 /// A boolean type which can be safely shared between threads.
162 /// This type has the same in-memory representation as a [`bool`].
164 /// [`bool`]: ../../../std/primitive.bool.html
165 #[cfg(target_has_atomic_load_store = "8")]
166 #[stable(feature = "rust1", since = "1.0.0")]
168 pub struct AtomicBool {
172 #[cfg(target_has_atomic_load_store = "8")]
173 #[stable(feature = "rust1", since = "1.0.0")]
174 impl Default for AtomicBool {
175 /// Creates an `AtomicBool` initialized to `false`.
176 fn default() -> Self {
// Send is implicitly implemented for AtomicBool.
// SAFETY: all mutation of the inner `UnsafeCell` goes through atomic
// operations, so concurrent access from multiple threads is sound.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicBool {}
186 /// A raw pointer type which can be safely shared between threads.
188 /// This type has the same in-memory representation as a `*mut T`.
189 #[cfg(target_has_atomic_load_store = "ptr")]
190 #[stable(feature = "rust1", since = "1.0.0")]
191 #[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
192 #[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
193 #[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
194 pub struct AtomicPtr<T> {
195 p: UnsafeCell<*mut T>,
198 #[cfg(target_has_atomic_load_store = "ptr")]
199 #[stable(feature = "rust1", since = "1.0.0")]
200 impl<T> Default for AtomicPtr<T> {
201 /// Creates a null `AtomicPtr<T>`.
202 fn default() -> AtomicPtr<T> {
203 AtomicPtr::new(crate::ptr::null_mut())
// SAFETY: an `AtomicPtr<T>` only stores the pointer value; moving it to
// another thread never dereferences the `*mut T`.
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Send for AtomicPtr<T> {}
// SAFETY: all mutation of the inner `UnsafeCell` goes through atomic
// operations, so shared access from multiple threads is sound.
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Sync for AtomicPtr<T> {}
214 /// Atomic memory orderings
216 /// Memory orderings specify the way atomic operations synchronize memory.
217 /// In its weakest [`Relaxed`][Ordering::Relaxed], only the memory directly touched by the
218 /// operation is synchronized. On the other hand, a store-load pair of [`SeqCst`][Ordering::SeqCst]
219 /// operations synchronize other memory while additionally preserving a total order of such
220 /// operations across all threads.
222 /// Rust's memory orderings are [the same as those of
223 /// C++20](https://en.cppreference.com/w/cpp/atomic/memory_order).
225 /// For more information see the [nomicon].
227 /// [nomicon]: ../../../nomicon/atomics.html
228 /// [Ordering::Relaxed]: #variant.Relaxed
229 /// [Ordering::SeqCst]: #variant.SeqCst
230 #[stable(feature = "rust1", since = "1.0.0")]
231 #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
234 /// No ordering constraints, only atomic operations.
236 /// Corresponds to [`memory_order_relaxed`] in C++20.
238 /// [`memory_order_relaxed`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering
239 #[stable(feature = "rust1", since = "1.0.0")]
241 /// When coupled with a store, all previous operations become ordered
242 /// before any load of this value with [`Acquire`] (or stronger) ordering.
243 /// In particular, all previous writes become visible to all threads
244 /// that perform an [`Acquire`] (or stronger) load of this value.
246 /// Notice that using this ordering for an operation that combines loads
247 /// and stores leads to a [`Relaxed`] load operation!
249 /// This ordering is only applicable for operations that can perform a store.
251 /// Corresponds to [`memory_order_release`] in C++20.
253 /// [`Release`]: #variant.Release
254 /// [`Acquire`]: #variant.Acquire
255 /// [`Relaxed`]: #variant.Relaxed
256 /// [`memory_order_release`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
257 #[stable(feature = "rust1", since = "1.0.0")]
259 /// When coupled with a load, if the loaded value was written by a store operation with
260 /// [`Release`] (or stronger) ordering, then all subsequent operations
261 /// become ordered after that store. In particular, all subsequent loads will see data
262 /// written before the store.
264 /// Notice that using this ordering for an operation that combines loads
265 /// and stores leads to a [`Relaxed`] store operation!
267 /// This ordering is only applicable for operations that can perform a load.
269 /// Corresponds to [`memory_order_acquire`] in C++20.
271 /// [`Acquire`]: #variant.Acquire
272 /// [`Release`]: #variant.Release
273 /// [`Relaxed`]: #variant.Relaxed
274 /// [`memory_order_acquire`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
275 #[stable(feature = "rust1", since = "1.0.0")]
277 /// Has the effects of both [`Acquire`] and [`Release`] together:
278 /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
280 /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
281 /// not performing any store and hence it has just [`Acquire`] ordering. However,
282 /// `AcqRel` will never perform [`Relaxed`] accesses.
284 /// This ordering is only applicable for operations that combine both loads and stores.
286 /// Corresponds to [`memory_order_acq_rel`] in C++20.
288 /// [`memory_order_acq_rel`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
289 /// [`Acquire`]: #variant.Acquire
290 /// [`Release`]: #variant.Release
291 /// [`Relaxed`]: #variant.Relaxed
292 #[stable(feature = "rust1", since = "1.0.0")]
294 /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
295 /// operations, respectively) with the additional guarantee that all threads see all
296 /// sequentially consistent operations in the same order.
298 /// Corresponds to [`memory_order_seq_cst`] in C++20.
300 /// [`memory_order_seq_cst`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering
301 /// [`Acquire`]: #variant.Acquire
302 /// [`Release`]: #variant.Release
303 /// [`AcqRel`]: #variant.AcqRel
304 #[stable(feature = "rust1", since = "1.0.0")]
308 /// An [`AtomicBool`] initialized to `false`.
310 /// [`AtomicBool`]: struct.AtomicBool.html
311 #[cfg(target_has_atomic_load_store = "8")]
312 #[stable(feature = "rust1", since = "1.0.0")]
315 reason = "the `new` function is now preferred",
316 suggestion = "AtomicBool::new(false)"
318 pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
320 #[cfg(target_has_atomic_load_store = "8")]
322 /// Creates a new `AtomicBool`.
327 /// use std::sync::atomic::AtomicBool;
329 /// let atomic_true = AtomicBool::new(true);
330 /// let atomic_false = AtomicBool::new(false);
333 #[stable(feature = "rust1", since = "1.0.0")]
334 #[rustc_const_stable(feature = "const_atomic_new", since = "1.32.0")]
335 pub const fn new(v: bool) -> AtomicBool {
336 AtomicBool { v: UnsafeCell::new(v as u8) }
339 /// Returns a mutable reference to the underlying [`bool`].
341 /// This is safe because the mutable reference guarantees that no other threads are
342 /// concurrently accessing the atomic data.
344 /// [`bool`]: ../../../std/primitive.bool.html
349 /// use std::sync::atomic::{AtomicBool, Ordering};
351 /// let mut some_bool = AtomicBool::new(true);
352 /// assert_eq!(*some_bool.get_mut(), true);
353 /// *some_bool.get_mut() = false;
354 /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
357 #[stable(feature = "atomic_access", since = "1.15.0")]
358 pub fn get_mut(&mut self) -> &mut bool {
359 unsafe { &mut *(self.v.get() as *mut bool) }
362 /// Consumes the atomic and returns the contained value.
364 /// This is safe because passing `self` by value guarantees that no other threads are
365 /// concurrently accessing the atomic data.
370 /// use std::sync::atomic::AtomicBool;
372 /// let some_bool = AtomicBool::new(true);
373 /// assert_eq!(some_bool.into_inner(), true);
376 #[stable(feature = "atomic_access", since = "1.15.0")]
377 pub fn into_inner(self) -> bool {
378 self.v.into_inner() != 0
381 /// Loads a value from the bool.
383 /// `load` takes an [`Ordering`] argument which describes the memory ordering
384 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
388 /// Panics if `order` is [`Release`] or [`AcqRel`].
390 /// [`Ordering`]: enum.Ordering.html
391 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
392 /// [`Release`]: enum.Ordering.html#variant.Release
393 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
394 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
395 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
400 /// use std::sync::atomic::{AtomicBool, Ordering};
402 /// let some_bool = AtomicBool::new(true);
404 /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
407 #[stable(feature = "rust1", since = "1.0.0")]
408 pub fn load(&self, order: Ordering) -> bool {
409 unsafe { atomic_load(self.v.get(), order) != 0 }
412 /// Stores a value into the bool.
414 /// `store` takes an [`Ordering`] argument which describes the memory ordering
415 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
419 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
421 /// [`Ordering`]: enum.Ordering.html
422 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
423 /// [`Release`]: enum.Ordering.html#variant.Release
424 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
425 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
426 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
431 /// use std::sync::atomic::{AtomicBool, Ordering};
433 /// let some_bool = AtomicBool::new(true);
435 /// some_bool.store(false, Ordering::Relaxed);
436 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
439 #[stable(feature = "rust1", since = "1.0.0")]
440 pub fn store(&self, val: bool, order: Ordering) {
442 atomic_store(self.v.get(), val as u8, order);
446 /// Stores a value into the bool, returning the previous value.
448 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
449 /// of this operation. All ordering modes are possible. Note that using
450 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
451 /// using [`Release`] makes the load part [`Relaxed`].
453 /// [`Ordering`]: enum.Ordering.html
454 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
455 /// [`Release`]: enum.Ordering.html#variant.Release
456 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
461 /// use std::sync::atomic::{AtomicBool, Ordering};
463 /// let some_bool = AtomicBool::new(true);
465 /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
466 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
469 #[stable(feature = "rust1", since = "1.0.0")]
470 #[cfg(target_has_atomic = "8")]
471 pub fn swap(&self, val: bool, order: Ordering) -> bool {
472 unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
475 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
477 /// The return value is always the previous value. If it is equal to `current`, then the value
480 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
481 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
482 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
483 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
484 /// happens, and using [`Release`] makes the load part [`Relaxed`].
486 /// [`Ordering`]: enum.Ordering.html
487 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
488 /// [`Release`]: enum.Ordering.html#variant.Release
489 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
490 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
491 /// [`bool`]: ../../../std/primitive.bool.html
496 /// use std::sync::atomic::{AtomicBool, Ordering};
498 /// let some_bool = AtomicBool::new(true);
500 /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
501 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
503 /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
504 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
507 #[stable(feature = "rust1", since = "1.0.0")]
508 #[cfg(target_has_atomic = "8")]
509 pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
510 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
516 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
518 /// The return value is a result indicating whether the new value was written and containing
519 /// the previous value. On success this value is guaranteed to be equal to `current`.
521 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
522 /// ordering of this operation. The first describes the required ordering if the
523 /// operation succeeds while the second describes the required ordering when the
524 /// operation fails. Using [`Acquire`] as success ordering makes the store part
525 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
526 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
527 /// and must be equivalent to or weaker than the success ordering.
530 /// [`bool`]: ../../../std/primitive.bool.html
531 /// [`Ordering`]: enum.Ordering.html
532 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
533 /// [`Release`]: enum.Ordering.html#variant.Release
534 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
535 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
540 /// use std::sync::atomic::{AtomicBool, Ordering};
542 /// let some_bool = AtomicBool::new(true);
544 /// assert_eq!(some_bool.compare_exchange(true,
546 /// Ordering::Acquire,
547 /// Ordering::Relaxed),
549 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
551 /// assert_eq!(some_bool.compare_exchange(true, true,
552 /// Ordering::SeqCst,
553 /// Ordering::Acquire),
555 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
558 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
559 #[cfg(target_has_atomic = "8")]
560 pub fn compare_exchange(
566 ) -> Result<bool, bool> {
568 atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
571 Err(x) => Err(x != 0),
575 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
577 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
578 /// comparison succeeds, which can result in more efficient code on some platforms. The
579 /// return value is a result indicating whether the new value was written and containing the
582 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
583 /// ordering of this operation. The first describes the required ordering if the
584 /// operation succeeds while the second describes the required ordering when the
585 /// operation fails. Using [`Acquire`] as success ordering makes the store part
586 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
587 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
588 /// and must be equivalent to or weaker than the success ordering.
590 /// [`bool`]: ../../../std/primitive.bool.html
591 /// [`compare_exchange`]: #method.compare_exchange
592 /// [`Ordering`]: enum.Ordering.html
593 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
594 /// [`Release`]: enum.Ordering.html#variant.Release
595 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
596 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
601 /// use std::sync::atomic::{AtomicBool, Ordering};
603 /// let val = AtomicBool::new(false);
606 /// let mut old = val.load(Ordering::Relaxed);
608 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
610 /// Err(x) => old = x,
615 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
616 #[cfg(target_has_atomic = "8")]
617 pub fn compare_exchange_weak(
623 ) -> Result<bool, bool> {
625 atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
628 Err(x) => Err(x != 0),
632 /// Logical "and" with a boolean value.
634 /// Performs a logical "and" operation on the current value and the argument `val`, and sets
635 /// the new value to the result.
637 /// Returns the previous value.
639 /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
640 /// of this operation. All ordering modes are possible. Note that using
641 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
642 /// using [`Release`] makes the load part [`Relaxed`].
644 /// [`Ordering`]: enum.Ordering.html
645 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
646 /// [`Release`]: enum.Ordering.html#variant.Release
647 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
652 /// use std::sync::atomic::{AtomicBool, Ordering};
654 /// let foo = AtomicBool::new(true);
655 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
656 /// assert_eq!(foo.load(Ordering::SeqCst), false);
658 /// let foo = AtomicBool::new(true);
659 /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
660 /// assert_eq!(foo.load(Ordering::SeqCst), true);
662 /// let foo = AtomicBool::new(false);
663 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
664 /// assert_eq!(foo.load(Ordering::SeqCst), false);
667 #[stable(feature = "rust1", since = "1.0.0")]
668 #[cfg(target_has_atomic = "8")]
669 pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
670 unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
673 /// Logical "nand" with a boolean value.
675 /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
676 /// the new value to the result.
678 /// Returns the previous value.
680 /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
681 /// of this operation. All ordering modes are possible. Note that using
682 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
683 /// using [`Release`] makes the load part [`Relaxed`].
685 /// [`Ordering`]: enum.Ordering.html
686 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
687 /// [`Release`]: enum.Ordering.html#variant.Release
688 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
693 /// use std::sync::atomic::{AtomicBool, Ordering};
695 /// let foo = AtomicBool::new(true);
696 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
697 /// assert_eq!(foo.load(Ordering::SeqCst), true);
699 /// let foo = AtomicBool::new(true);
700 /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
701 /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
702 /// assert_eq!(foo.load(Ordering::SeqCst), false);
704 /// let foo = AtomicBool::new(false);
705 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
706 /// assert_eq!(foo.load(Ordering::SeqCst), true);
709 #[stable(feature = "rust1", since = "1.0.0")]
710 #[cfg(target_has_atomic = "8")]
711 pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
712 // We can't use atomic_nand here because it can result in a bool with
713 // an invalid value. This happens because the atomic operation is done
714 // with an 8-bit integer internally, which would set the upper 7 bits.
715 // So we just use fetch_xor or swap instead.
718 // We must invert the bool.
719 self.fetch_xor(true, order)
721 // !(x & false) == true
722 // We must set the bool to true.
723 self.swap(true, order)
727 /// Logical "or" with a boolean value.
729 /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
730 /// new value to the result.
732 /// Returns the previous value.
734 /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
735 /// of this operation. All ordering modes are possible. Note that using
736 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
737 /// using [`Release`] makes the load part [`Relaxed`].
739 /// [`Ordering`]: enum.Ordering.html
740 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
741 /// [`Release`]: enum.Ordering.html#variant.Release
742 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
747 /// use std::sync::atomic::{AtomicBool, Ordering};
749 /// let foo = AtomicBool::new(true);
750 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
751 /// assert_eq!(foo.load(Ordering::SeqCst), true);
753 /// let foo = AtomicBool::new(true);
754 /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
755 /// assert_eq!(foo.load(Ordering::SeqCst), true);
757 /// let foo = AtomicBool::new(false);
758 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
759 /// assert_eq!(foo.load(Ordering::SeqCst), false);
762 #[stable(feature = "rust1", since = "1.0.0")]
763 #[cfg(target_has_atomic = "8")]
764 pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
765 unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
768 /// Logical "xor" with a boolean value.
770 /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
771 /// the new value to the result.
773 /// Returns the previous value.
775 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
776 /// of this operation. All ordering modes are possible. Note that using
777 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
778 /// using [`Release`] makes the load part [`Relaxed`].
780 /// [`Ordering`]: enum.Ordering.html
781 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
782 /// [`Release`]: enum.Ordering.html#variant.Release
783 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
788 /// use std::sync::atomic::{AtomicBool, Ordering};
790 /// let foo = AtomicBool::new(true);
791 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
792 /// assert_eq!(foo.load(Ordering::SeqCst), true);
794 /// let foo = AtomicBool::new(true);
795 /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
796 /// assert_eq!(foo.load(Ordering::SeqCst), false);
798 /// let foo = AtomicBool::new(false);
799 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
800 /// assert_eq!(foo.load(Ordering::SeqCst), false);
803 #[stable(feature = "rust1", since = "1.0.0")]
804 #[cfg(target_has_atomic = "8")]
805 pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
806 unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
809 /// Returns a mutable pointer to the underlying [`bool`].
811 /// Doing non-atomic reads and writes on the resulting integer can be a data race.
812 /// This method is mostly useful for FFI, where the function signature may use
813 /// `*mut bool` instead of `&AtomicBool`.
815 /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
816 /// atomic types work with interior mutability. All modifications of an atomic change the value
817 /// through a shared reference, and can do so safely as long as they use atomic operations. Any
818 /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
819 /// restriction: operations on it must be atomic.
821 /// [`bool`]: ../../../std/primitive.bool.html
825 /// ```ignore (extern-declaration)
827 /// use std::sync::atomic::AtomicBool;
829 /// fn my_atomic_op(arg: *mut bool);
832 /// let mut atomic = AtomicBool::new(true);
834 /// my_atomic_op(atomic.as_mut_ptr());
839 #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")]
840 pub fn as_mut_ptr(&self) -> *mut bool {
841 self.v.get() as *mut bool
845 #[cfg(target_has_atomic_load_store = "ptr")]
846 impl<T> AtomicPtr<T> {
847 /// Creates a new `AtomicPtr`.
852 /// use std::sync::atomic::AtomicPtr;
854 /// let ptr = &mut 5;
855 /// let atomic_ptr = AtomicPtr::new(ptr);
858 #[stable(feature = "rust1", since = "1.0.0")]
859 #[rustc_const_stable(feature = "const_atomic_new", since = "1.32.0")]
860 pub const fn new(p: *mut T) -> AtomicPtr<T> {
861 AtomicPtr { p: UnsafeCell::new(p) }
864 /// Returns a mutable reference to the underlying pointer.
866 /// This is safe because the mutable reference guarantees that no other threads are
867 /// concurrently accessing the atomic data.
872 /// use std::sync::atomic::{AtomicPtr, Ordering};
874 /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
875 /// *atomic_ptr.get_mut() = &mut 5;
876 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
879 #[stable(feature = "atomic_access", since = "1.15.0")]
880 pub fn get_mut(&mut self) -> &mut *mut T {
881 unsafe { &mut *self.p.get() }
884 /// Consumes the atomic and returns the contained value.
886 /// This is safe because passing `self` by value guarantees that no other threads are
887 /// concurrently accessing the atomic data.
892 /// use std::sync::atomic::AtomicPtr;
894 /// let atomic_ptr = AtomicPtr::new(&mut 5);
895 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
898 #[stable(feature = "atomic_access", since = "1.15.0")]
899 pub fn into_inner(self) -> *mut T {
903 /// Loads a value from the pointer.
905 /// `load` takes an [`Ordering`] argument which describes the memory ordering
906 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
910 /// Panics if `order` is [`Release`] or [`AcqRel`].
912 /// [`Ordering`]: enum.Ordering.html
913 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
914 /// [`Release`]: enum.Ordering.html#variant.Release
915 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
916 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
917 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
922 /// use std::sync::atomic::{AtomicPtr, Ordering};
924 /// let ptr = &mut 5;
925 /// let some_ptr = AtomicPtr::new(ptr);
927 /// let value = some_ptr.load(Ordering::Relaxed);
930 #[stable(feature = "rust1", since = "1.0.0")]
931 pub fn load(&self, order: Ordering) -> *mut T {
932 unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
935 /// Stores a value into the pointer.
937 /// `store` takes an [`Ordering`] argument which describes the memory ordering
938 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
942 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
944 /// [`Ordering`]: enum.Ordering.html
945 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
946 /// [`Release`]: enum.Ordering.html#variant.Release
947 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
948 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
949 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
954 /// use std::sync::atomic::{AtomicPtr, Ordering};
956 /// let ptr = &mut 5;
957 /// let some_ptr = AtomicPtr::new(ptr);
959 /// let other_ptr = &mut 10;
961 /// some_ptr.store(other_ptr, Ordering::Relaxed);
964 #[stable(feature = "rust1", since = "1.0.0")]
965 pub fn store(&self, ptr: *mut T, order: Ordering) {
967 atomic_store(self.p.get() as *mut usize, ptr as usize, order);
971 /// Stores a value into the pointer, returning the previous value.
973 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
974 /// of this operation. All ordering modes are possible. Note that using
975 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
976 /// using [`Release`] makes the load part [`Relaxed`].
978 /// [`Ordering`]: enum.Ordering.html
979 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
980 /// [`Release`]: enum.Ordering.html#variant.Release
981 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
986 /// use std::sync::atomic::{AtomicPtr, Ordering};
988 /// let ptr = &mut 5;
989 /// let some_ptr = AtomicPtr::new(ptr);
991 /// let other_ptr = &mut 10;
993 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
996 #[stable(feature = "rust1", since = "1.0.0")]
997 #[cfg(target_has_atomic = "ptr")]
998 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
999 unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
1002 /// Stores a value into the pointer if the current value is the same as the `current` value.
1004 /// The return value is always the previous value. If it is equal to `current`, then the value
1007 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1008 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
1009 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1010 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1011 /// happens, and using [`Release`] makes the load part [`Relaxed`].
1013 /// [`Ordering`]: enum.Ordering.html
1014 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1015 /// [`Release`]: enum.Ordering.html#variant.Release
1016 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1017 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1022 /// use std::sync::atomic::{AtomicPtr, Ordering};
1024 /// let ptr = &mut 5;
1025 /// let some_ptr = AtomicPtr::new(ptr);
1027 /// let other_ptr = &mut 10;
1029 /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
1032 #[stable(feature = "rust1", since = "1.0.0")]
1033 #[cfg(target_has_atomic = "ptr")]
1034 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
1035 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
1041 /// Stores a value into the pointer if the current value is the same as the `current` value.
1043 /// The return value is a result indicating whether the new value was written and containing
1044 /// the previous value. On success this value is guaranteed to be equal to `current`.
1046 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1047 /// ordering of this operation. The first describes the required ordering if the
1048 /// operation succeeds while the second describes the required ordering when the
1049 /// operation fails. Using [`Acquire`] as success ordering makes the store part
1050 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1051 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1052 /// and must be equivalent to or weaker than the success ordering.
1054 /// [`Ordering`]: enum.Ordering.html
1055 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1056 /// [`Release`]: enum.Ordering.html#variant.Release
1057 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1058 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1063 /// use std::sync::atomic::{AtomicPtr, Ordering};
1065 /// let ptr = &mut 5;
1066 /// let some_ptr = AtomicPtr::new(ptr);
1068 /// let other_ptr = &mut 10;
1070 /// let value = some_ptr.compare_exchange(ptr, other_ptr,
1071 /// Ordering::SeqCst, Ordering::Relaxed);
1074 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1075 #[cfg(target_has_atomic = "ptr")]
1076 pub fn compare_exchange(
1082 ) -> Result<*mut T, *mut T> {
1084 let res = atomic_compare_exchange(
1085 self.p.get() as *mut usize,
1092 Ok(x) => Ok(x as *mut T),
1093 Err(x) => Err(x as *mut T),
1098 /// Stores a value into the pointer if the current value is the same as the `current` value.
1100 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
1101 /// comparison succeeds, which can result in more efficient code on some platforms. The
1102 /// return value is a result indicating whether the new value was written and containing the
1105 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1106 /// ordering of this operation. The first describes the required ordering if the
1107 /// operation succeeds while the second describes the required ordering when the
1108 /// operation fails. Using [`Acquire`] as success ordering makes the store part
1109 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1110 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1111 /// and must be equivalent to or weaker than the success ordering.
1113 /// [`compare_exchange`]: #method.compare_exchange
1114 /// [`Ordering`]: enum.Ordering.html
1115 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1116 /// [`Release`]: enum.Ordering.html#variant.Release
1117 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1118 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1123 /// use std::sync::atomic::{AtomicPtr, Ordering};
1125 /// let some_ptr = AtomicPtr::new(&mut 5);
1127 /// let new = &mut 10;
1128 /// let mut old = some_ptr.load(Ordering::Relaxed);
1130 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1132 /// Err(x) => old = x,
1137 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1138 #[cfg(target_has_atomic = "ptr")]
1139 pub fn compare_exchange_weak(
1145 ) -> Result<*mut T, *mut T> {
1147 let res = atomic_compare_exchange_weak(
1148 self.p.get() as *mut usize,
1155 Ok(x) => Ok(x as *mut T),
1156 Err(x) => Err(x as *mut T),
1162 #[cfg(target_has_atomic_load_store = "8")]
1163 #[stable(feature = "atomic_bool_from", since = "1.24.0")]
1164 impl From<bool> for AtomicBool {
1165 /// Converts a `bool` into an `AtomicBool`.
1170 /// use std::sync::atomic::AtomicBool;
1171 /// let atomic_bool = AtomicBool::from(true);
1172 /// assert_eq!(format!("{:?}", atomic_bool), "true")
1175 fn from(b: bool) -> Self {
1180 #[cfg(target_has_atomic_load_store = "ptr")]
1181 #[stable(feature = "atomic_from", since = "1.23.0")]
1182 impl<T> From<*mut T> for AtomicPtr<T> {
1184 fn from(p: *mut T) -> Self {
1189 #[cfg(target_has_atomic_load_store = "8")]
1190 macro_rules! atomic_int {
1195 $stable_access:meta,
1199 $stable_init_const:meta,
1200 $s_int_type:expr, $int_ref:expr,
1201 $extra_feature:expr,
1202 $min_fn:ident, $max_fn:ident,
1205 $int_type:ident $atomic_type:ident $atomic_init:ident) => {
1206 /// An integer type which can be safely shared between threads.
1208 /// This type has the same in-memory representation as the underlying
1209 /// integer type, [`
1210 #[doc = $s_int_type]
1213 /// ). For more about the differences between atomic types and
1214 /// non-atomic types as well as information about the portability of
1215 /// this type, please see the [module-level documentation].
1217 /// [module-level documentation]: index.html
1219 #[repr(C, align($align))]
1220 pub struct $atomic_type {
1221 v: UnsafeCell<$int_type>,
1224 /// An atomic integer initialized to `0`.
1225 #[$stable_init_const]
1228 reason = "the `new` function is now preferred",
1229 suggestion = $atomic_new,
1231 pub const $atomic_init: $atomic_type = $atomic_type::new(0);
1234 impl Default for $atomic_type {
1235 fn default() -> Self {
1236 Self::new(Default::default())
1241 impl From<$int_type> for $atomic_type {
1244 "Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`."),
1246 fn from(v: $int_type) -> Self { Self::new(v) }
1251 impl fmt::Debug for $atomic_type {
1252 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1253 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
1257 // Send is implicitly implemented.
1259 unsafe impl Sync for $atomic_type {}
1263 concat!("Creates a new atomic integer.
1268 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1270 let atomic_forty_two = ", stringify!($atomic_type), "::new(42);
1275 pub const fn new(v: $int_type) -> Self {
1276 Self {v: UnsafeCell::new(v)}
1281 concat!("Returns a mutable reference to the underlying integer.
1283 This is safe because the mutable reference guarantees that no other threads are
1284 concurrently accessing the atomic data.
1289 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1291 let mut some_var = ", stringify!($atomic_type), "::new(10);
1292 assert_eq!(*some_var.get_mut(), 10);
1293 *some_var.get_mut() = 5;
1294 assert_eq!(some_var.load(Ordering::SeqCst), 5);
1298 pub fn get_mut(&mut self) -> &mut $int_type {
1299 unsafe { &mut *self.v.get() }
1304 concat!("Consumes the atomic and returns the contained value.
1306 This is safe because passing `self` by value guarantees that no other threads are
1307 concurrently accessing the atomic data.
1312 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1314 let some_var = ", stringify!($atomic_type), "::new(5);
1315 assert_eq!(some_var.into_inner(), 5);
1319 pub fn into_inner(self) -> $int_type {
1325 concat!("Loads a value from the atomic integer.
1327 `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1328 Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1332 Panics if `order` is [`Release`] or [`AcqRel`].
1334 [`Ordering`]: enum.Ordering.html
1335 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1336 [`Release`]: enum.Ordering.html#variant.Release
1337 [`Acquire`]: enum.Ordering.html#variant.Acquire
1338 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1339 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1344 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1346 let some_var = ", stringify!($atomic_type), "::new(5);
1348 assert_eq!(some_var.load(Ordering::Relaxed), 5);
1352 pub fn load(&self, order: Ordering) -> $int_type {
1353 unsafe { atomic_load(self.v.get(), order) }
1358 concat!("Stores a value into the atomic integer.
1360 `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1361 Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
1365 Panics if `order` is [`Acquire`] or [`AcqRel`].
1367 [`Ordering`]: enum.Ordering.html
1368 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1369 [`Release`]: enum.Ordering.html#variant.Release
1370 [`Acquire`]: enum.Ordering.html#variant.Acquire
1371 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1372 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1377 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1379 let some_var = ", stringify!($atomic_type), "::new(5);
1381 some_var.store(10, Ordering::Relaxed);
1382 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1386 pub fn store(&self, val: $int_type, order: Ordering) {
1387 unsafe { atomic_store(self.v.get(), val, order); }
1392 concat!("Stores a value into the atomic integer, returning the previous value.
1394 `swap` takes an [`Ordering`] argument which describes the memory ordering
1395 of this operation. All ordering modes are possible. Note that using
1396 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1397 using [`Release`] makes the load part [`Relaxed`].
1399 [`Ordering`]: enum.Ordering.html
1400 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1401 [`Release`]: enum.Ordering.html#variant.Release
1402 [`Acquire`]: enum.Ordering.html#variant.Acquire
1407 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1409 let some_var = ", stringify!($atomic_type), "::new(5);
1411 assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
1416 pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
1417 unsafe { atomic_swap(self.v.get(), val, order) }
1422 concat!("Stores a value into the atomic integer if the current value is the same as
1423 the `current` value.
1425 The return value is always the previous value. If it is equal to `current`, then the
1428 `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1429 ordering of this operation. Notice that even when using [`AcqRel`], the operation
1430 might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1431 Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1432 happens, and using [`Release`] makes the load part [`Relaxed`].
1434 [`Ordering`]: enum.Ordering.html
1435 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1436 [`Release`]: enum.Ordering.html#variant.Release
1437 [`Acquire`]: enum.Ordering.html#variant.Acquire
1438 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1443 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1445 let some_var = ", stringify!($atomic_type), "::new(5);
1447 assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
1448 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1450 assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
1451 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1456 pub fn compare_and_swap(&self,
1459 order: Ordering) -> $int_type {
1460 match self.compare_exchange(current,
1463 strongest_failure_ordering(order)) {
1471 concat!("Stores a value into the atomic integer if the current value is the same as
1472 the `current` value.
1474 The return value is a result indicating whether the new value was written and
1475 containing the previous value. On success this value is guaranteed to be equal to
1478 `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1479 ordering of this operation. The first describes the required ordering if the
1480 operation succeeds while the second describes the required ordering when the
1481 operation fails. Using [`Acquire`] as success ordering makes the store part
1482 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1483 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1484 and must be equivalent to or weaker than the success ordering.
1486 [`Ordering`]: enum.Ordering.html
1487 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1488 [`Release`]: enum.Ordering.html#variant.Release
1489 [`Acquire`]: enum.Ordering.html#variant.Acquire
1490 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1495 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1497 let some_var = ", stringify!($atomic_type), "::new(5);
1499 assert_eq!(some_var.compare_exchange(5, 10,
1503 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1505 assert_eq!(some_var.compare_exchange(6, 12,
1509 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1514 pub fn compare_exchange(&self,
1518 failure: Ordering) -> Result<$int_type, $int_type> {
1519 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
1524 concat!("Stores a value into the atomic integer if the current value is the same as
1525 the `current` value.
1527 Unlike [`compare_exchange`], this function is allowed to spuriously fail even
1528 when the comparison succeeds, which can result in more efficient code on some
1529 platforms. The return value is a result indicating whether the new value was
1530 written and containing the previous value.
1532 `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1533 ordering of this operation. The first describes the required ordering if the
1534 operation succeeds while the second describes the required ordering when the
1535 operation fails. Using [`Acquire`] as success ordering makes the store part
1536 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1537 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1538 and must be equivalent to or weaker than the success ordering.
1540 [`compare_exchange`]: #method.compare_exchange
1541 [`Ordering`]: enum.Ordering.html
1542 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1543 [`Release`]: enum.Ordering.html#variant.Release
1544 [`Acquire`]: enum.Ordering.html#variant.Acquire
1545 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1550 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1552 let val = ", stringify!($atomic_type), "::new(4);
1554 let mut old = val.load(Ordering::Relaxed);
1557 match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1566 pub fn compare_exchange_weak(&self,
1570 failure: Ordering) -> Result<$int_type, $int_type> {
1572 atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
1578 concat!("Adds to the current value, returning the previous value.
1580 This operation wraps around on overflow.
1582 `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
1583 of this operation. All ordering modes are possible. Note that using
1584 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1585 using [`Release`] makes the load part [`Relaxed`].
1587 [`Ordering`]: enum.Ordering.html
1588 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1589 [`Release`]: enum.Ordering.html#variant.Release
1590 [`Acquire`]: enum.Ordering.html#variant.Acquire
1595 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1597 let foo = ", stringify!($atomic_type), "::new(0);
1598 assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
1599 assert_eq!(foo.load(Ordering::SeqCst), 10);
1604 pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
1605 unsafe { atomic_add(self.v.get(), val, order) }
1610 concat!("Subtracts from the current value, returning the previous value.
1612 This operation wraps around on overflow.
1614 `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
1615 of this operation. All ordering modes are possible. Note that using
1616 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1617 using [`Release`] makes the load part [`Relaxed`].
1619 [`Ordering`]: enum.Ordering.html
1620 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1621 [`Release`]: enum.Ordering.html#variant.Release
1622 [`Acquire`]: enum.Ordering.html#variant.Acquire
1627 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1629 let foo = ", stringify!($atomic_type), "::new(20);
1630 assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
1631 assert_eq!(foo.load(Ordering::SeqCst), 10);
1636 pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
1637 unsafe { atomic_sub(self.v.get(), val, order) }
1642 concat!("Bitwise \"and\" with the current value.
1644 Performs a bitwise \"and\" operation on the current value and the argument `val`, and
1645 sets the new value to the result.
1647 Returns the previous value.
1649 `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
1650 of this operation. All ordering modes are possible. Note that using
1651 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1652 using [`Release`] makes the load part [`Relaxed`].
1654 [`Ordering`]: enum.Ordering.html
1655 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1656 [`Release`]: enum.Ordering.html#variant.Release
1657 [`Acquire`]: enum.Ordering.html#variant.Acquire
1662 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1664 let foo = ", stringify!($atomic_type), "::new(0b101101);
1665 assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
1666 assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
1671 pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
1672 unsafe { atomic_and(self.v.get(), val, order) }
1677 concat!("Bitwise \"nand\" with the current value.
1679 Performs a bitwise \"nand\" operation on the current value and the argument `val`, and
1680 sets the new value to the result.
1682 Returns the previous value.
1684 `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
1685 of this operation. All ordering modes are possible. Note that using
1686 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1687 using [`Release`] makes the load part [`Relaxed`].
1689 [`Ordering`]: enum.Ordering.html
1690 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1691 [`Release`]: enum.Ordering.html#variant.Release
1692 [`Acquire`]: enum.Ordering.html#variant.Acquire
1697 ", $extra_feature, "
1698 use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1700 let foo = ", stringify!($atomic_type), "::new(0x13);
1701 assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
1702 assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
1707 pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
1708 unsafe { atomic_nand(self.v.get(), val, order) }
1713 concat!("Bitwise \"or\" with the current value.
1715 Performs a bitwise \"or\" operation on the current value and the argument `val`, and
1716 sets the new value to the result.
1718 Returns the previous value.
1720 `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
1721 of this operation. All ordering modes are possible. Note that using
1722 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1723 using [`Release`] makes the load part [`Relaxed`].
1725 [`Ordering`]: enum.Ordering.html
1726 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1727 [`Release`]: enum.Ordering.html#variant.Release
1728 [`Acquire`]: enum.Ordering.html#variant.Acquire
1733 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1735 let foo = ", stringify!($atomic_type), "::new(0b101101);
1736 assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
1737 assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
1742 pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
1743 unsafe { atomic_or(self.v.get(), val, order) }
1748 concat!("Bitwise \"xor\" with the current value.
1750 Performs a bitwise \"xor\" operation on the current value and the argument `val`, and
1751 sets the new value to the result.
1753 Returns the previous value.
1755 `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
1756 of this operation. All ordering modes are possible. Note that using
1757 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1758 using [`Release`] makes the load part [`Relaxed`].
1760 [`Ordering`]: enum.Ordering.html
1761 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1762 [`Release`]: enum.Ordering.html#variant.Release
1763 [`Acquire`]: enum.Ordering.html#variant.Acquire
1768 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1770 let foo = ", stringify!($atomic_type), "::new(0b101101);
1771 assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
1772 assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
1777 pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
1778 unsafe { atomic_xor(self.v.get(), val, order) }
1783 concat!("Fetches the value, and applies a function to it that returns an optional
1784 new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
1785 `Err(previous_value)`.
1787 Note: This may call the function multiple times if the value has been changed from other threads in
1788 the meantime, as long as the function returns `Some(_)`, but the function will have been applied
1789 only once to the stored value.
1791 `fetch_update` takes two [`Ordering`] arguments to describe the memory
1792 ordering of this operation. The first describes the required ordering for loads
1793 and failed updates while the second describes the required ordering when the
1794 operation finally succeeds. Beware that this is different from the two
1795 modes in [`compare_exchange`]!
1797 Using [`Acquire`] as success ordering makes the store part
1798 of this operation [`Relaxed`], and using [`Release`] makes the final successful load
1799 [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1800 and must be equivalent to or weaker than the success ordering.
1802 [`bool`]: ../../../std/primitive.bool.html
1803 [`compare_exchange`]: #method.compare_exchange
1804 [`Ordering`]: enum.Ordering.html
1805 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1806 [`Release`]: enum.Ordering.html#variant.Release
1807 [`Acquire`]: enum.Ordering.html#variant.Acquire
1808 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1813 #![feature(no_more_cas)]
1814 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1816 let x = ", stringify!($atomic_type), "::new(7);
1817 assert_eq!(x.fetch_update(|_| None, Ordering::SeqCst, Ordering::SeqCst), Err(7));
1818 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(7));
1819 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(8));
1820 assert_eq!(x.load(Ordering::SeqCst), 9);
1823 #[unstable(feature = "no_more_cas",
1824 reason = "no more CAS loops in user code",
1827 pub fn fetch_update<F>(&self,
1829 fetch_order: Ordering,
1830 set_order: Ordering) -> Result<$int_type, $int_type>
1831 where F: FnMut($int_type) -> Option<$int_type> {
1832 let mut prev = self.load(fetch_order);
1833 while let Some(next) = f(prev) {
1834 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
1835 x @ Ok(_) => return x,
1836 Err(next_prev) => prev = next_prev
1844 concat!("Maximum with the current value.
1846 Finds the maximum of the current value and the argument `val`, and
1847 sets the new value to the result.
1849 Returns the previous value.
1851 `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
1852 of this operation. All ordering modes are possible. Note that using
1853 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1854 using [`Release`] makes the load part [`Relaxed`].
1856 [`Ordering`]: enum.Ordering.html
1857 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1858 [`Release`]: enum.Ordering.html#variant.Release
1859 [`Acquire`]: enum.Ordering.html#variant.Acquire
1864 #![feature(atomic_min_max)]
1865 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1867 let foo = ", stringify!($atomic_type), "::new(23);
1868 assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
1869 assert_eq!(foo.load(Ordering::SeqCst), 42);
1872 If you want to obtain the maximum value in one step, you can use the following:
1875 #![feature(atomic_min_max)]
1876 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1878 let foo = ", stringify!($atomic_type), "::new(23);
1880 let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
1881 assert!(max_foo == 42);
1884 #[unstable(feature = "atomic_min_max",
1885 reason = "easier and faster min/max than writing manual CAS loop",
1888 pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
1889 unsafe { $max_fn(self.v.get(), val, order) }
1894 concat!("Minimum with the current value.
1896 Finds the minimum of the current value and the argument `val`, and
1897 sets the new value to the result.
1899 Returns the previous value.
1901 `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
1902 of this operation. All ordering modes are possible. Note that using
1903 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1904 using [`Release`] makes the load part [`Relaxed`].
1906 [`Ordering`]: enum.Ordering.html
1907 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1908 [`Release`]: enum.Ordering.html#variant.Release
1909 [`Acquire`]: enum.Ordering.html#variant.Acquire
1914 #![feature(atomic_min_max)]
1915 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1917 let foo = ", stringify!($atomic_type), "::new(23);
1918 assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
1919 assert_eq!(foo.load(Ordering::Relaxed), 23);
1920 assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
1921 assert_eq!(foo.load(Ordering::Relaxed), 22);
1924 If you want to obtain the minimum value in one step, you can use the following:
1927 #![feature(atomic_min_max)]
1928 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1930 let foo = ", stringify!($atomic_type), "::new(23);
1932 let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
1933 assert_eq!(min_foo, 12);
1936 #[unstable(feature = "atomic_min_max",
1937 reason = "easier and faster min/max than writing manual CAS loop",
1940 pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
1941 unsafe { $min_fn(self.v.get(), val, order) }
1946 concat!("Returns a mutable pointer to the underlying integer.
1948 Doing non-atomic reads and writes on the resulting integer can be a data race.
1949 This method is mostly useful for FFI, where the function signature may use
1950 `*mut ", stringify!($int_type), "` instead of `&", stringify!($atomic_type), "`.
1952 Returning an `*mut` pointer from a shared reference to this atomic is safe because the
1953 atomic types work with interior mutability. All modifications of an atomic change the value
1954 through a shared reference, and can do so safely as long as they use atomic operations. Any
1955 use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
1956 restriction: operations on it must be atomic.
1960 ```ignore (extern-declaration)
1962 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1965 fn my_atomic_op(arg: *mut ", stringify!($int_type), ");
1968 let mut atomic = ", stringify!($atomic_type), "::new(1);
1970 my_atomic_op(atomic.as_mut_ptr());
1975 #[unstable(feature = "atomic_mut_ptr",
1976 reason = "recently added",
1978 pub fn as_mut_ptr(&self) -> *mut $int_type {
1986 #[cfg(target_has_atomic_load_store = "8")]
1988 cfg(target_has_atomic = "8"),
1989 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1990 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1991 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1992 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1993 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1994 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1995 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
1996 unstable(feature = "integer_atomics", issue = "32976"),
1997 "i8", "../../../std/primitive.i8.html",
1999 atomic_min, atomic_max,
2002 i8 AtomicI8 ATOMIC_I8_INIT
2004 #[cfg(target_has_atomic_load_store = "8")]
2006 cfg(target_has_atomic = "8"),
2007 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2008 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2009 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2010 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2011 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2012 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2013 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2014 unstable(feature = "integer_atomics", issue = "32976"),
2015 "u8", "../../../std/primitive.u8.html",
2017 atomic_umin, atomic_umax,
2020 u8 AtomicU8 ATOMIC_U8_INIT
2022 #[cfg(target_has_atomic_load_store = "16")]
2024 cfg(target_has_atomic = "16"),
2025 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2026 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2027 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2028 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2029 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2030 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2031 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2032 unstable(feature = "integer_atomics", issue = "32976"),
2033 "i16", "../../../std/primitive.i16.html",
2035 atomic_min, atomic_max,
2037 "AtomicI16::new(0)",
2038 i16 AtomicI16 ATOMIC_I16_INIT
2040 #[cfg(target_has_atomic_load_store = "16")]
2042 cfg(target_has_atomic = "16"),
2043 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2044 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2045 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2046 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2047 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2048 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2049 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2050 unstable(feature = "integer_atomics", issue = "32976"),
2051 "u16", "../../../std/primitive.u16.html",
2053 atomic_umin, atomic_umax,
2055 "AtomicU16::new(0)",
2056 u16 AtomicU16 ATOMIC_U16_INIT
2058 #[cfg(target_has_atomic_load_store = "32")]
2060 cfg(target_has_atomic = "32"),
2061 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2062 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2063 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2064 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2065 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2066 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2067 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2068 unstable(feature = "integer_atomics", issue = "32976"),
2069 "i32", "../../../std/primitive.i32.html",
2071 atomic_min, atomic_max,
2073 "AtomicI32::new(0)",
2074 i32 AtomicI32 ATOMIC_I32_INIT
2076 #[cfg(target_has_atomic_load_store = "32")]
2078 cfg(target_has_atomic = "32"),
2079 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2080 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2081 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2082 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2083 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2084 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2085 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2086 unstable(feature = "integer_atomics", issue = "32976"),
2087 "u32", "../../../std/primitive.u32.html",
2089 atomic_umin, atomic_umax,
2091 "AtomicU32::new(0)",
2092 u32 AtomicU32 ATOMIC_U32_INIT
2094 #[cfg(target_has_atomic_load_store = "64")]
2096 cfg(target_has_atomic = "64"),
2097 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2098 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2099 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2100 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2101 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2102 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2103 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2104 unstable(feature = "integer_atomics", issue = "32976"),
2105 "i64", "../../../std/primitive.i64.html",
2107 atomic_min, atomic_max,
2109 "AtomicI64::new(0)",
2110 i64 AtomicI64 ATOMIC_I64_INIT
2112 #[cfg(target_has_atomic_load_store = "64")]
2114 cfg(target_has_atomic = "64"),
2115 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2116 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2117 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2118 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2119 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2120 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2121 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2122 unstable(feature = "integer_atomics", issue = "32976"),
2123 "u64", "../../../std/primitive.u64.html",
2125 atomic_umin, atomic_umax,
2127 "AtomicU64::new(0)",
2128 u64 AtomicU64 ATOMIC_U64_INIT
2130 #[cfg(target_has_atomic_load_store = "128")]
2132 cfg(target_has_atomic = "128"),
2133 unstable(feature = "integer_atomics", issue = "32976"),
2134 unstable(feature = "integer_atomics", issue = "32976"),
2135 unstable(feature = "integer_atomics", issue = "32976"),
2136 unstable(feature = "integer_atomics", issue = "32976"),
2137 unstable(feature = "integer_atomics", issue = "32976"),
2138 unstable(feature = "integer_atomics", issue = "32976"),
2139 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2140 unstable(feature = "integer_atomics", issue = "32976"),
2141 "i128", "../../../std/primitive.i128.html",
2142 "#![feature(integer_atomics)]\n\n",
2143 atomic_min, atomic_max,
2145 "AtomicI128::new(0)",
2146 i128 AtomicI128 ATOMIC_I128_INIT
2148 #[cfg(target_has_atomic_load_store = "128")]
2150 cfg(target_has_atomic = "128"),
2151 unstable(feature = "integer_atomics", issue = "32976"),
2152 unstable(feature = "integer_atomics", issue = "32976"),
2153 unstable(feature = "integer_atomics", issue = "32976"),
2154 unstable(feature = "integer_atomics", issue = "32976"),
2155 unstable(feature = "integer_atomics", issue = "32976"),
2156 unstable(feature = "integer_atomics", issue = "32976"),
2157 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2158 unstable(feature = "integer_atomics", issue = "32976"),
2159 "u128", "../../../std/primitive.u128.html",
2160 "#![feature(integer_atomics)]\n\n",
2161 atomic_umin, atomic_umax,
2163 "AtomicU128::new(0)",
2164 u128 AtomicU128 ATOMIC_U128_INIT
2166 #[cfg(target_has_atomic_load_store = "ptr")]
2167 #[cfg(target_pointer_width = "16")]
2168 macro_rules! ptr_width {
2173 #[cfg(target_has_atomic_load_store = "ptr")]
2174 #[cfg(target_pointer_width = "32")]
2175 macro_rules! ptr_width {
2180 #[cfg(target_has_atomic_load_store = "ptr")]
2181 #[cfg(target_pointer_width = "64")]
2182 macro_rules! ptr_width {
2187 #[cfg(target_has_atomic_load_store = "ptr")]
2189 cfg(target_has_atomic = "ptr"),
2190 stable(feature = "rust1", since = "1.0.0"),
2191 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2192 stable(feature = "atomic_debug", since = "1.3.0"),
2193 stable(feature = "atomic_access", since = "1.15.0"),
2194 stable(feature = "atomic_from", since = "1.23.0"),
2195 stable(feature = "atomic_nand", since = "1.27.0"),
2196 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2197 stable(feature = "rust1", since = "1.0.0"),
2198 "isize", "../../../std/primitive.isize.html",
2200 atomic_min, atomic_max,
2202 "AtomicIsize::new(0)",
2203 isize AtomicIsize ATOMIC_ISIZE_INIT
2205 #[cfg(target_has_atomic_load_store = "ptr")]
2207 cfg(target_has_atomic = "ptr"),
2208 stable(feature = "rust1", since = "1.0.0"),
2209 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2210 stable(feature = "atomic_debug", since = "1.3.0"),
2211 stable(feature = "atomic_access", since = "1.15.0"),
2212 stable(feature = "atomic_from", since = "1.23.0"),
2213 stable(feature = "atomic_nand", since = "1.27.0"),
2214 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
2215 stable(feature = "rust1", since = "1.0.0"),
2216 "usize", "../../../std/primitive.usize.html",
2218 atomic_umin, atomic_umax,
2220 "AtomicUsize::new(0)",
2221 usize AtomicUsize ATOMIC_USIZE_INIT
2225 #[cfg(target_has_atomic = "8")]
2226 fn strongest_failure_ordering(order: Ordering) -> Ordering {
2237 unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
2239 Release => intrinsics::atomic_store_rel(dst, val),
2240 Relaxed => intrinsics::atomic_store_relaxed(dst, val),
2241 SeqCst => intrinsics::atomic_store(dst, val),
2242 Acquire => panic!("there is no such thing as an acquire store"),
2243 AcqRel => panic!("there is no such thing as an acquire/release store"),
2248 unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
2250 Acquire => intrinsics::atomic_load_acq(dst),
2251 Relaxed => intrinsics::atomic_load_relaxed(dst),
2252 SeqCst => intrinsics::atomic_load(dst),
2253 Release => panic!("there is no such thing as a release load"),
2254 AcqRel => panic!("there is no such thing as an acquire/release load"),
2259 #[cfg(target_has_atomic = "8")]
2260 unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
2262 Acquire => intrinsics::atomic_xchg_acq(dst, val),
2263 Release => intrinsics::atomic_xchg_rel(dst, val),
2264 AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
2265 Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
2266 SeqCst => intrinsics::atomic_xchg(dst, val),
2270 /// Returns the previous value (like __sync_fetch_and_add).
2272 #[cfg(target_has_atomic = "8")]
2273 unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
2275 Acquire => intrinsics::atomic_xadd_acq(dst, val),
2276 Release => intrinsics::atomic_xadd_rel(dst, val),
2277 AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
2278 Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
2279 SeqCst => intrinsics::atomic_xadd(dst, val),
2283 /// Returns the previous value (like __sync_fetch_and_sub).
2285 #[cfg(target_has_atomic = "8")]
2286 unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
2288 Acquire => intrinsics::atomic_xsub_acq(dst, val),
2289 Release => intrinsics::atomic_xsub_rel(dst, val),
2290 AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
2291 Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
2292 SeqCst => intrinsics::atomic_xsub(dst, val),
2297 #[cfg(target_has_atomic = "8")]
2298 unsafe fn atomic_compare_exchange<T>(
2305 let (val, ok) = match (success, failure) {
2306 (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
2307 (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
2308 (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
2309 (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
2310 (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
2311 (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
2312 (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
2313 (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
2314 (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
2315 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
2316 (_, Release) => panic!("there is no such thing as a release failure ordering"),
2317 _ => panic!("a failure ordering can't be stronger than a success ordering"),
2319 if ok { Ok(val) } else { Err(val) }
2323 #[cfg(target_has_atomic = "8")]
2324 unsafe fn atomic_compare_exchange_weak<T>(
2331 let (val, ok) = match (success, failure) {
2332 (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
2333 (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
2334 (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
2335 (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
2336 (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
2337 (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
2338 (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
2339 (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
2340 (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
2341 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
2342 (_, Release) => panic!("there is no such thing as a release failure ordering"),
2343 _ => panic!("a failure ordering can't be stronger than a success ordering"),
2345 if ok { Ok(val) } else { Err(val) }
2349 #[cfg(target_has_atomic = "8")]
2350 unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
2352 Acquire => intrinsics::atomic_and_acq(dst, val),
2353 Release => intrinsics::atomic_and_rel(dst, val),
2354 AcqRel => intrinsics::atomic_and_acqrel(dst, val),
2355 Relaxed => intrinsics::atomic_and_relaxed(dst, val),
2356 SeqCst => intrinsics::atomic_and(dst, val),
2361 #[cfg(target_has_atomic = "8")]
2362 unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
2364 Acquire => intrinsics::atomic_nand_acq(dst, val),
2365 Release => intrinsics::atomic_nand_rel(dst, val),
2366 AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
2367 Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
2368 SeqCst => intrinsics::atomic_nand(dst, val),
2373 #[cfg(target_has_atomic = "8")]
2374 unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
2376 Acquire => intrinsics::atomic_or_acq(dst, val),
2377 Release => intrinsics::atomic_or_rel(dst, val),
2378 AcqRel => intrinsics::atomic_or_acqrel(dst, val),
2379 Relaxed => intrinsics::atomic_or_relaxed(dst, val),
2380 SeqCst => intrinsics::atomic_or(dst, val),
2385 #[cfg(target_has_atomic = "8")]
2386 unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
2388 Acquire => intrinsics::atomic_xor_acq(dst, val),
2389 Release => intrinsics::atomic_xor_rel(dst, val),
2390 AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
2391 Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
2392 SeqCst => intrinsics::atomic_xor(dst, val),
2396 /// returns the max value (signed comparison)
2398 #[cfg(target_has_atomic = "8")]
2399 unsafe fn atomic_max<T>(dst: *mut T, val: T, order: Ordering) -> T {
2401 Acquire => intrinsics::atomic_max_acq(dst, val),
2402 Release => intrinsics::atomic_max_rel(dst, val),
2403 AcqRel => intrinsics::atomic_max_acqrel(dst, val),
2404 Relaxed => intrinsics::atomic_max_relaxed(dst, val),
2405 SeqCst => intrinsics::atomic_max(dst, val),
2409 /// returns the min value (signed comparison)
2411 #[cfg(target_has_atomic = "8")]
2412 unsafe fn atomic_min<T>(dst: *mut T, val: T, order: Ordering) -> T {
2414 Acquire => intrinsics::atomic_min_acq(dst, val),
2415 Release => intrinsics::atomic_min_rel(dst, val),
2416 AcqRel => intrinsics::atomic_min_acqrel(dst, val),
2417 Relaxed => intrinsics::atomic_min_relaxed(dst, val),
2418 SeqCst => intrinsics::atomic_min(dst, val),
2422 /// returns the max value (signed comparison)
2424 #[cfg(target_has_atomic = "8")]
2425 unsafe fn atomic_umax<T>(dst: *mut T, val: T, order: Ordering) -> T {
2427 Acquire => intrinsics::atomic_umax_acq(dst, val),
2428 Release => intrinsics::atomic_umax_rel(dst, val),
2429 AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
2430 Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
2431 SeqCst => intrinsics::atomic_umax(dst, val),
2435 /// returns the min value (signed comparison)
2437 #[cfg(target_has_atomic = "8")]
2438 unsafe fn atomic_umin<T>(dst: *mut T, val: T, order: Ordering) -> T {
2440 Acquire => intrinsics::atomic_umin_acq(dst, val),
2441 Release => intrinsics::atomic_umin_rel(dst, val),
2442 AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
2443 Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
2444 SeqCst => intrinsics::atomic_umin(dst, val),
2448 /// An atomic fence.
2450 /// Depending on the specified order, a fence prevents the compiler and CPU from
2451 /// reordering certain types of memory operations around it.
2452 /// That creates synchronizes-with relationships between it and atomic operations
2453 /// or fences in other threads.
2455 /// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
2456 /// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
2457 /// exist operations X and Y, both operating on some atomic object 'M' such
2458 /// that A is sequenced before X, Y is synchronized before B and Y observes
2459 /// the change to M. This provides a happens-before dependence between A and B.
2462 /// Thread 1 Thread 2
2464 /// fence(Release); A --------------
2465 /// x.store(3, Relaxed); X --------- |
2468 /// -------------> Y if x.load(Relaxed) == 3 {
2469 /// |-------> B fence(Acquire);
2474 /// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
2477 /// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
2478 /// and [`Release`] semantics, participates in the global program order of the
2479 /// other [`SeqCst`] operations and/or fences.
2481 /// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
2485 /// Panics if `order` is [`Relaxed`].
2490 /// use std::sync::atomic::AtomicBool;
2491 /// use std::sync::atomic::fence;
2492 /// use std::sync::atomic::Ordering;
2494 /// // A mutual exclusion primitive based on spinlock.
2495 /// pub struct Mutex {
2496 /// flag: AtomicBool,
2500 /// pub fn new() -> Mutex {
2502 /// flag: AtomicBool::new(false),
2506 /// pub fn lock(&self) {
2507 /// while !self.flag.compare_and_swap(false, true, Ordering::Relaxed) {}
2508 /// // This fence synchronizes-with store in `unlock`.
2509 /// fence(Ordering::Acquire);
2512 /// pub fn unlock(&self) {
2513 /// self.flag.store(false, Ordering::Release);
2518 /// [`Ordering`]: enum.Ordering.html
2519 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2520 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2521 /// [`Release`]: enum.Ordering.html#variant.Release
2522 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2523 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2525 #[stable(feature = "rust1", since = "1.0.0")]
2526 #[cfg_attr(target_arch = "wasm32", allow(unused_variables))]
2527 pub fn fence(order: Ordering) {
2528 // On wasm32 it looks like fences aren't implemented in LLVM yet in that
2529 // they will cause LLVM to abort. The wasm instruction set doesn't have
2530 // fences right now. There's discussion online about the best way for tools
2531 // to conventionally implement fences at
2532 // https://github.com/WebAssembly/tool-conventions/issues/59. We should
2533 // follow that discussion and implement a solution when one comes about!
2534 #[cfg(not(target_arch = "wasm32"))]
2537 Acquire => intrinsics::atomic_fence_acq(),
2538 Release => intrinsics::atomic_fence_rel(),
2539 AcqRel => intrinsics::atomic_fence_acqrel(),
2540 SeqCst => intrinsics::atomic_fence(),
2541 Relaxed => panic!("there is no such thing as a relaxed fence"),
2546 /// A compiler memory fence.
2548 /// `compiler_fence` does not emit any machine code, but restricts the kinds
2549 /// of memory re-ordering the compiler is allowed to do. Specifically, depending on
2550 /// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
2551 /// or writes from before or after the call to the other side of the call to
2552 /// `compiler_fence`. Note that it does **not** prevent the *hardware*
2553 /// from doing such re-ordering. This is not a problem in a single-threaded,
2554 /// execution context, but when other threads may modify memory at the same
2555 /// time, stronger synchronization primitives such as [`fence`] are required.
2557 /// The re-ordering prevented by the different ordering semantics are:
2559 /// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
2560 /// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
2561 /// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
2562 /// - with [`AcqRel`], both of the above rules are enforced.
2564 /// `compiler_fence` is generally only useful for preventing a thread from
2565 /// racing *with itself*. That is, if a given thread is executing one piece
2566 /// of code, and is then interrupted, and starts executing code elsewhere
2567 /// (while still in the same thread, and conceptually still on the same
2568 /// core). In traditional programs, this can only occur when a signal
2569 /// handler is registered. In more low-level code, such situations can also
2570 /// arise when handling interrupts, when implementing green threads with
2571 /// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
2572 /// discussion of [memory barriers].
2576 /// Panics if `order` is [`Relaxed`].
2580 /// Without `compiler_fence`, the `assert_eq!` in following code
2581 /// is *not* guaranteed to succeed, despite everything happening in a single thread.
2582 /// To see why, remember that the compiler is free to swap the stores to
2583 /// `IMPORTANT_VARIABLE` and `IS_READ` since they are both
2584 /// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
2585 /// after `IS_READY` is updated, then the signal handler will see
2586 /// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
2587 /// Using a `compiler_fence` remedies this situation.
2590 /// use std::sync::atomic::{AtomicBool, AtomicUsize};
2591 /// use std::sync::atomic::Ordering;
2592 /// use std::sync::atomic::compiler_fence;
2594 /// static IMPORTANT_VARIABLE: AtomicUsize = AtomicUsize::new(0);
2595 /// static IS_READY: AtomicBool = AtomicBool::new(false);
2598 /// IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
2599 /// // prevent earlier writes from being moved beyond this point
2600 /// compiler_fence(Ordering::Release);
2601 /// IS_READY.store(true, Ordering::Relaxed);
2604 /// fn signal_handler() {
2605 /// if IS_READY.load(Ordering::Relaxed) {
2606 /// assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
2611 /// [`fence`]: fn.fence.html
2612 /// [`Ordering`]: enum.Ordering.html
2613 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2614 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2615 /// [`Release`]: enum.Ordering.html#variant.Release
2616 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2617 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2618 /// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
2620 #[stable(feature = "compiler_fences", since = "1.21.0")]
2621 pub fn compiler_fence(order: Ordering) {
2624 Acquire => intrinsics::atomic_singlethreadfence_acq(),
2625 Release => intrinsics::atomic_singlethreadfence_rel(),
2626 AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
2627 SeqCst => intrinsics::atomic_singlethreadfence(),
2628 Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
2633 #[cfg(target_has_atomic_load_store = "8")]
2634 #[stable(feature = "atomic_debug", since = "1.3.0")]
2635 impl fmt::Debug for AtomicBool {
2636 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2637 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2641 #[cfg(target_has_atomic_load_store = "ptr")]
2642 #[stable(feature = "atomic_debug", since = "1.3.0")]
2643 impl<T> fmt::Debug for AtomicPtr<T> {
2644 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2645 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2649 #[cfg(target_has_atomic_load_store = "ptr")]
2650 #[stable(feature = "atomic_pointer", since = "1.24.0")]
2651 impl<T> fmt::Pointer for AtomicPtr<T> {
2652 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2653 fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)