3 //! Atomic types provide primitive shared-memory communication between
4 //! threads, and are the building blocks of other concurrent
7 //! This module defines atomic versions of a select number of primitive
8 //! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
9 //! [`AtomicI8`], [`AtomicU16`], etc.
10 //! Atomic types present operations that, when used correctly, synchronize
11 //! updates between threads.
13 //! [`AtomicBool`]: struct.AtomicBool.html
14 //! [`AtomicIsize`]: struct.AtomicIsize.html
15 //! [`AtomicUsize`]: struct.AtomicUsize.html
16 //! [`AtomicI8`]: struct.AtomicI8.html
17 //! [`AtomicU16`]: struct.AtomicU16.html
19 //! Each method takes an [`Ordering`] which represents the strength of
20 //! the memory barrier for that operation. These orderings are the
21 //! same as [LLVM atomic orderings][1]. For more information see the [nomicon][2].
23 //! [`Ordering`]: enum.Ordering.html
25 //! [1]: https://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
26 //! [2]: ../../../nomicon/atomics.html
28 //! Atomic variables are safe to share between threads (they implement [`Sync`])
29 //! but they do not themselves provide the mechanism for sharing and follow the
30 //! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
31 //! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
32 //! atomically-reference-counted shared pointer).
34 //! [`Sync`]: ../../marker/trait.Sync.html
35 //! [arc]: ../../../std/sync/struct.Arc.html
37 //! Atomic types may be stored in static variables, initialized using
38 //! the constant initializers like [`AtomicBool::new`]. Atomic statics
39 //! are often used for lazy global initialization.
41 //! [`AtomicBool::new`]: struct.AtomicBool.html#method.new
45 //! All atomic types in this module are guaranteed to be [lock-free] if they're
46 //! available. This means they don't internally acquire a global mutex. Atomic
47 //! types and operations are not guaranteed to be wait-free. This means that
48 //! operations like `fetch_or` may be implemented with a compare-and-swap loop.
50 //! Atomic operations may be implemented at the instruction layer with
51 //! larger-size atomics. For example some platforms use 4-byte atomic
52 //! instructions to implement `AtomicI8`. Note that this emulation should not
53 //! have an impact on correctness of code, it's just something to be aware of.
55 //! The atomic types in this module may not be available on all platforms. The
56 //! atomic types here are all widely available, however, and can generally be
57 //! relied upon to exist. Some notable exceptions are:
59 //! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or
60 //! `AtomicI64` types.
61 //! * ARM platforms like `armv5te` that aren't for Linux do not have any atomics
63 //! * ARM targets with `thumbv6m` do not have atomic operations at all.
65 //! Note that future platforms may be added that also do not have support for
66 //! some atomic operations. Maximally portable code will want to be careful
67 //! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are
68 //! generally the most portable, but even then they're not available everywhere.
69 //! For reference, the `std` library requires pointer-sized atomics, although
72 //! Currently you'll need to use `#[cfg(target_arch)]` primarily to
73 //! conditionally compile in code with atomics. There is an unstable
74 //! `#[cfg(target_has_atomic)]` as well which may be stabilized in the future.
76 //! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm
80 //! A simple spinlock:
83 //! use std::sync::Arc;
84 //! use std::sync::atomic::{AtomicUsize, Ordering};
88 //! let spinlock = Arc::new(AtomicUsize::new(1));
90 //! let spinlock_clone = spinlock.clone();
91 //! let thread = thread::spawn(move|| {
92 //! spinlock_clone.store(0, Ordering::SeqCst);
95 //! // Wait for the other thread to release the lock
96 //! while spinlock.load(Ordering::SeqCst) != 0 {}
98 //! if let Err(panic) = thread.join() {
99 //! println!("Thread had an error: {:?}", panic);
104 //! Keep a global count of live threads:
107 //! use std::sync::atomic::{AtomicUsize, Ordering};
109 //! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0);
111 //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
112 //! println!("live threads: {}", old_thread_count + 1);
115 #![stable(feature = "rust1", since = "1.0.0")]
116 #![cfg_attr(not(target_has_atomic = "8"), allow(dead_code))]
117 #![cfg_attr(not(target_has_atomic = "8"), allow(unused_imports))]
119 use self::Ordering::*;
121 use crate::intrinsics;
122 use crate::cell::UnsafeCell;
125 use crate::hint::spin_loop;
127 /// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
129 /// Upon receiving spin-loop signal the processor can optimize its behavior by, for example, saving
130 /// power or switching hyper-threads.
132 /// This function is different from [`std::thread::yield_now`] which directly yields to the
133 /// system's scheduler, whereas `spin_loop_hint` does not interact with the operating system.
135 /// Spin locks can be very efficient for short lock durations because they do not involve context
136 /// switches or interaction with the operating system. For long lock durations they become wasteful
137 /// however because they use CPU cycles for the entire lock duration, and using a
138 /// [`std::sync::Mutex`] is likely the better approach. If actively spinning for a long time is
139 /// required, e.g. because code polls a non-blocking API, calling [`std::thread::yield_now`]
140 /// or [`std::thread::sleep`] may be the best option.
142 /// **Note**: Spin locks are based on the underlying assumption that another thread will release
143 /// the lock 'soon'. In order for this to work, that other thread must run on a different CPU or
144 /// core (at least potentially). Spin locks do not work efficiently on single CPU / core platforms.
146 /// **Note**: On platforms that do not support receiving spin-loop hints this function does not
147 /// do anything at all.
149 /// [`std::thread::yield_now`]: ../../../std/thread/fn.yield_now.html
150 /// [`std::thread::sleep`]: ../../../std/thread/fn.sleep.html
151 /// [`std::sync::Mutex`]: ../../../std/sync/struct.Mutex.html
153 #[stable(feature = "spin_loop_hint", since = "1.24.0")]
154 pub fn spin_loop_hint() {
158 /// A boolean type which can be safely shared between threads.
160 /// This type has the same in-memory representation as a [`bool`].
162 /// [`bool`]: ../../../std/primitive.bool.html
163 #[cfg(target_has_atomic = "8")]
164 #[stable(feature = "rust1", since = "1.0.0")]
166 pub struct AtomicBool {
    // Invariant: the inner byte holds only 0 (false) or 1 (true);
    // see `new` (stores `v as u8`) and `into_inner` (reads via `!= 0`).
170 #[cfg(target_has_atomic = "8")]
171 #[stable(feature = "rust1", since = "1.0.0")]
172 impl Default for AtomicBool {
173 /// Creates an `AtomicBool` initialized to `false`.
174 fn default() -> Self {
179 // Send is implicitly implemented for AtomicBool.
180 #[cfg(target_has_atomic = "8")]
181 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: all access to the inner `UnsafeCell` goes through atomic
// operations (see `load`, `store`, etc. below), which synchronize
// concurrent access, so sharing an `AtomicBool` across threads is sound.
182 unsafe impl Sync for AtomicBool {}
184 /// A raw pointer type which can be safely shared between threads.
186 /// This type has the same in-memory representation as a `*mut T`.
187 #[cfg(target_has_atomic = "ptr")]
188 #[stable(feature = "rust1", since = "1.0.0")]
// Force the struct's alignment to the pointer width on each target so the
// hardware atomic instructions can operate on the contained pointer.
189 #[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
190 #[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
191 #[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
192 pub struct AtomicPtr<T> {
    // The raw pointer; only ever accessed through atomic operations.
193 p: UnsafeCell<*mut T>,
196 #[cfg(target_has_atomic = "ptr")]
197 #[stable(feature = "rust1", since = "1.0.0")]
198 impl<T> Default for AtomicPtr<T> {
199 /// Creates a null `AtomicPtr<T>`.
200 fn default() -> AtomicPtr<T> {
201 AtomicPtr::new(crate::ptr::null_mut())
205 #[cfg(target_has_atomic = "ptr")]
206 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: an `AtomicPtr` only holds a raw pointer value (it does not own
// the pointee), so moving it to another thread is sound regardless of `T`.
207 unsafe impl<T> Send for AtomicPtr<T> {}
208 #[cfg(target_has_atomic = "ptr")]
209 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: all access to the inner `UnsafeCell` goes through atomic
// operations, which synchronize concurrent access from multiple threads.
210 unsafe impl<T> Sync for AtomicPtr<T> {}
212 /// Atomic memory orderings
214 /// Memory orderings specify the way atomic operations synchronize memory.
215 /// In its weakest [`Relaxed`][Ordering::Relaxed], only the memory directly touched by the
216 /// operation is synchronized. On the other hand, a store-load pair of [`SeqCst`][Ordering::SeqCst]
217 /// operations synchronize other memory while additionally preserving a total order of such
218 /// operations across all threads.
220 /// Rust's memory orderings are [the same as
221 /// LLVM's](https://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
223 /// For more information see the [nomicon].
225 /// [nomicon]: ../../../nomicon/atomics.html
226 /// [Ordering::Relaxed]: #variant.Relaxed
227 /// [Ordering::SeqCst]: #variant.SeqCst
228 #[stable(feature = "rust1", since = "1.0.0")]
229 #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
232 /// No ordering constraints, only atomic operations.
234 /// Corresponds to LLVM's [`Monotonic`] ordering.
236 /// [`Monotonic`]: https://llvm.org/docs/Atomics.html#monotonic
237 #[stable(feature = "rust1", since = "1.0.0")]
239 /// When coupled with a store, all previous operations become ordered
240 /// before any load of this value with [`Acquire`] (or stronger) ordering.
241 /// In particular, all previous writes become visible to all threads
242 /// that perform an [`Acquire`] (or stronger) load of this value.
244 /// Notice that using this ordering for an operation that combines loads
245 /// and stores leads to a [`Relaxed`] load operation!
247 /// This ordering is only applicable for operations that can perform a store.
249 /// Corresponds to LLVM's [`Release`] ordering.
251 /// [`Release`]: https://llvm.org/docs/Atomics.html#release
252 /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire
253 /// [`Relaxed`]: https://llvm.org/docs/Atomics.html#monotonic
254 #[stable(feature = "rust1", since = "1.0.0")]
256 /// When coupled with a load, if the loaded value was written by a store operation with
257 /// [`Release`] (or stronger) ordering, then all subsequent operations
258 /// become ordered after that store. In particular, all subsequent loads will see data
259 /// written before the store.
261 /// Notice that using this ordering for an operation that combines loads
262 /// and stores leads to a [`Relaxed`] store operation!
264 /// This ordering is only applicable for operations that can perform a load.
266 /// Corresponds to LLVM's [`Acquire`] ordering.
268 /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire
269 /// [`Release`]: https://llvm.org/docs/Atomics.html#release
270 /// [`Relaxed`]: https://llvm.org/docs/Atomics.html#monotonic
271 #[stable(feature = "rust1", since = "1.0.0")]
273 /// Has the effects of both [`Acquire`] and [`Release`] together:
274 /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
276 /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
277 /// not performing any store and hence it has just [`Acquire`] ordering. However,
278 /// [`AcqRel`][`AcquireRelease`] will never perform [`Relaxed`] accesses.
280 /// This ordering is only applicable for operations that combine both loads and stores.
282 /// Corresponds to LLVM's [`AcquireRelease`] ordering.
284 /// [`AcquireRelease`]: https://llvm.org/docs/Atomics.html#acquirerelease
285 /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire
286 /// [`Release`]: https://llvm.org/docs/Atomics.html#release
287 /// [`Relaxed`]: https://llvm.org/docs/Atomics.html#monotonic
288 #[stable(feature = "rust1", since = "1.0.0")]
290 /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
291 /// operations, respectively) with the additional guarantee that all threads see all
292 /// sequentially consistent operations in the same order.
294 /// Corresponds to LLVM's [`SequentiallyConsistent`] ordering.
296 /// [`SequentiallyConsistent`]: https://llvm.org/docs/Atomics.html#sequentiallyconsistent
297 /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire
298 /// [`Release`]: https://llvm.org/docs/Atomics.html#release
299 /// [`AcqRel`]: https://llvm.org/docs/Atomics.html#acquirerelease
300 #[stable(feature = "rust1", since = "1.0.0")]
304 /// An [`AtomicBool`] initialized to `false`.
306 /// [`AtomicBool`]: struct.AtomicBool.html
307 #[cfg(target_has_atomic = "8")]
308 #[stable(feature = "rust1", since = "1.0.0")]
311 reason = "the `new` function is now preferred",
312 suggestion = "AtomicBool::new(false)",
314 pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
316 #[cfg(target_has_atomic = "8")]
318 /// Creates a new `AtomicBool`.
323 /// use std::sync::atomic::AtomicBool;
325 /// let atomic_true = AtomicBool::new(true);
326 /// let atomic_false = AtomicBool::new(false);
329 #[stable(feature = "rust1", since = "1.0.0")]
330 pub const fn new(v: bool) -> AtomicBool {
        // `bool as u8` is guaranteed by the language to be 0 (false) or
        // 1 (true), establishing the type's 0/1 invariant.
331 AtomicBool { v: UnsafeCell::new(v as u8) }
334 /// Returns a mutable reference to the underlying [`bool`].
336 /// This is safe because the mutable reference guarantees that no other threads are
337 /// concurrently accessing the atomic data.
339 /// [`bool`]: ../../../std/primitive.bool.html
344 /// use std::sync::atomic::{AtomicBool, Ordering};
346 /// let mut some_bool = AtomicBool::new(true);
347 /// assert_eq!(*some_bool.get_mut(), true);
348 /// *some_bool.get_mut() = false;
349 /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
352 #[stable(feature = "atomic_access", since = "1.15.0")]
353 pub fn get_mut(&mut self) -> &mut bool {
        // SAFETY: the exclusive `&mut self` borrow rules out any concurrent
        // access, and `AtomicBool` has the same in-memory representation as
        // `bool` (a `u8` holding 0 or 1), so the pointer cast is sound.
354 unsafe { &mut *(self.v.get() as *mut bool) }
357 /// Consumes the atomic and returns the contained value.
359 /// This is safe because passing `self` by value guarantees that no other threads are
360 /// concurrently accessing the atomic data.
365 /// use std::sync::atomic::AtomicBool;
367 /// let some_bool = AtomicBool::new(true);
368 /// assert_eq!(some_bool.into_inner(), true);
371 #[stable(feature = "atomic_access", since = "1.15.0")]
372 pub fn into_inner(self) -> bool {
        // A nonzero byte represents `true`; by the type's invariant the
        // value can only be 0 or 1.
373 self.v.into_inner() != 0
376 /// Loads a value from the bool.
378 /// `load` takes an [`Ordering`] argument which describes the memory ordering
379 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
383 /// Panics if `order` is [`Release`] or [`AcqRel`].
385 /// [`Ordering`]: enum.Ordering.html
386 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
387 /// [`Release`]: enum.Ordering.html#variant.Release
388 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
389 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
390 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
395 /// use std::sync::atomic::{AtomicBool, Ordering};
397 /// let some_bool = AtomicBool::new(true);
399 /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
402 #[stable(feature = "rust1", since = "1.0.0")]
403 pub fn load(&self, order: Ordering) -> bool {
        // SAFETY: `self.v.get()` points to valid, initialized memory owned
        // by `self`, and the atomic intrinsic prevents data races.
404 unsafe { atomic_load(self.v.get(), order) != 0 }
407 /// Stores a value into the bool.
409 /// `store` takes an [`Ordering`] argument which describes the memory ordering
410 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
414 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
416 /// [`Ordering`]: enum.Ordering.html
417 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
418 /// [`Release`]: enum.Ordering.html#variant.Release
419 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
420 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
421 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
426 /// use std::sync::atomic::{AtomicBool, Ordering};
428 /// let some_bool = AtomicBool::new(true);
430 /// some_bool.store(false, Ordering::Relaxed);
431 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
434 #[stable(feature = "rust1", since = "1.0.0")]
435 pub fn store(&self, val: bool, order: Ordering) {
        // SAFETY: `self.v.get()` points to valid, initialized memory owned
        // by `self`, and the atomic intrinsic prevents data races.
437 atomic_store(self.v.get(), val as u8, order);
441 /// Stores a value into the bool, returning the previous value.
443 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
444 /// of this operation. All ordering modes are possible. Note that using
445 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
446 /// using [`Release`] makes the load part [`Relaxed`].
448 /// [`Ordering`]: enum.Ordering.html
449 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
450 /// [`Release`]: enum.Ordering.html#variant.Release
451 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
456 /// use std::sync::atomic::{AtomicBool, Ordering};
458 /// let some_bool = AtomicBool::new(true);
460 /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
461 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
464 #[stable(feature = "rust1", since = "1.0.0")]
465 #[cfg(target_has_atomic = "cas")]
466 pub fn swap(&self, val: bool, order: Ordering) -> bool {
        // SAFETY: `self.v.get()` points to valid, initialized memory owned
        // by `self`, and the atomic intrinsic prevents data races.
467 unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
470 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
472 /// The return value is always the previous value. If it is equal to `current`, then the value
475 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
476 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
477 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
478 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
479 /// happens, and using [`Release`] makes the load part [`Relaxed`].
481 /// [`Ordering`]: enum.Ordering.html
482 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
483 /// [`Release`]: enum.Ordering.html#variant.Release
484 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
485 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
486 /// [`bool`]: ../../../std/primitive.bool.html
491 /// use std::sync::atomic::{AtomicBool, Ordering};
493 /// let some_bool = AtomicBool::new(true);
495 /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
496 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
498 /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
499 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
502 #[stable(feature = "rust1", since = "1.0.0")]
503 #[cfg(target_has_atomic = "cas")]
504 pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        // Implemented on top of `compare_exchange`, deriving the failure
        // ordering from the single user-supplied `order`.
505 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
511 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
513 /// The return value is a result indicating whether the new value was written and containing
514 /// the previous value. On success this value is guaranteed to be equal to `current`.
516 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
517 /// ordering of this operation. The first describes the required ordering if the
518 /// operation succeeds while the second describes the required ordering when the
519 /// operation fails. Using [`Acquire`] as success ordering makes the store part
520 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
521 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
522 /// and must be equivalent to or weaker than the success ordering.
525 /// [`bool`]: ../../../std/primitive.bool.html
526 /// [`Ordering`]: enum.Ordering.html
527 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
528 /// [`Release`]: enum.Ordering.html#variant.Release
529 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
530 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
535 /// use std::sync::atomic::{AtomicBool, Ordering};
537 /// let some_bool = AtomicBool::new(true);
539 /// assert_eq!(some_bool.compare_exchange(true,
541 /// Ordering::Acquire,
542 /// Ordering::Relaxed),
544 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
546 /// assert_eq!(some_bool.compare_exchange(true, true,
547 /// Ordering::SeqCst,
548 /// Ordering::Acquire),
550 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
553 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
554 #[cfg(target_has_atomic = "cas")]
555 pub fn compare_exchange(&self,
560 -> Result<bool, bool> {
        // The intrinsic operates on `u8`; translate arguments with `as u8`
        // and map both result payloads back to `bool` (nonzero == true).
562 atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
565 Err(x) => Err(x != 0),
569 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
571 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
572 /// comparison succeeds, which can result in more efficient code on some platforms. The
573 /// return value is a result indicating whether the new value was written and containing the
576 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
577 /// ordering of this operation. The first describes the required ordering if the
578 /// operation succeeds while the second describes the required ordering when the
579 /// operation fails. Using [`Acquire`] as success ordering makes the store part
580 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
581 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
582 /// and must be equivalent to or weaker than the success ordering.
584 /// [`bool`]: ../../../std/primitive.bool.html
585 /// [`compare_exchange`]: #method.compare_exchange
586 /// [`Ordering`]: enum.Ordering.html
587 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
588 /// [`Release`]: enum.Ordering.html#variant.Release
589 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
590 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
595 /// use std::sync::atomic::{AtomicBool, Ordering};
597 /// let val = AtomicBool::new(false);
600 /// let mut old = val.load(Ordering::Relaxed);
602 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
604 /// Err(x) => old = x,
609 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
610 #[cfg(target_has_atomic = "cas")]
611 pub fn compare_exchange_weak(&self,
616 -> Result<bool, bool> {
        // Same `u8` <-> `bool` translation as `compare_exchange`, but uses
        // the weak intrinsic, which may fail spuriously.
618 atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
621 Err(x) => Err(x != 0),
625 /// Logical "and" with a boolean value.
627 /// Performs a logical "and" operation on the current value and the argument `val`, and sets
628 /// the new value to the result.
630 /// Returns the previous value.
632 /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
633 /// of this operation. All ordering modes are possible. Note that using
634 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
635 /// using [`Release`] makes the load part [`Relaxed`].
637 /// [`Ordering`]: enum.Ordering.html
638 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
639 /// [`Release`]: enum.Ordering.html#variant.Release
640 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
645 /// use std::sync::atomic::{AtomicBool, Ordering};
647 /// let foo = AtomicBool::new(true);
648 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
649 /// assert_eq!(foo.load(Ordering::SeqCst), false);
651 /// let foo = AtomicBool::new(true);
652 /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
653 /// assert_eq!(foo.load(Ordering::SeqCst), true);
655 /// let foo = AtomicBool::new(false);
656 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
657 /// assert_eq!(foo.load(Ordering::SeqCst), false);
660 #[stable(feature = "rust1", since = "1.0.0")]
661 #[cfg(target_has_atomic = "cas")]
662 pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        // SAFETY: `self.v.get()` points to valid, initialized memory owned
        // by `self`, and the atomic intrinsic prevents data races.
663 unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
666 /// Logical "nand" with a boolean value.
668 /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
669 /// the new value to the result.
671 /// Returns the previous value.
673 /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
674 /// of this operation. All ordering modes are possible. Note that using
675 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
676 /// using [`Release`] makes the load part [`Relaxed`].
678 /// [`Ordering`]: enum.Ordering.html
679 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
680 /// [`Release`]: enum.Ordering.html#variant.Release
681 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
686 /// use std::sync::atomic::{AtomicBool, Ordering};
688 /// let foo = AtomicBool::new(true);
689 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
690 /// assert_eq!(foo.load(Ordering::SeqCst), true);
692 /// let foo = AtomicBool::new(true);
693 /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
694 /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
695 /// assert_eq!(foo.load(Ordering::SeqCst), false);
697 /// let foo = AtomicBool::new(false);
698 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
699 /// assert_eq!(foo.load(Ordering::SeqCst), true);
702 #[stable(feature = "rust1", since = "1.0.0")]
703 #[cfg(target_has_atomic = "cas")]
704 pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
705 // We can't use atomic_nand here because it can result in a bool with
706 // an invalid value. This happens because the atomic operation is done
707 // with an 8-bit integer internally, which would set the upper 7 bits.
708 // So we just use fetch_xor or swap instead.
            // !(x & true) == !x
711 // We must invert the bool.
712 self.fetch_xor(true, order)
714 // !(x & false) == true
715 // We must set the bool to true.
716 self.swap(true, order)
720 /// Logical "or" with a boolean value.
722 /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
723 /// new value to the result.
725 /// Returns the previous value.
727 /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
728 /// of this operation. All ordering modes are possible. Note that using
729 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
730 /// using [`Release`] makes the load part [`Relaxed`].
732 /// [`Ordering`]: enum.Ordering.html
733 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
734 /// [`Release`]: enum.Ordering.html#variant.Release
735 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
740 /// use std::sync::atomic::{AtomicBool, Ordering};
742 /// let foo = AtomicBool::new(true);
743 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
744 /// assert_eq!(foo.load(Ordering::SeqCst), true);
746 /// let foo = AtomicBool::new(true);
747 /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
748 /// assert_eq!(foo.load(Ordering::SeqCst), true);
750 /// let foo = AtomicBool::new(false);
751 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
752 /// assert_eq!(foo.load(Ordering::SeqCst), false);
755 #[stable(feature = "rust1", since = "1.0.0")]
756 #[cfg(target_has_atomic = "cas")]
757 pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        // SAFETY: `self.v.get()` points to valid, initialized memory owned
        // by `self`, and the atomic intrinsic prevents data races.
758 unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
761 /// Logical "xor" with a boolean value.
763 /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
764 /// the new value to the result.
766 /// Returns the previous value.
768 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
769 /// of this operation. All ordering modes are possible. Note that using
770 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
771 /// using [`Release`] makes the load part [`Relaxed`].
773 /// [`Ordering`]: enum.Ordering.html
774 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
775 /// [`Release`]: enum.Ordering.html#variant.Release
776 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
781 /// use std::sync::atomic::{AtomicBool, Ordering};
783 /// let foo = AtomicBool::new(true);
784 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
785 /// assert_eq!(foo.load(Ordering::SeqCst), true);
787 /// let foo = AtomicBool::new(true);
788 /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
789 /// assert_eq!(foo.load(Ordering::SeqCst), false);
791 /// let foo = AtomicBool::new(false);
792 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
793 /// assert_eq!(foo.load(Ordering::SeqCst), false);
796 #[stable(feature = "rust1", since = "1.0.0")]
797 #[cfg(target_has_atomic = "cas")]
798 pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        // SAFETY: `self.v.get()` points to valid, initialized memory owned
        // by `self`, and the atomic intrinsic prevents data races.
799 unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
803 #[cfg(target_has_atomic = "ptr")]
804 impl<T> AtomicPtr<T> {
805 /// Creates a new `AtomicPtr`.
810 /// use std::sync::atomic::AtomicPtr;
812 /// let ptr = &mut 5;
813 /// let atomic_ptr = AtomicPtr::new(ptr);
816 #[stable(feature = "rust1", since = "1.0.0")]
817 pub const fn new(p: *mut T) -> AtomicPtr<T> {
818 AtomicPtr { p: UnsafeCell::new(p) }
821 /// Returns a mutable reference to the underlying pointer.
823 /// This is safe because the mutable reference guarantees that no other threads are
824 /// concurrently accessing the atomic data.
829 /// use std::sync::atomic::{AtomicPtr, Ordering};
831 /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
832 /// *atomic_ptr.get_mut() = &mut 5;
833 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
836 #[stable(feature = "atomic_access", since = "1.15.0")]
837 pub fn get_mut(&mut self) -> &mut *mut T {
        // SAFETY: the exclusive `&mut self` borrow rules out any concurrent
        // access, so a plain mutable reference into the cell is sound.
838 unsafe { &mut *self.p.get() }
841 /// Consumes the atomic and returns the contained value.
843 /// This is safe because passing `self` by value guarantees that no other threads are
844 /// concurrently accessing the atomic data.
849 /// use std::sync::atomic::AtomicPtr;
851 /// let atomic_ptr = AtomicPtr::new(&mut 5);
852 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
855 #[stable(feature = "atomic_access", since = "1.15.0")]
856 pub fn into_inner(self) -> *mut T {
860 /// Loads a value from the pointer.
862 /// `load` takes an [`Ordering`] argument which describes the memory ordering
863 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
867 /// Panics if `order` is [`Release`] or [`AcqRel`].
869 /// [`Ordering`]: enum.Ordering.html
870 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
871 /// [`Release`]: enum.Ordering.html#variant.Release
872 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
873 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
874 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
879 /// use std::sync::atomic::{AtomicPtr, Ordering};
881 /// let ptr = &mut 5;
882 /// let some_ptr = AtomicPtr::new(ptr);
884 /// let value = some_ptr.load(Ordering::Relaxed);
887 #[stable(feature = "rust1", since = "1.0.0")]
888 pub fn load(&self, order: Ordering) -> *mut T {
        // SAFETY: `self.p.get()` points to valid, initialized memory owned by
        // `self`, and the atomic intrinsic prevents data races. The intrinsic
        // works on integers, so the pointer is round-tripped through `usize`,
        // which has the same size as `*mut T`.
889 unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
892 /// Stores a value into the pointer.
894 /// `store` takes an [`Ordering`] argument which describes the memory ordering
895 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
899 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
901 /// [`Ordering`]: enum.Ordering.html
902 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
903 /// [`Release`]: enum.Ordering.html#variant.Release
904 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
905 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
906 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
911 /// use std::sync::atomic::{AtomicPtr, Ordering};
913 /// let ptr = &mut 5;
914 /// let some_ptr = AtomicPtr::new(ptr);
916 /// let other_ptr = &mut 10;
918 /// some_ptr.store(other_ptr, Ordering::Relaxed);
921 #[stable(feature = "rust1", since = "1.0.0")]
922 pub fn store(&self, ptr: *mut T, order: Ordering) {
        // SAFETY: `self.p.get()` points to valid, initialized memory owned by
        // `self`, and the atomic intrinsic prevents data races. The pointer is
        // stored through `usize`, which has the same size as `*mut T`.
924 atomic_store(self.p.get() as *mut usize, ptr as usize, order);
928 /// Stores a value into the pointer, returning the previous value.
930 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
931 /// of this operation. All ordering modes are possible. Note that using
932 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
933 /// using [`Release`] makes the load part [`Relaxed`].
935 /// [`Ordering`]: enum.Ordering.html
936 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
937 /// [`Release`]: enum.Ordering.html#variant.Release
938 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
943 /// use std::sync::atomic::{AtomicPtr, Ordering};
945 /// let ptr = &mut 5;
946 /// let some_ptr = AtomicPtr::new(ptr);
948 /// let other_ptr = &mut 10;
950 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
953 #[stable(feature = "rust1", since = "1.0.0")]
954 #[cfg(target_has_atomic = "cas")]
955 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
956 unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
959 /// Stores a value into the pointer if the current value is the same as the `current` value.
961 /// The return value is always the previous value. If it is equal to `current`, then the value
964 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
965 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
966 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
967 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
968 /// happens, and using [`Release`] makes the load part [`Relaxed`].
970 /// [`Ordering`]: enum.Ordering.html
971 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
972 /// [`Release`]: enum.Ordering.html#variant.Release
973 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
974 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
979 /// use std::sync::atomic::{AtomicPtr, Ordering};
981 /// let ptr = &mut 5;
982 /// let some_ptr = AtomicPtr::new(ptr);
984 /// let other_ptr = &mut 10;
986 /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
989 #[stable(feature = "rust1", since = "1.0.0")]
990 #[cfg(target_has_atomic = "cas")]
991 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
992 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
998 /// Stores a value into the pointer if the current value is the same as the `current` value.
1000 /// The return value is a result indicating whether the new value was written and containing
1001 /// the previous value. On success this value is guaranteed to be equal to `current`.
1003 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1004 /// ordering of this operation. The first describes the required ordering if the
1005 /// operation succeeds while the second describes the required ordering when the
1006 /// operation fails. Using [`Acquire`] as success ordering makes the store part
1007 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1008 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1009 /// and must be equivalent to or weaker than the success ordering.
1011 /// [`Ordering`]: enum.Ordering.html
1012 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1013 /// [`Release`]: enum.Ordering.html#variant.Release
1014 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1015 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1020 /// use std::sync::atomic::{AtomicPtr, Ordering};
1022 /// let ptr = &mut 5;
1023 /// let some_ptr = AtomicPtr::new(ptr);
1025 /// let other_ptr = &mut 10;
1027 /// let value = some_ptr.compare_exchange(ptr, other_ptr,
1028 /// Ordering::SeqCst, Ordering::Relaxed);
1031 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1032 #[cfg(target_has_atomic = "cas")]
1033 pub fn compare_exchange(&self,
1038 -> Result<*mut T, *mut T> {
1040 let res = atomic_compare_exchange(self.p.get() as *mut usize,
1046 Ok(x) => Ok(x as *mut T),
1047 Err(x) => Err(x as *mut T),
1052 /// Stores a value into the pointer if the current value is the same as the `current` value.
1054 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
1055 /// comparison succeeds, which can result in more efficient code on some platforms. The
1056 /// return value is a result indicating whether the new value was written and containing the
1059 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1060 /// ordering of this operation. The first describes the required ordering if the
1061 /// operation succeeds while the second describes the required ordering when the
1062 /// operation fails. Using [`Acquire`] as success ordering makes the store part
1063 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1064 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1065 /// and must be equivalent to or weaker than the success ordering.
1067 /// [`compare_exchange`]: #method.compare_exchange
1068 /// [`Ordering`]: enum.Ordering.html
1069 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1070 /// [`Release`]: enum.Ordering.html#variant.Release
1071 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1072 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1077 /// use std::sync::atomic::{AtomicPtr, Ordering};
1079 /// let some_ptr = AtomicPtr::new(&mut 5);
1081 /// let new = &mut 10;
1082 /// let mut old = some_ptr.load(Ordering::Relaxed);
1084 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1086 /// Err(x) => old = x,
1091 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1092 #[cfg(target_has_atomic = "cas")]
1093 pub fn compare_exchange_weak(&self,
1098 -> Result<*mut T, *mut T> {
1100 let res = atomic_compare_exchange_weak(self.p.get() as *mut usize,
1106 Ok(x) => Ok(x as *mut T),
1107 Err(x) => Err(x as *mut T),
1113 #[cfg(target_has_atomic = "8")]
1114 #[stable(feature = "atomic_bool_from", since = "1.24.0")]
1115 impl From<bool> for AtomicBool {
1116 /// Converts a `bool` into an `AtomicBool`.
1121 /// use std::sync::atomic::AtomicBool;
1122 /// let atomic_bool = AtomicBool::from(true);
1123 /// assert_eq!(format!("{:?}", atomic_bool), "true")
1126 fn from(b: bool) -> Self { Self::new(b) }
1129 #[cfg(target_has_atomic = "ptr")]
1130 #[stable(feature = "atomic_from", since = "1.23.0")]
1131 impl<T> From<*mut T> for AtomicPtr<T> {
1133 fn from(p: *mut T) -> Self { Self::new(p) }
1136 #[cfg(target_has_atomic = "ptr")]
1137 macro_rules! atomic_int {
1141 $stable_access:meta,
1144 $stable_init_const:meta,
1145 $s_int_type:expr, $int_ref:expr,
1146 $extra_feature:expr,
1147 $min_fn:ident, $max_fn:ident,
1150 $int_type:ident $atomic_type:ident $atomic_init:ident) => {
1151 /// An integer type which can be safely shared between threads.
1153 /// This type has the same in-memory representation as the underlying
1154 /// integer type, [`
1155 #[doc = $s_int_type]
1158 /// ). For more about the differences between atomic types and
1159 /// non-atomic types as well as information about the portability of
1160 /// this type, please see the [module-level documentation].
1162 /// [module-level documentation]: index.html
1164 #[repr(C, align($align))]
1165 pub struct $atomic_type {
1166 v: UnsafeCell<$int_type>,
1169 /// An atomic integer initialized to `0`.
1170 #[$stable_init_const]
1173 reason = "the `new` function is now preferred",
1174 suggestion = $atomic_new,
1176 pub const $atomic_init: $atomic_type = $atomic_type::new(0);
1179 impl Default for $atomic_type {
1180 fn default() -> Self {
1181 Self::new(Default::default())
1186 impl From<$int_type> for $atomic_type {
1189 "Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`."),
1191 fn from(v: $int_type) -> Self { Self::new(v) }
1196 impl fmt::Debug for $atomic_type {
1197 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1198 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
1202 // Send is implicitly implemented.
1204 unsafe impl Sync for $atomic_type {}
1208 concat!("Creates a new atomic integer.
1213 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1215 let atomic_forty_two = ", stringify!($atomic_type), "::new(42);
1219 pub const fn new(v: $int_type) -> Self {
1220 $atomic_type {v: UnsafeCell::new(v)}
1225 concat!("Returns a mutable reference to the underlying integer.
1227 This is safe because the mutable reference guarantees that no other threads are
1228 concurrently accessing the atomic data.
1233 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1235 let mut some_var = ", stringify!($atomic_type), "::new(10);
1236 assert_eq!(*some_var.get_mut(), 10);
1237 *some_var.get_mut() = 5;
1238 assert_eq!(some_var.load(Ordering::SeqCst), 5);
1242 pub fn get_mut(&mut self) -> &mut $int_type {
1243 unsafe { &mut *self.v.get() }
1248 concat!("Consumes the atomic and returns the contained value.
1250 This is safe because passing `self` by value guarantees that no other threads are
1251 concurrently accessing the atomic data.
1256 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1258 let some_var = ", stringify!($atomic_type), "::new(5);
1259 assert_eq!(some_var.into_inner(), 5);
1263 pub fn into_inner(self) -> $int_type {
1269 concat!("Loads a value from the atomic integer.
1271 `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1272 Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1276 Panics if `order` is [`Release`] or [`AcqRel`].
1278 [`Ordering`]: enum.Ordering.html
1279 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1280 [`Release`]: enum.Ordering.html#variant.Release
1281 [`Acquire`]: enum.Ordering.html#variant.Acquire
1282 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1283 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1288 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1290 let some_var = ", stringify!($atomic_type), "::new(5);
1292 assert_eq!(some_var.load(Ordering::Relaxed), 5);
1296 pub fn load(&self, order: Ordering) -> $int_type {
1297 unsafe { atomic_load(self.v.get(), order) }
1302 concat!("Stores a value into the atomic integer.
1304 `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1305 Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
1309 Panics if `order` is [`Acquire`] or [`AcqRel`].
1311 [`Ordering`]: enum.Ordering.html
1312 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1313 [`Release`]: enum.Ordering.html#variant.Release
1314 [`Acquire`]: enum.Ordering.html#variant.Acquire
1315 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1316 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1321 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1323 let some_var = ", stringify!($atomic_type), "::new(5);
1325 some_var.store(10, Ordering::Relaxed);
1326 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1330 pub fn store(&self, val: $int_type, order: Ordering) {
1331 unsafe { atomic_store(self.v.get(), val, order); }
1336 concat!("Stores a value into the atomic integer, returning the previous value.
1338 `swap` takes an [`Ordering`] argument which describes the memory ordering
1339 of this operation. All ordering modes are possible. Note that using
1340 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1341 using [`Release`] makes the load part [`Relaxed`].
1343 [`Ordering`]: enum.Ordering.html
1344 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1345 [`Release`]: enum.Ordering.html#variant.Release
1346 [`Acquire`]: enum.Ordering.html#variant.Acquire
1351 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1353 let some_var = ", stringify!($atomic_type), "::new(5);
1355 assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
1359 #[cfg(target_has_atomic = "cas")]
1360 pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
1361 unsafe { atomic_swap(self.v.get(), val, order) }
1366 concat!("Stores a value into the atomic integer if the current value is the same as
1367 the `current` value.
1369 The return value is always the previous value. If it is equal to `current`, then the
1372 `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1373 ordering of this operation. Notice that even when using [`AcqRel`], the operation
1374 might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1375 Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1376 happens, and using [`Release`] makes the load part [`Relaxed`].
1378 [`Ordering`]: enum.Ordering.html
1379 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1380 [`Release`]: enum.Ordering.html#variant.Release
1381 [`Acquire`]: enum.Ordering.html#variant.Acquire
1382 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1387 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1389 let some_var = ", stringify!($atomic_type), "::new(5);
1391 assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
1392 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1394 assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
1395 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1399 #[cfg(target_has_atomic = "cas")]
1400 pub fn compare_and_swap(&self,
1403 order: Ordering) -> $int_type {
1404 match self.compare_exchange(current,
1407 strongest_failure_ordering(order)) {
1415 concat!("Stores a value into the atomic integer if the current value is the same as
1416 the `current` value.
1418 The return value is a result indicating whether the new value was written and
1419 containing the previous value. On success this value is guaranteed to be equal to
1422 `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1423 ordering of this operation. The first describes the required ordering if the
1424 operation succeeds while the second describes the required ordering when the
1425 operation fails. Using [`Acquire`] as success ordering makes the store part
1426 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1427 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1428 and must be equivalent to or weaker than the success ordering.
1430 [`Ordering`]: enum.Ordering.html
1431 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1432 [`Release`]: enum.Ordering.html#variant.Release
1433 [`Acquire`]: enum.Ordering.html#variant.Acquire
1434 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1439 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1441 let some_var = ", stringify!($atomic_type), "::new(5);
1443 assert_eq!(some_var.compare_exchange(5, 10,
1447 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1449 assert_eq!(some_var.compare_exchange(6, 12,
1453 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1457 #[cfg(target_has_atomic = "cas")]
1458 pub fn compare_exchange(&self,
1462 failure: Ordering) -> Result<$int_type, $int_type> {
1463 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
1468 concat!("Stores a value into the atomic integer if the current value is the same as
1469 the `current` value.
1471 Unlike [`compare_exchange`], this function is allowed to spuriously fail even
1472 when the comparison succeeds, which can result in more efficient code on some
1473 platforms. The return value is a result indicating whether the new value was
1474 written and containing the previous value.
1476 `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1477 ordering of this operation. The first describes the required ordering if the
1478 operation succeeds while the second describes the required ordering when the
1479 operation fails. Using [`Acquire`] as success ordering makes the store part
1480 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1481 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1482 and must be equivalent to or weaker than the success ordering.
1484 [`compare_exchange`]: #method.compare_exchange
1485 [`Ordering`]: enum.Ordering.html
1486 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1487 [`Release`]: enum.Ordering.html#variant.Release
1488 [`Acquire`]: enum.Ordering.html#variant.Acquire
1489 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1494 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1496 let val = ", stringify!($atomic_type), "::new(4);
1498 let mut old = val.load(Ordering::Relaxed);
1501 match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1509 #[cfg(target_has_atomic = "cas")]
1510 pub fn compare_exchange_weak(&self,
1514 failure: Ordering) -> Result<$int_type, $int_type> {
1516 atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
1522 concat!("Adds to the current value, returning the previous value.
1524 This operation wraps around on overflow.
1526 `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
1527 of this operation. All ordering modes are possible. Note that using
1528 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1529 using [`Release`] makes the load part [`Relaxed`].
1531 [`Ordering`]: enum.Ordering.html
1532 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1533 [`Release`]: enum.Ordering.html#variant.Release
1534 [`Acquire`]: enum.Ordering.html#variant.Acquire
1539 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1541 let foo = ", stringify!($atomic_type), "::new(0);
1542 assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
1543 assert_eq!(foo.load(Ordering::SeqCst), 10);
1547 #[cfg(target_has_atomic = "cas")]
1548 pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
1549 unsafe { atomic_add(self.v.get(), val, order) }
1554 concat!("Subtracts from the current value, returning the previous value.
1556 This operation wraps around on overflow.
1558 `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
1559 of this operation. All ordering modes are possible. Note that using
1560 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1561 using [`Release`] makes the load part [`Relaxed`].
1563 [`Ordering`]: enum.Ordering.html
1564 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1565 [`Release`]: enum.Ordering.html#variant.Release
1566 [`Acquire`]: enum.Ordering.html#variant.Acquire
1571 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1573 let foo = ", stringify!($atomic_type), "::new(20);
1574 assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
1575 assert_eq!(foo.load(Ordering::SeqCst), 10);
1579 #[cfg(target_has_atomic = "cas")]
1580 pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
1581 unsafe { atomic_sub(self.v.get(), val, order) }
1586 concat!("Bitwise \"and\" with the current value.
1588 Performs a bitwise \"and\" operation on the current value and the argument `val`, and
1589 sets the new value to the result.
1591 Returns the previous value.
1593 `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
1594 of this operation. All ordering modes are possible. Note that using
1595 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1596 using [`Release`] makes the load part [`Relaxed`].
1598 [`Ordering`]: enum.Ordering.html
1599 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1600 [`Release`]: enum.Ordering.html#variant.Release
1601 [`Acquire`]: enum.Ordering.html#variant.Acquire
1606 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1608 let foo = ", stringify!($atomic_type), "::new(0b101101);
1609 assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
1610 assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
1614 #[cfg(target_has_atomic = "cas")]
1615 pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
1616 unsafe { atomic_and(self.v.get(), val, order) }
1621 concat!("Bitwise \"nand\" with the current value.
1623 Performs a bitwise \"nand\" operation on the current value and the argument `val`, and
1624 sets the new value to the result.
1626 Returns the previous value.
1628 `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
1629 of this operation. All ordering modes are possible. Note that using
1630 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1631 using [`Release`] makes the load part [`Relaxed`].
1633 [`Ordering`]: enum.Ordering.html
1634 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1635 [`Release`]: enum.Ordering.html#variant.Release
1636 [`Acquire`]: enum.Ordering.html#variant.Acquire
1641 ", $extra_feature, "
1642 use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1644 let foo = ", stringify!($atomic_type), "::new(0x13);
1645 assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
1646 assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
1650 #[cfg(target_has_atomic = "cas")]
1651 pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
1652 unsafe { atomic_nand(self.v.get(), val, order) }
1657 concat!("Bitwise \"or\" with the current value.
1659 Performs a bitwise \"or\" operation on the current value and the argument `val`, and
1660 sets the new value to the result.
1662 Returns the previous value.
1664 `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
1665 of this operation. All ordering modes are possible. Note that using
1666 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1667 using [`Release`] makes the load part [`Relaxed`].
1669 [`Ordering`]: enum.Ordering.html
1670 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1671 [`Release`]: enum.Ordering.html#variant.Release
1672 [`Acquire`]: enum.Ordering.html#variant.Acquire
1677 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1679 let foo = ", stringify!($atomic_type), "::new(0b101101);
1680 assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
1681 assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
1685 #[cfg(target_has_atomic = "cas")]
1686 pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
1687 unsafe { atomic_or(self.v.get(), val, order) }
1692 concat!("Bitwise \"xor\" with the current value.
1694 Performs a bitwise \"xor\" operation on the current value and the argument `val`, and
1695 sets the new value to the result.
1697 Returns the previous value.
1699 `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
1700 of this operation. All ordering modes are possible. Note that using
1701 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1702 using [`Release`] makes the load part [`Relaxed`].
1704 [`Ordering`]: enum.Ordering.html
1705 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1706 [`Release`]: enum.Ordering.html#variant.Release
1707 [`Acquire`]: enum.Ordering.html#variant.Acquire
1712 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1714 let foo = ", stringify!($atomic_type), "::new(0b101101);
1715 assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
1716 assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
1720 #[cfg(target_has_atomic = "cas")]
1721 pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
1722 unsafe { atomic_xor(self.v.get(), val, order) }
1727 concat!("Fetches the value, and applies a function to it that returns an optional
1728 new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
1729 `Err(previous_value)`.
1731 Note: This may call the function multiple times if the value has been changed from other threads in
1732 the meantime, as long as the function returns `Some(_)`, but the function will have been applied
1733 but once to the stored value.
1735 `fetch_update` takes two [`Ordering`] arguments to describe the memory
1736 ordering of this operation. The first describes the required ordering for loads
1737 and failed updates while the second describes the required ordering when the
1738 operation finally succeeds. Beware that this is different from the two
1739 modes in [`compare_exchange`]!
1741 Using [`Acquire`] as success ordering makes the store part
1742 of this operation [`Relaxed`], and using [`Release`] makes the final successful load
1743 [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1744 and must be equivalent to or weaker than the success ordering.
1746 [`bool`]: ../../../std/primitive.bool.html
1747 [`compare_exchange`]: #method.compare_exchange
1748 [`Ordering`]: enum.Ordering.html
1749 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1750 [`Release`]: enum.Ordering.html#variant.Release
1751 [`Acquire`]: enum.Ordering.html#variant.Acquire
1752 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1757 #![feature(no_more_cas)]
1758 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1760 let x = ", stringify!($atomic_type), "::new(7);
1761 assert_eq!(x.fetch_update(|_| None, Ordering::SeqCst, Ordering::SeqCst), Err(7));
1762 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(7));
1763 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(8));
1764 assert_eq!(x.load(Ordering::SeqCst), 9);
1767 #[unstable(feature = "no_more_cas",
1768 reason = "no more CAS loops in user code",
1770 #[cfg(target_has_atomic = "cas")]
1771 pub fn fetch_update<F>(&self,
1773 fetch_order: Ordering,
1774 set_order: Ordering) -> Result<$int_type, $int_type>
1775 where F: FnMut($int_type) -> Option<$int_type> {
1776 let mut prev = self.load(fetch_order);
1777 while let Some(next) = f(prev) {
1778 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
1779 x @ Ok(_) => return x,
1780 Err(next_prev) => prev = next_prev
1788 concat!("Maximum with the current value.
1790 Finds the maximum of the current value and the argument `val`, and
1791 sets the new value to the result.
1793 Returns the previous value.
1795 `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
1796 of this operation. All ordering modes are possible. Note that using
1797 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1798 using [`Release`] makes the load part [`Relaxed`].
1800 [`Ordering`]: enum.Ordering.html
1801 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1802 [`Release`]: enum.Ordering.html#variant.Release
1803 [`Acquire`]: enum.Ordering.html#variant.Acquire
1808 #![feature(atomic_min_max)]
1809 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1811 let foo = ", stringify!($atomic_type), "::new(23);
1812 assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
1813 assert_eq!(foo.load(Ordering::SeqCst), 42);
1816 If you want to obtain the maximum value in one step, you can use the following:
1819 #![feature(atomic_min_max)]
1820 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1822 let foo = ", stringify!($atomic_type), "::new(23);
1824 let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
1825 assert!(max_foo == 42);
1828 #[unstable(feature = "atomic_min_max",
1829 reason = "easier and faster min/max than writing manual CAS loop",
1831 #[cfg(target_has_atomic = "cas")]
1832 pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
1833 unsafe { $max_fn(self.v.get(), val, order) }
1838 concat!("Minimum with the current value.
1840 Finds the minimum of the current value and the argument `val`, and
1841 sets the new value to the result.
1843 Returns the previous value.
1845 `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
1846 of this operation. All ordering modes are possible. Note that using
1847 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1848 using [`Release`] makes the load part [`Relaxed`].
1850 [`Ordering`]: enum.Ordering.html
1851 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1852 [`Release`]: enum.Ordering.html#variant.Release
1853 [`Acquire`]: enum.Ordering.html#variant.Acquire
1858 #![feature(atomic_min_max)]
1859 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1861 let foo = ", stringify!($atomic_type), "::new(23);
1862 assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
1863 assert_eq!(foo.load(Ordering::Relaxed), 23);
1864 assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
1865 assert_eq!(foo.load(Ordering::Relaxed), 22);
1868 If you want to obtain the minimum value in one step, you can use the following:
1871 #![feature(atomic_min_max)]
1872 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1874 let foo = ", stringify!($atomic_type), "::new(23);
1876 let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
1877 assert_eq!(min_foo, 12);
1880 #[unstable(feature = "atomic_min_max",
1881 reason = "easier and faster min/max than writing manual CAS loop",
1883 #[cfg(target_has_atomic = "cas")]
1884 pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
1885 unsafe { $min_fn(self.v.get(), val, order) }
1893 #[cfg(target_has_atomic = "8")]
1895 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1896 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1897 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1898 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1899 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1900 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1901 unstable(feature = "integer_atomics", issue = "32976"),
1902 "i8", "../../../std/primitive.i8.html",
1904 atomic_min, atomic_max,
1907 i8 AtomicI8 ATOMIC_I8_INIT
1909 #[cfg(target_has_atomic = "8")]
1911 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1912 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1913 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1914 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1915 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1916 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1917 unstable(feature = "integer_atomics", issue = "32976"),
1918 "u8", "../../../std/primitive.u8.html",
1920 atomic_umin, atomic_umax,
1923 u8 AtomicU8 ATOMIC_U8_INIT
1925 #[cfg(target_has_atomic = "16")]
1927 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1928 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1929 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1930 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1931 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1932 stable(feature = "integer_atomics_stable", since = "1.34.0"),
1933 unstable(feature = "integer_atomics", issue = "32976"),
1934 "i16", "../../../std/primitive.i16.html",
1936 atomic_min, atomic_max,
1938 "AtomicI16::new(0)",
1939 i16 AtomicI16 ATOMIC_I16_INIT
#[cfg(target_has_atomic = "16")]
// `atomic_int!` invocation arguments generating `AtomicU16` (call delimiters
// elided here): stability attributes, doc name/link, *unsigned* min/max
// intrinsics, example initializer, and the primitive/type/legacy-const tail.
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
unstable(feature = "integer_atomics", issue = "32976"),
"u16", "../../../std/primitive.u16.html",
atomic_umin, atomic_umax,
"AtomicU16::new(0)",
u16 AtomicU16 ATOMIC_U16_INIT
#[cfg(target_has_atomic = "32")]
// `atomic_int!` invocation arguments generating `AtomicI32` (call delimiters
// elided here): stability attributes, doc name/link, *signed* min/max
// intrinsics, example initializer, and the primitive/type/legacy-const tail.
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
unstable(feature = "integer_atomics", issue = "32976"),
"i32", "../../../std/primitive.i32.html",
atomic_min, atomic_max,
"AtomicI32::new(0)",
i32 AtomicI32 ATOMIC_I32_INIT
#[cfg(target_has_atomic = "32")]
// `atomic_int!` invocation arguments generating `AtomicU32` (call delimiters
// elided here): stability attributes, doc name/link, *unsigned* min/max
// intrinsics, example initializer, and the primitive/type/legacy-const tail.
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
unstable(feature = "integer_atomics", issue = "32976"),
"u32", "../../../std/primitive.u32.html",
atomic_umin, atomic_umax,
"AtomicU32::new(0)",
u32 AtomicU32 ATOMIC_U32_INIT
#[cfg(target_has_atomic = "64")]
// `atomic_int!` invocation arguments generating `AtomicI64` (call delimiters
// elided here): stability attributes, doc name/link, *signed* min/max
// intrinsics, example initializer, and the primitive/type/legacy-const tail.
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
unstable(feature = "integer_atomics", issue = "32976"),
"i64", "../../../std/primitive.i64.html",
atomic_min, atomic_max,
"AtomicI64::new(0)",
i64 AtomicI64 ATOMIC_I64_INIT
#[cfg(target_has_atomic = "64")]
// `atomic_int!` invocation arguments generating `AtomicU64` (call delimiters
// elided here): stability attributes, doc name/link, *unsigned* min/max
// intrinsics, example initializer, and the primitive/type/legacy-const tail.
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
unstable(feature = "integer_atomics", issue = "32976"),
"u64", "../../../std/primitive.u64.html",
atomic_umin, atomic_umax,
"AtomicU64::new(0)",
u64 AtomicU64 ATOMIC_U64_INIT
#[cfg(target_has_atomic = "128")]
// `atomic_int!` invocation arguments generating `AtomicI128` (call delimiters
// elided here). All seven attribute slots are `unstable` — 128-bit atomics
// were not stabilized with the rest — and the extra
// `"#![feature(integer_atomics)]\n\n"` string is, presumably, a doc-example
// prelude the macro prepends; confirm against the macro definition.
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
"i128", "../../../std/primitive.i128.html",
"#![feature(integer_atomics)]\n\n",
atomic_min, atomic_max,
"AtomicI128::new(0)",
i128 AtomicI128 ATOMIC_I128_INIT
#[cfg(target_has_atomic = "128")]
// `atomic_int!` invocation arguments generating `AtomicU128` (call delimiters
// elided here). All slots unstable, unsigned min/max intrinsics, plus the
// doc-example feature-gate prelude string — see `AtomicI128` analogue.
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
unstable(feature = "integer_atomics", issue = "32976"),
"u128", "../../../std/primitive.u128.html",
"#![feature(integer_atomics)]\n\n",
atomic_umin, atomic_umax,
"AtomicU128::new(0)",
u128 AtomicU128 ATOMIC_U128_INIT
// Helper macro resolving the target's pointer width: three cfg-gated
// definitions of `ptr_width!`, one per `target_pointer_width`, so exactly one
// is compiled in. The macro bodies are elided in this excerpt — presumably
// each expands to the corresponding byte width (2/4/8) for use in the
// `isize`/`usize` documentation below; TODO(review): confirm in full source.
#[cfg(target_pointer_width = "16")]
macro_rules! ptr_width {
#[cfg(target_pointer_width = "32")]
macro_rules! ptr_width {
#[cfg(target_pointer_width = "64")]
macro_rules! ptr_width {
#[cfg(target_has_atomic = "ptr")]
// `atomic_int!` invocation arguments generating `AtomicIsize` (call
// delimiters elided here). Unlike the fixed-width types, each method group
// carries the individual feature it was stabilized under (`rust1`,
// `extended_compare_and_swap`, `atomic_debug`, `atomic_access`,
// `atomic_from`, `atomic_nand`) because `AtomicIsize` predates the
// `integer_atomics` stabilization. Signed min/max intrinsics.
stable(feature = "rust1", since = "1.0.0"),
stable(feature = "extended_compare_and_swap", since = "1.10.0"),
stable(feature = "atomic_debug", since = "1.3.0"),
stable(feature = "atomic_access", since = "1.15.0"),
stable(feature = "atomic_from", since = "1.23.0"),
stable(feature = "atomic_nand", since = "1.27.0"),
stable(feature = "rust1", since = "1.0.0"),
"isize", "../../../std/primitive.isize.html",
atomic_min, atomic_max,
"AtomicIsize::new(0)",
isize AtomicIsize ATOMIC_ISIZE_INIT
#[cfg(target_has_atomic = "ptr")]
// `atomic_int!` invocation arguments generating `AtomicUsize` (call
// delimiters elided here). Per-method-group stability features as for
// `AtomicIsize`; *unsigned* min/max intrinsics.
stable(feature = "rust1", since = "1.0.0"),
stable(feature = "extended_compare_and_swap", since = "1.10.0"),
stable(feature = "atomic_debug", since = "1.3.0"),
stable(feature = "atomic_access", since = "1.15.0"),
stable(feature = "atomic_from", since = "1.23.0"),
stable(feature = "atomic_nand", since = "1.27.0"),
stable(feature = "rust1", since = "1.0.0"),
"usize", "../../../std/primitive.usize.html",
atomic_umin, atomic_umax,
"AtomicUsize::new(0)",
usize AtomicUsize ATOMIC_USIZE_INIT
/// Returns the strongest failure [`Ordering`] that is legal for a
/// compare-exchange whose success ordering is `order`.
///
/// Used to derive the failure ordering of `compare_and_swap`, which takes
/// only a single ordering argument: `Release`/`Relaxed` successes may only
/// fail `Relaxed`, while `Acquire`/`AcqRel` failures are capped at `Acquire`.
/// NOTE(review): the body of this function is elided in this excerpt; this is
/// the canonical mapping — confirm against the full source.
#[inline]
#[cfg(target_has_atomic = "cas")]
fn strongest_failure_ordering(order: Ordering) -> Ordering {
    match order {
        Release => Relaxed,
        Relaxed => Relaxed,
        SeqCst => SeqCst,
        Acquire => Acquire,
        AcqRel => Acquire,
    }
}
2111 unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
2113 Release => intrinsics::atomic_store_rel(dst, val),
2114 Relaxed => intrinsics::atomic_store_relaxed(dst, val),
2115 SeqCst => intrinsics::atomic_store(dst, val),
2116 Acquire => panic!("there is no such thing as an acquire store"),
2117 AcqRel => panic!("there is no such thing as an acquire/release store"),
2122 unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
2124 Acquire => intrinsics::atomic_load_acq(dst),
2125 Relaxed => intrinsics::atomic_load_relaxed(dst),
2126 SeqCst => intrinsics::atomic_load(dst),
2127 Release => panic!("there is no such thing as a release load"),
2128 AcqRel => panic!("there is no such thing as an acquire/release load"),
/// Atomically replaces `*dst` with `val`, returning the previous value.
///
/// # Safety
///
/// `dst` must be non-null, properly aligned, and valid for atomic reads and
/// writes of a primitive type supported by the `atomic_xchg*` intrinsics.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xchg_acq(dst, val),
        Release => intrinsics::atomic_xchg_rel(dst, val),
        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xchg(dst, val),
    }
}
/// Atomically adds `val` to `*dst`, returning the previous value
/// (like `__sync_fetch_and_add`). Overflow wraps around.
///
/// # Safety
///
/// `dst` must be non-null, properly aligned, and valid for atomic reads and
/// writes of a primitive integer type supported by the `atomic_xadd*`
/// intrinsics.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xadd_acq(dst, val),
        Release => intrinsics::atomic_xadd_rel(dst, val),
        AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xadd(dst, val),
    }
}
/// Atomically subtracts `val` from `*dst`, returning the previous value
/// (like `__sync_fetch_and_sub`). Overflow wraps around.
///
/// # Safety
///
/// `dst` must be non-null, properly aligned, and valid for atomic reads and
/// writes of a primitive integer type supported by the `atomic_xsub*`
/// intrinsics.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xsub_acq(dst, val),
        Release => intrinsics::atomic_xsub_rel(dst, val),
        AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xsub(dst, val),
    }
}
/// Atomic compare-and-exchange: if `*dst == old`, stores `new` and returns
/// `Ok` of the previous value; otherwise leaves `*dst` unchanged and returns
/// `Err` of the value actually found.
///
/// `success` and `failure` select the memory orderings for the two outcomes.
///
/// # Panics
///
/// Panics if `failure` is `Release` or `AcqRel`, or is stronger than
/// `success` (only the combinations listed below are valid).
///
/// # Safety
///
/// `dst` must be non-null, properly aligned, and valid for atomic reads and
/// writes of a primitive type supported by the `atomic_cxchg*` intrinsics.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_compare_exchange<T>(dst: *mut T,
                                     old: T,
                                     new: T,
                                     success: Ordering,
                                     failure: Ordering)
                                     -> Result<T, T> {
    // Each intrinsic returns the observed value plus a success flag.
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    if ok { Ok(val) } else { Err(val) }
}
/// Weak atomic compare-and-exchange: like `atomic_compare_exchange`, but the
/// `atomic_cxchgweak*` intrinsics are permitted to fail spuriously even when
/// the comparison succeeds, so callers are expected to retry in a loop.
///
/// # Panics
///
/// Panics if `failure` is `Release` or `AcqRel`, or is stronger than
/// `success`.
///
/// # Safety
///
/// `dst` must be non-null, properly aligned, and valid for atomic reads and
/// writes of a primitive type supported by the `atomic_cxchgweak*`
/// intrinsics.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
                                          old: T,
                                          new: T,
                                          success: Ordering,
                                          failure: Ordering)
                                          -> Result<T, T> {
    // Each intrinsic returns the observed value plus a success flag.
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    if ok { Ok(val) } else { Err(val) }
}
/// Atomically performs a bitwise AND of `val` into `*dst`, returning the
/// previous value.
///
/// # Safety
///
/// `dst` must be non-null, properly aligned, and valid for atomic reads and
/// writes of a primitive integer type supported by the `atomic_and*`
/// intrinsics.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_and_acq(dst, val),
        Release => intrinsics::atomic_and_rel(dst, val),
        AcqRel => intrinsics::atomic_and_acqrel(dst, val),
        Relaxed => intrinsics::atomic_and_relaxed(dst, val),
        SeqCst => intrinsics::atomic_and(dst, val),
    }
}
/// Atomically performs a bitwise NAND of `val` into `*dst`, returning the
/// previous value.
///
/// # Safety
///
/// `dst` must be non-null, properly aligned, and valid for atomic reads and
/// writes of a primitive integer type supported by the `atomic_nand*`
/// intrinsics.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_nand_acq(dst, val),
        Release => intrinsics::atomic_nand_rel(dst, val),
        AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
        Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
        SeqCst => intrinsics::atomic_nand(dst, val),
    }
}
/// Atomically performs a bitwise OR of `val` into `*dst`, returning the
/// previous value.
///
/// # Safety
///
/// `dst` must be non-null, properly aligned, and valid for atomic reads and
/// writes of a primitive integer type supported by the `atomic_or*`
/// intrinsics.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_or_acq(dst, val),
        Release => intrinsics::atomic_or_rel(dst, val),
        AcqRel => intrinsics::atomic_or_acqrel(dst, val),
        Relaxed => intrinsics::atomic_or_relaxed(dst, val),
        SeqCst => intrinsics::atomic_or(dst, val),
    }
}
/// Atomically performs a bitwise XOR of `val` into `*dst`, returning the
/// previous value.
///
/// # Safety
///
/// `dst` must be non-null, properly aligned, and valid for atomic reads and
/// writes of a primitive integer type supported by the `atomic_xor*`
/// intrinsics.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xor_acq(dst, val),
        Release => intrinsics::atomic_xor_rel(dst, val),
        AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xor(dst, val),
    }
}
/// Atomically stores the maximum of `*dst` and `val` into `*dst` using a
/// *signed* comparison, returning the previous value.
///
/// # Safety
///
/// `dst` must be non-null, properly aligned, and valid for atomic reads and
/// writes of a primitive signed integer type supported by the `atomic_max*`
/// intrinsics.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_max<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_max_acq(dst, val),
        Release => intrinsics::atomic_max_rel(dst, val),
        AcqRel => intrinsics::atomic_max_acqrel(dst, val),
        Relaxed => intrinsics::atomic_max_relaxed(dst, val),
        SeqCst => intrinsics::atomic_max(dst, val),
    }
}
/// Atomically stores the minimum of `*dst` and `val` into `*dst` using a
/// *signed* comparison, returning the previous value.
///
/// # Safety
///
/// `dst` must be non-null, properly aligned, and valid for atomic reads and
/// writes of a primitive signed integer type supported by the `atomic_min*`
/// intrinsics.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_min<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_min_acq(dst, val),
        Release => intrinsics::atomic_min_rel(dst, val),
        AcqRel => intrinsics::atomic_min_acqrel(dst, val),
        Relaxed => intrinsics::atomic_min_relaxed(dst, val),
        SeqCst => intrinsics::atomic_min(dst, val),
    }
}
/// Atomically stores the maximum of `*dst` and `val` into `*dst` using an
/// *unsigned* comparison, returning the previous value.
///
/// (The original comment said "signed comparison" — a copy-paste error: the
/// `atomic_umax*` intrinsics are the unsigned flavor.)
///
/// # Safety
///
/// `dst` must be non-null, properly aligned, and valid for atomic reads and
/// writes of a primitive unsigned integer type supported by the
/// `atomic_umax*` intrinsics.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_umax<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_umax_acq(dst, val),
        Release => intrinsics::atomic_umax_rel(dst, val),
        AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
        Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
        SeqCst => intrinsics::atomic_umax(dst, val),
    }
}
/// Atomically stores the minimum of `*dst` and `val` into `*dst` using an
/// *unsigned* comparison, returning the previous value.
///
/// (The original comment said "signed comparison" — a copy-paste error: the
/// `atomic_umin*` intrinsics are the unsigned flavor.)
///
/// # Safety
///
/// `dst` must be non-null, properly aligned, and valid for atomic reads and
/// writes of a primitive unsigned integer type supported by the
/// `atomic_umin*` intrinsics.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_umin<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_umin_acq(dst, val),
        Release => intrinsics::atomic_umin_rel(dst, val),
        AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
        Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
        SeqCst => intrinsics::atomic_umin(dst, val),
    }
}
2320 /// An atomic fence.
2322 /// Depending on the specified order, a fence prevents the compiler and CPU from
2323 /// reordering certain types of memory operations around it.
2324 /// That creates synchronizes-with relationships between it and atomic operations
2325 /// or fences in other threads.
2327 /// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
2328 /// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
2329 /// exist operations X and Y, both operating on some atomic object 'M' such
2330 /// that A is sequenced before X, Y is synchronized before B and Y observes
2331 /// the change to M. This provides a happens-before dependence between A and B.
2334 /// Thread 1 Thread 2
2336 /// fence(Release); A --------------
2337 /// x.store(3, Relaxed); X --------- |
2340 /// -------------> Y if x.load(Relaxed) == 3 {
2341 /// |-------> B fence(Acquire);
2346 /// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
2349 /// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
2350 /// and [`Release`] semantics, participates in the global program order of the
2351 /// other [`SeqCst`] operations and/or fences.
2353 /// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
2357 /// Panics if `order` is [`Relaxed`].
2362 /// use std::sync::atomic::AtomicBool;
2363 /// use std::sync::atomic::fence;
2364 /// use std::sync::atomic::Ordering;
2366 /// // A mutual exclusion primitive based on spinlock.
2367 /// pub struct Mutex {
2368 /// flag: AtomicBool,
2372 /// pub fn new() -> Mutex {
2374 /// flag: AtomicBool::new(false),
2378 /// pub fn lock(&self) {
2379 /// while !self.flag.compare_and_swap(false, true, Ordering::Relaxed) {}
2380 /// // This fence synchronizes-with store in `unlock`.
2381 /// fence(Ordering::Acquire);
2384 /// pub fn unlock(&self) {
2385 /// self.flag.store(false, Ordering::Release);
2390 /// [`Ordering`]: enum.Ordering.html
2391 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2392 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2393 /// [`Release`]: enum.Ordering.html#variant.Release
2394 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2395 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2397 #[stable(feature = "rust1", since = "1.0.0")]
2398 #[cfg_attr(target_arch = "wasm32", allow(unused_variables))]
2399 pub fn fence(order: Ordering) {
2400 // On wasm32 it looks like fences aren't implemented in LLVM yet in that
2401 // they will cause LLVM to abort. The wasm instruction set doesn't have
2402 // fences right now. There's discussion online about the best way for tools
2403 // to conventionally implement fences at
2404 // https://github.com/WebAssembly/tool-conventions/issues/59. We should
2405 // follow that discussion and implement a solution when one comes about!
2406 #[cfg(not(target_arch = "wasm32"))]
2409 Acquire => intrinsics::atomic_fence_acq(),
2410 Release => intrinsics::atomic_fence_rel(),
2411 AcqRel => intrinsics::atomic_fence_acqrel(),
2412 SeqCst => intrinsics::atomic_fence(),
2413 Relaxed => panic!("there is no such thing as a relaxed fence"),
2419 /// A compiler memory fence.
2421 /// `compiler_fence` does not emit any machine code, but restricts the kinds
2422 /// of memory re-ordering the compiler is allowed to do. Specifically, depending on
2423 /// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
2424 /// or writes from before or after the call to the other side of the call to
2425 /// `compiler_fence`. Note that it does **not** prevent the *hardware*
2426 /// from doing such re-ordering. This is not a problem in a single-threaded,
2427 /// execution context, but when other threads may modify memory at the same
2428 /// time, stronger synchronization primitives such as [`fence`] are required.
2430 /// The re-ordering prevented by the different ordering semantics are:
2432 /// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
2433 /// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
2434 /// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
2435 /// - with [`AcqRel`], both of the above rules are enforced.
2437 /// `compiler_fence` is generally only useful for preventing a thread from
2438 /// racing *with itself*. That is, if a given thread is executing one piece
2439 /// of code, and is then interrupted, and starts executing code elsewhere
2440 /// (while still in the same thread, and conceptually still on the same
2441 /// core). In traditional programs, this can only occur when a signal
2442 /// handler is registered. In more low-level code, such situations can also
2443 /// arise when handling interrupts, when implementing green threads with
2444 /// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
2445 /// discussion of [memory barriers].
2449 /// Panics if `order` is [`Relaxed`].
2453 /// Without `compiler_fence`, the `assert_eq!` in following code
2454 /// is *not* guaranteed to succeed, despite everything happening in a single thread.
2455 /// To see why, remember that the compiler is free to swap the stores to
2456 /// `IMPORTANT_VARIABLE` and `IS_READ` since they are both
2457 /// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
2458 /// after `IS_READY` is updated, then the signal handler will see
2459 /// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
2460 /// Using a `compiler_fence` remedies this situation.
2463 /// use std::sync::atomic::{AtomicBool, AtomicUsize};
2464 /// use std::sync::atomic::Ordering;
2465 /// use std::sync::atomic::compiler_fence;
2467 /// static IMPORTANT_VARIABLE: AtomicUsize = AtomicUsize::new(0);
2468 /// static IS_READY: AtomicBool = AtomicBool::new(false);
2471 /// IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
2472 /// // prevent earlier writes from being moved beyond this point
2473 /// compiler_fence(Ordering::Release);
2474 /// IS_READY.store(true, Ordering::Relaxed);
2477 /// fn signal_handler() {
2478 /// if IS_READY.load(Ordering::Relaxed) {
2479 /// assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
2484 /// [`fence`]: fn.fence.html
2485 /// [`Ordering`]: enum.Ordering.html
2486 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2487 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2488 /// [`Release`]: enum.Ordering.html#variant.Release
2489 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2490 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2491 /// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
2493 #[stable(feature = "compiler_fences", since = "1.21.0")]
2494 pub fn compiler_fence(order: Ordering) {
2497 Acquire => intrinsics::atomic_singlethreadfence_acq(),
2498 Release => intrinsics::atomic_singlethreadfence_rel(),
2499 AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
2500 SeqCst => intrinsics::atomic_singlethreadfence(),
2501 Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
2507 #[cfg(target_has_atomic = "8")]
2508 #[stable(feature = "atomic_debug", since = "1.3.0")]
2509 impl fmt::Debug for AtomicBool {
2510 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2511 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2515 #[cfg(target_has_atomic = "ptr")]
2516 #[stable(feature = "atomic_debug", since = "1.3.0")]
2517 impl<T> fmt::Debug for AtomicPtr<T> {
2518 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2519 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2523 #[cfg(target_has_atomic = "ptr")]
2524 #[stable(feature = "atomic_pointer", since = "1.24.0")]
2525 impl<T> fmt::Pointer for AtomicPtr<T> {
2526 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2527 fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)