1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
13 //! Atomic types provide primitive shared-memory communication between
14 //! threads, and are the building blocks of other concurrent types.
17 //! This module defines atomic versions of a select number of primitive
18 //! types, including [`AtomicBool`], [`AtomicIsize`], and [`AtomicUsize`].
19 //! Atomic types present operations that, when used correctly, synchronize
20 //! updates between threads.
22 //! [`AtomicBool`]: struct.AtomicBool.html
23 //! [`AtomicIsize`]: struct.AtomicIsize.html
24 //! [`AtomicUsize`]: struct.AtomicUsize.html
26 //! Each method takes an [`Ordering`] which represents the strength of
27 //! the memory barrier for that operation. These orderings are the
28 //! same as [LLVM atomic orderings][1]. For more information see the [nomicon][2].
30 //! [`Ordering`]: enum.Ordering.html
32 //! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
33 //! [2]: ../../../nomicon/atomics.html
35 //! Atomic variables are safe to share between threads (they implement [`Sync`])
36 //! but they do not themselves provide the mechanism for sharing and follow the
37 //! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
38 //! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
39 //! atomically-reference-counted shared pointer).
41 //! [`Sync`]: ../../marker/trait.Sync.html
42 //! [arc]: ../../../std/sync/struct.Arc.html
44 //! Most atomic types may be stored in static variables, initialized using
45 //! the provided static initializers like [`ATOMIC_BOOL_INIT`]. Atomic statics
46 //! are often used for lazy global initialization.
48 //! [`ATOMIC_BOOL_INIT`]: constant.ATOMIC_BOOL_INIT.html
52 //! A simple spinlock:
55 //! use std::sync::Arc;
56 //! use std::sync::atomic::{AtomicUsize, Ordering};
60 //! let spinlock = Arc::new(AtomicUsize::new(1));
62 //! let spinlock_clone = spinlock.clone();
63 //! let thread = thread::spawn(move|| {
64 //! spinlock_clone.store(0, Ordering::SeqCst);
67 //! // Wait for the other thread to release the lock
68 //! while spinlock.load(Ordering::SeqCst) != 0 {}
70 //! if let Err(panic) = thread.join() {
71 //! println!("Thread had an error: {:?}", panic);
76 //! Keep a global count of live threads:
79 //! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
81 //! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
83 //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
84 //! println!("live threads: {}", old_thread_count + 1);
87 #![stable(feature = "rust1", since = "1.0.0")]
88 #![cfg_attr(not(target_has_atomic = "8"), allow(dead_code))]
89 #![cfg_attr(not(target_has_atomic = "8"), allow(unused_imports))]
91 use self::Ordering::*;
97 /// Save power or switch hyperthreads in a busy-wait spin-loop.
99 /// This function is deliberately more primitive than
100 /// [`std::thread::yield_now`](../../../std/thread/fn.yield_now.html) and
101 /// does not directly yield to the system's scheduler.
102 /// In some cases it might be useful to use a combination of both functions.
103 /// Careful benchmarking is advised.
105 /// On some platforms this function may not do anything at all.
107 #[stable(feature = "spin_loop_hint", since = "1.24.0")]
108 pub fn spin_loop_hint() {
109 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
// On x86/x86_64, PAUSE hints to the CPU that this is a spin-wait loop
// (reduces power and avoids memory-order mis-speculation on loop exit).
111 asm!("pause" ::: "memory" : "volatile");
114 #[cfg(target_arch = "aarch64")]
// On AArch64 the equivalent spin-wait hint is YIELD.
116 asm!("yield" ::: "memory" : "volatile");
// NOTE(review): this listing elides interior lines — the `unsafe { ... }`
// wrappers around the asm! invocations and the function's closing brace
// are not visible in this excerpt.
120 /// A boolean type which can be safely shared between threads.
122 /// This type has the same in-memory representation as a [`bool`].
124 /// [`bool`]: ../../../std/primitive.bool.html
125 #[cfg(target_has_atomic = "8")]
126 #[stable(feature = "rust1", since = "1.0.0")]
127 pub struct AtomicBool {
// Backing storage is a `u8` inside an `UnsafeCell` (the field line is not
// visible in this excerpt; `new` constructs `UnsafeCell::new(v as u8)`).
131 #[cfg(target_has_atomic = "8")]
132 #[stable(feature = "rust1", since = "1.0.0")]
133 impl Default for AtomicBool {
134 /// Creates an `AtomicBool` initialized to `false`.
135 fn default() -> Self {
140 // Send is implicitly implemented for AtomicBool.
141 #[cfg(target_has_atomic = "8")]
142 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: all shared mutation of the inner byte goes through atomic
// operations (see the `atomic_*` calls below), so `&AtomicBool` can be
// shared across threads without data races.
143 unsafe impl Sync for AtomicBool {}
145 /// A raw pointer type which can be safely shared between threads.
147 /// This type has the same in-memory representation as a `*mut T`.
148 #[cfg(target_has_atomic = "ptr")]
149 #[stable(feature = "rust1", since = "1.0.0")]
150 pub struct AtomicPtr<T> {
151 p: UnsafeCell<*mut T>,
154 #[cfg(target_has_atomic = "ptr")]
155 #[stable(feature = "rust1", since = "1.0.0")]
156 impl<T> Default for AtomicPtr<T> {
157 /// Creates a null `AtomicPtr<T>`.
158 fn default() -> AtomicPtr<T> {
159 AtomicPtr::new(::ptr::null_mut())
163 #[cfg(target_has_atomic = "ptr")]
164 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: only the pointer *value* is ever read or written here, and
// always through atomic operations; the pointee is never accessed, so
// Send/Sync hold regardless of `T`.
165 unsafe impl<T> Send for AtomicPtr<T> {}
166 #[cfg(target_has_atomic = "ptr")]
167 #[stable(feature = "rust1", since = "1.0.0")]
168 unsafe impl<T> Sync for AtomicPtr<T> {}
170 /// Atomic memory orderings
172 /// Memory orderings limit the ways that both the compiler and CPU may reorder
173 /// instructions around atomic operations. At its most restrictive,
174 /// "sequentially consistent" atomics allow neither reads nor writes
175 /// to be moved either before or after the atomic operation; on the other end
176 /// "relaxed" atomics allow all reorderings.
178 /// Rust's memory orderings are [the same as
179 /// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
181 /// For more information see the [nomicon].
183 /// [nomicon]: ../../../nomicon/atomics.html
184 #[stable(feature = "rust1", since = "1.0.0")]
185 #[derive(Copy, Clone, Debug)]
// NOTE(review): the `pub enum Ordering {` line and the variant identifier
// lines are elided from this excerpt; only the per-variant doc comments and
// stability attributes remain. The docs below correspond, in order, to the
// Relaxed, Release, Acquire, AcqRel, and SeqCst variants (names inferred
// from the link anchors and the `use self::Ordering::*;` import — confirm
// against the full source).
187 /// No ordering constraints, only atomic operations.
189 /// Corresponds to LLVM's [`Monotonic`] ordering.
191 /// [`Monotonic`]: http://llvm.org/docs/Atomics.html#monotonic
192 #[stable(feature = "rust1", since = "1.0.0")]
194 /// When coupled with a store, all previous writes become visible
195 /// to the other threads that perform a load with [`Acquire`] ordering
196 /// on the same value.
198 /// [`Acquire`]: http://llvm.org/docs/Atomics.html#acquire
199 #[stable(feature = "rust1", since = "1.0.0")]
201 /// When coupled with a load, all subsequent loads will see data
202 /// written before a store with [`Release`] ordering on the same value
203 /// in other threads.
205 /// [`Release`]: http://llvm.org/docs/Atomics.html#release
206 #[stable(feature = "rust1", since = "1.0.0")]
208 /// Has the effects of both [`Acquire`] and [`Release`] together.
210 /// This ordering is only applicable for operations that combine both loads and stores.
212 /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
214 /// [`Acquire`]: http://llvm.org/docs/Atomics.html#acquire
215 /// [`Release`]: http://llvm.org/docs/Atomics.html#release
216 #[stable(feature = "rust1", since = "1.0.0")]
218 /// Like `AcqRel` with the additional guarantee that all threads see all
219 /// sequentially consistent operations in the same order.
220 #[stable(feature = "rust1", since = "1.0.0")]
222 // Prevent exhaustive matching to allow for future extension
224 #[unstable(feature = "future_atomic_orderings", issue = "0")]
228 /// An [`AtomicBool`] initialized to `false`.
230 /// [`AtomicBool`]: struct.AtomicBool.html
231 #[cfg(target_has_atomic = "8")]
232 #[stable(feature = "rust1", since = "1.0.0")]
// Usable as a static initializer: `static FLAG: AtomicBool = ATOMIC_BOOL_INIT;`
233 pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
235 #[cfg(target_has_atomic = "8")]
// NOTE(review): the `impl AtomicBool {` header line is elided from this
// excerpt; the methods below belong to that inherent impl.
237 /// Creates a new `AtomicBool`.
242 /// use std::sync::atomic::AtomicBool;
244 /// let atomic_true = AtomicBool::new(true);
245 /// let atomic_false = AtomicBool::new(false);
248 #[stable(feature = "rust1", since = "1.0.0")]
249 pub const fn new(v: bool) -> AtomicBool {
// Stored as a `u8` (0 or 1) so the 8-bit atomic intrinsics can be used.
250 AtomicBool { v: UnsafeCell::new(v as u8) }
253 /// Returns a mutable reference to the underlying [`bool`].
255 /// This is safe because the mutable reference guarantees that no other threads are
256 /// concurrently accessing the atomic data.
258 /// [`bool`]: ../../../std/primitive.bool.html
263 /// use std::sync::atomic::{AtomicBool, Ordering};
265 /// let mut some_bool = AtomicBool::new(true);
266 /// assert_eq!(*some_bool.get_mut(), true);
267 /// *some_bool.get_mut() = false;
268 /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
271 #[stable(feature = "atomic_access", since = "1.15.0")]
272 pub fn get_mut(&mut self) -> &mut bool {
// Reinterprets the `u8` storage as `bool`; sound only while the stored
// byte is 0 or 1, an invariant every mutator of this type maintains.
273 unsafe { &mut *(self.v.get() as *mut bool) }
276 /// Consumes the atomic and returns the contained value.
278 /// This is safe because passing `self` by value guarantees that no other threads are
279 /// concurrently accessing the atomic data.
284 /// use std::sync::atomic::AtomicBool;
286 /// let some_bool = AtomicBool::new(true);
287 /// assert_eq!(some_bool.into_inner(), true);
290 #[stable(feature = "atomic_access", since = "1.15.0")]
291 pub fn into_inner(self) -> bool {
// Nonzero byte maps to `true`.
292 self.v.into_inner() != 0
295 /// Loads a value from the bool.
297 /// `load` takes an [`Ordering`] argument which describes the memory ordering
298 /// of this operation.
302 /// Panics if `order` is [`Release`] or [`AcqRel`].
304 /// [`Ordering`]: enum.Ordering.html
305 /// [`Release`]: enum.Ordering.html#variant.Release
306 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
311 /// use std::sync::atomic::{AtomicBool, Ordering};
313 /// let some_bool = AtomicBool::new(true);
315 /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
318 #[stable(feature = "rust1", since = "1.0.0")]
319 pub fn load(&self, order: Ordering) -> bool {
// Atomic load of the backing byte; nonzero maps to `true`.
320 unsafe { atomic_load(self.v.get(), order) != 0 }
323 /// Stores a value into the bool.
325 /// `store` takes an [`Ordering`] argument which describes the memory ordering
326 /// of this operation.
328 /// [`Ordering`]: enum.Ordering.html
333 /// use std::sync::atomic::{AtomicBool, Ordering};
335 /// let some_bool = AtomicBool::new(true);
337 /// some_bool.store(false, Ordering::Relaxed);
338 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
343 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
345 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
346 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
348 #[stable(feature = "rust1", since = "1.0.0")]
349 pub fn store(&self, val: bool, order: Ordering) {
// Atomic store of the bool widened to its backing byte (0 or 1).
351 atomic_store(self.v.get(), val as u8, order);
355 /// Stores a value into the bool, returning the previous value.
357 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
358 /// of this operation.
360 /// [`Ordering`]: enum.Ordering.html
365 /// use std::sync::atomic::{AtomicBool, Ordering};
367 /// let some_bool = AtomicBool::new(true);
369 /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
370 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
373 #[stable(feature = "rust1", since = "1.0.0")]
374 #[cfg(any(stage0, target_has_atomic = "cas"))]
375 pub fn swap(&self, val: bool, order: Ordering) -> bool {
376 unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
379 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
381 /// The return value is always the previous value. If it is equal to `current`, then the value
384 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
385 /// ordering of this operation.
387 /// [`Ordering`]: enum.Ordering.html
388 /// [`bool`]: ../../../std/primitive.bool.html
393 /// use std::sync::atomic::{AtomicBool, Ordering};
395 /// let some_bool = AtomicBool::new(true);
397 /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
398 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
400 /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
401 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
404 #[stable(feature = "rust1", since = "1.0.0")]
405 #[cfg(any(stage0, target_has_atomic = "cas"))]
406 pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
// Delegates to `compare_exchange`, with the failure ordering derived
// from `order` via `strongest_failure_ordering`.
407 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
413 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
415 /// The return value is a result indicating whether the new value was written and containing
416 /// the previous value. On success this value is guaranteed to be equal to `current`.
418 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
419 /// ordering of this operation. The first describes the required ordering if the
420 /// operation succeeds while the second describes the required ordering when the
421 /// operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and must
422 /// be equivalent or weaker than the success ordering.
424 /// [`bool`]: ../../../std/primitive.bool.html
425 /// [`Ordering`]: enum.Ordering.html
426 /// [`Release`]: enum.Ordering.html#variant.Release
427 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
432 /// use std::sync::atomic::{AtomicBool, Ordering};
434 /// let some_bool = AtomicBool::new(true);
436 /// assert_eq!(some_bool.compare_exchange(true,
438 /// Ordering::Acquire,
439 /// Ordering::Relaxed),
441 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
443 /// assert_eq!(some_bool.compare_exchange(true, true,
444 /// Ordering::SeqCst,
445 /// Ordering::Acquire),
447 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
450 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
451 #[cfg(any(stage0, target_has_atomic = "cas"))]
452 pub fn compare_exchange(&self,
457 -> Result<bool, bool> {
// CAS on the backing byte; map the integer result back to bool on both paths.
459 atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
462 Err(x) => Err(x != 0),
466 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
468 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
469 /// comparison succeeds, which can result in more efficient code on some platforms. The
470 /// return value is a result indicating whether the new value was written and containing the
473 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
474 /// ordering of this operation. The first describes the required ordering if the operation
475 /// succeeds while the second describes the required ordering when the operation fails. The
476 /// failure ordering can't be [`Release`] or [`AcqRel`] and must be equivalent or
477 /// weaker than the success ordering.
479 /// [`bool`]: ../../../std/primitive.bool.html
480 /// [`compare_exchange`]: #method.compare_exchange
481 /// [`Ordering`]: enum.Ordering.html
482 /// [`Release`]: enum.Ordering.html#variant.Release
483 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
488 /// use std::sync::atomic::{AtomicBool, Ordering};
490 /// let val = AtomicBool::new(false);
493 /// let mut old = val.load(Ordering::Relaxed);
495 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
497 /// Err(x) => old = x,
502 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
// NOTE(review): unlike the other CAS-based methods in this impl, no
// `#[cfg(any(stage0, target_has_atomic = "cas"))]` attribute is visible
// here — confirm whether it was elided from this excerpt.
503 pub fn compare_exchange_weak(&self,
508 -> Result<bool, bool> {
510 atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
513 Err(x) => Err(x != 0),
517 /// Logical "and" with a boolean value.
519 /// Performs a logical "and" operation on the current value and the argument `val`, and sets
520 /// the new value to the result.
522 /// Returns the previous value.
527 /// use std::sync::atomic::{AtomicBool, Ordering};
529 /// let foo = AtomicBool::new(true);
530 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
531 /// assert_eq!(foo.load(Ordering::SeqCst), false);
533 /// let foo = AtomicBool::new(true);
534 /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
535 /// assert_eq!(foo.load(Ordering::SeqCst), true);
537 /// let foo = AtomicBool::new(false);
538 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
539 /// assert_eq!(foo.load(Ordering::SeqCst), false);
542 #[stable(feature = "rust1", since = "1.0.0")]
543 #[cfg(any(stage0, target_has_atomic = "cas"))]
544 pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
// `val as u8` is 0 or 1, so AND keeps the stored byte a valid bool.
545 unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
548 /// Logical "nand" with a boolean value.
550 /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
551 /// the new value to the result.
553 /// Returns the previous value.
558 /// use std::sync::atomic::{AtomicBool, Ordering};
560 /// let foo = AtomicBool::new(true);
561 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
562 /// assert_eq!(foo.load(Ordering::SeqCst), true);
564 /// let foo = AtomicBool::new(true);
565 /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
566 /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
567 /// assert_eq!(foo.load(Ordering::SeqCst), false);
569 /// let foo = AtomicBool::new(false);
570 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
571 /// assert_eq!(foo.load(Ordering::SeqCst), true);
574 #[stable(feature = "rust1", since = "1.0.0")]
575 #[cfg(any(stage0, target_has_atomic = "cas"))]
576 pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
577 // We can't use atomic_nand here because it can result in a bool with
578 // an invalid value. This happens because the atomic operation is done
579 // with an 8-bit integer internally, which would set the upper 7 bits.
580 // So we just use fetch_xor or swap instead.
// NOTE(review): the `if val { ... } else { ... }` branch lines are not
// visible in this excerpt; the two arms below are the bodies of that
// conditional (val == true -> xor with true; val == false -> set to true).
583 // We must invert the bool.
584 self.fetch_xor(true, order)
586 // !(x & false) == true
587 // We must set the bool to true.
588 self.swap(true, order)
592 /// Logical "or" with a boolean value.
594 /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
595 /// new value to the result.
597 /// Returns the previous value.
602 /// use std::sync::atomic::{AtomicBool, Ordering};
604 /// let foo = AtomicBool::new(true);
605 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
606 /// assert_eq!(foo.load(Ordering::SeqCst), true);
608 /// let foo = AtomicBool::new(true);
609 /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
610 /// assert_eq!(foo.load(Ordering::SeqCst), true);
612 /// let foo = AtomicBool::new(false);
613 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
614 /// assert_eq!(foo.load(Ordering::SeqCst), false);
617 #[stable(feature = "rust1", since = "1.0.0")]
618 #[cfg(any(stage0, target_has_atomic = "cas"))]
619 pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
// `val as u8` is 0 or 1, so OR keeps the stored byte a valid bool.
620 unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
623 /// Logical "xor" with a boolean value.
625 /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
626 /// the new value to the result.
628 /// Returns the previous value.
633 /// use std::sync::atomic::{AtomicBool, Ordering};
635 /// let foo = AtomicBool::new(true);
636 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
637 /// assert_eq!(foo.load(Ordering::SeqCst), true);
639 /// let foo = AtomicBool::new(true);
640 /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
641 /// assert_eq!(foo.load(Ordering::SeqCst), false);
643 /// let foo = AtomicBool::new(false);
644 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
645 /// assert_eq!(foo.load(Ordering::SeqCst), false);
648 #[stable(feature = "rust1", since = "1.0.0")]
649 #[cfg(any(stage0, target_has_atomic = "cas"))]
650 pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
651 unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
655 #[cfg(target_has_atomic = "ptr")]
656 impl<T> AtomicPtr<T> {
657 /// Creates a new `AtomicPtr`.
662 /// use std::sync::atomic::AtomicPtr;
664 /// let ptr = &mut 5;
665 /// let atomic_ptr = AtomicPtr::new(ptr);
668 #[stable(feature = "rust1", since = "1.0.0")]
669 pub const fn new(p: *mut T) -> AtomicPtr<T> {
670 AtomicPtr { p: UnsafeCell::new(p) }
673 /// Returns a mutable reference to the underlying pointer.
675 /// This is safe because the mutable reference guarantees that no other threads are
676 /// concurrently accessing the atomic data.
681 /// use std::sync::atomic::{AtomicPtr, Ordering};
683 /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
684 /// *atomic_ptr.get_mut() = &mut 5;
685 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
688 #[stable(feature = "atomic_access", since = "1.15.0")]
689 pub fn get_mut(&mut self) -> &mut *mut T {
// `&mut self` proves exclusive access, so reading through the
// UnsafeCell non-atomically is sound here.
690 unsafe { &mut *self.p.get() }
693 /// Consumes the atomic and returns the contained value.
695 /// This is safe because passing `self` by value guarantees that no other threads are
696 /// concurrently accessing the atomic data.
701 /// use std::sync::atomic::AtomicPtr;
703 /// let atomic_ptr = AtomicPtr::new(&mut 5);
704 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
707 #[stable(feature = "atomic_access", since = "1.15.0")]
708 pub fn into_inner(self) -> *mut T {
// NOTE(review): the method body is not visible in this excerpt.
712 /// Loads a value from the pointer.
714 /// `load` takes an [`Ordering`] argument which describes the memory ordering
715 /// of this operation.
719 /// Panics if `order` is [`Release`] or [`AcqRel`].
721 /// [`Ordering`]: enum.Ordering.html
722 /// [`Release`]: enum.Ordering.html#variant.Release
723 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
728 /// use std::sync::atomic::{AtomicPtr, Ordering};
730 /// let ptr = &mut 5;
731 /// let some_ptr = AtomicPtr::new(ptr);
733 /// let value = some_ptr.load(Ordering::Relaxed);
736 #[stable(feature = "rust1", since = "1.0.0")]
737 pub fn load(&self, order: Ordering) -> *mut T {
// The pointer is loaded through the usize-wide atomic primitive and cast
// back to `*mut T`; `*mut T` and `usize` have the same size on targets
// with `target_has_atomic = "ptr"`.
738 unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
741 /// Stores a value into the pointer.
743 /// `store` takes an [`Ordering`] argument which describes the memory ordering
744 /// of this operation.
746 /// [`Ordering`]: enum.Ordering.html
751 /// use std::sync::atomic::{AtomicPtr, Ordering};
753 /// let ptr = &mut 5;
754 /// let some_ptr = AtomicPtr::new(ptr);
756 /// let other_ptr = &mut 10;
758 /// some_ptr.store(other_ptr, Ordering::Relaxed);
763 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
765 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
766 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
768 #[stable(feature = "rust1", since = "1.0.0")]
769 pub fn store(&self, ptr: *mut T, order: Ordering) {
// Symmetric with `load`: the pointer is stored as a usize.
771 atomic_store(self.p.get() as *mut usize, ptr as usize, order);
775 /// Stores a value into the pointer, returning the previous value.
777 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
778 /// of this operation.
780 /// [`Ordering`]: enum.Ordering.html
785 /// use std::sync::atomic::{AtomicPtr, Ordering};
787 /// let ptr = &mut 5;
788 /// let some_ptr = AtomicPtr::new(ptr);
790 /// let other_ptr = &mut 10;
792 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
795 #[stable(feature = "rust1", since = "1.0.0")]
796 #[cfg(any(stage0, target_has_atomic = "cas"))]
797 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
798 unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
801 /// Stores a value into the pointer if the current value is the same as the `current` value.
803 /// The return value is always the previous value. If it is equal to `current`, then the value
806 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
807 /// ordering of this operation.
809 /// [`Ordering`]: enum.Ordering.html
814 /// use std::sync::atomic::{AtomicPtr, Ordering};
816 /// let ptr = &mut 5;
817 /// let some_ptr = AtomicPtr::new(ptr);
819 /// let other_ptr = &mut 10;
820 /// let another_ptr = &mut 10;
822 /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
825 #[stable(feature = "rust1", since = "1.0.0")]
826 #[cfg(any(stage0, target_has_atomic = "cas"))]
827 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
// Delegates to `compare_exchange`, with the failure ordering derived
// from `order` via `strongest_failure_ordering`.
828 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
834 /// Stores a value into the pointer if the current value is the same as the `current` value.
836 /// The return value is a result indicating whether the new value was written and containing
837 /// the previous value. On success this value is guaranteed to be equal to `current`.
839 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
840 /// ordering of this operation. The first describes the required ordering if
841 /// the operation succeeds while the second describes the required ordering when
842 /// the operation fails. The failure ordering can't be [`Release`] or [`AcqRel`]
843 /// and must be equivalent or weaker than the success ordering.
845 /// [`Ordering`]: enum.Ordering.html
846 /// [`Release`]: enum.Ordering.html#variant.Release
847 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
852 /// use std::sync::atomic::{AtomicPtr, Ordering};
854 /// let ptr = &mut 5;
855 /// let some_ptr = AtomicPtr::new(ptr);
857 /// let other_ptr = &mut 10;
858 /// let another_ptr = &mut 10;
860 /// let value = some_ptr.compare_exchange(other_ptr, another_ptr,
861 /// Ordering::SeqCst, Ordering::Relaxed);
864 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
865 #[cfg(any(stage0, target_has_atomic = "cas"))]
866 pub fn compare_exchange(&self,
871 -> Result<*mut T, *mut T> {
// CAS on the pointer reinterpreted as usize; map the integer result
// back to pointers on both the Ok and Err paths.
873 let res = atomic_compare_exchange(self.p.get() as *mut usize,
879 Ok(x) => Ok(x as *mut T),
880 Err(x) => Err(x as *mut T),
885 /// Stores a value into the pointer if the current value is the same as the `current` value.
887 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
888 /// comparison succeeds, which can result in more efficient code on some platforms. The
889 /// return value is a result indicating whether the new value was written and containing the
892 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
893 /// ordering of this operation. The first describes the required ordering if the operation
894 /// succeeds while the second describes the required ordering when the operation fails. The
895 /// failure ordering can't be [`Release`] or [`AcqRel`] and must be equivalent or
896 /// weaker than the success ordering.
898 /// [`compare_exchange`]: #method.compare_exchange
899 /// [`Ordering`]: enum.Ordering.html
900 /// [`Release`]: enum.Ordering.html#variant.Release
901 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
906 /// use std::sync::atomic::{AtomicPtr, Ordering};
908 /// let some_ptr = AtomicPtr::new(&mut 5);
910 /// let new = &mut 10;
911 /// let mut old = some_ptr.load(Ordering::Relaxed);
913 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
915 /// Err(x) => old = x,
920 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
// NOTE(review): no `#[cfg(any(stage0, target_has_atomic = "cas"))]`
// attribute is visible here, unlike the other CAS methods — confirm
// whether it was elided from this excerpt.
921 pub fn compare_exchange_weak(&self,
926 -> Result<*mut T, *mut T> {
928 let res = atomic_compare_exchange_weak(self.p.get() as *mut usize,
934 Ok(x) => Ok(x as *mut T),
935 Err(x) => Err(x as *mut T),
941 #[cfg(target_has_atomic = "8")]
942 #[stable(feature = "atomic_bool_from", since = "1.24.0")]
943 impl From<bool> for AtomicBool {
/// Converts a `bool` into an `AtomicBool`.
945 fn from(b: bool) -> Self { Self::new(b) }
948 #[cfg(target_has_atomic = "ptr")]
949 #[stable(feature = "atomic_from", since = "1.23.0")]
950 impl<T> From<*mut T> for AtomicPtr<T> {
/// Converts a `*mut T` into an `AtomicPtr<T>`.
952 fn from(p: *mut T) -> Self { Self::new(p) }
955 #[cfg(target_has_atomic = "ptr")]
956 macro_rules! atomic_int {
963 $s_int_type:expr, $int_ref:expr,
965 $min_fn:ident, $max_fn:ident,
966 $int_type:ident $atomic_type:ident $atomic_init:ident) => {
967 /// An integer type which can be safely shared between threads.
969 /// This type has the same in-memory representation as the underlying
974 /// ). For more about the differences between atomic types and
975 /// non-atomic types, please see the [module-level documentation].
977 /// [module-level documentation]: index.html
979 pub struct $atomic_type {
980 v: UnsafeCell<$int_type>,
983 /// An atomic integer initialized to `0`.
985 pub const $atomic_init: $atomic_type = $atomic_type::new(0);
988 impl Default for $atomic_type {
989 fn default() -> Self {
990 Self::new(Default::default())
995 impl From<$int_type> for $atomic_type {
997 fn from(v: $int_type) -> Self { Self::new(v) }
1001 impl fmt::Debug for $atomic_type {
1002 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1003 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
1007 // Send is implicitly implemented.
1009 unsafe impl Sync for $atomic_type {}
1013 concat!("Creates a new atomic integer.
1018 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1020 let atomic_forty_two = ", stringify!($atomic_type), "::new(42);
1024 pub const fn new(v: $int_type) -> Self {
1025 $atomic_type {v: UnsafeCell::new(v)}
1030 concat!("Returns a mutable reference to the underlying integer.
1032 This is safe because the mutable reference guarantees that no other threads are
1033 concurrently accessing the atomic data.
1038 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1040 let mut some_var = ", stringify!($atomic_type), "::new(10);
1041 assert_eq!(*some_var.get_mut(), 10);
1042 *some_var.get_mut() = 5;
1043 assert_eq!(some_var.load(Ordering::SeqCst), 5);
1047 pub fn get_mut(&mut self) -> &mut $int_type {
1048 unsafe { &mut *self.v.get() }
1053 concat!("Consumes the atomic and returns the contained value.
1055 This is safe because passing `self` by value guarantees that no other threads are
1056 concurrently accessing the atomic data.
1061 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1063 let some_var = ", stringify!($atomic_type), "::new(5);
1064 assert_eq!(some_var.into_inner(), 5);
1068 pub fn into_inner(self) -> $int_type {
1074 concat!("Loads a value from the atomic integer.
1076 `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1080 Panics if `order` is [`Release`] or [`AcqRel`].
1082 [`Ordering`]: enum.Ordering.html
1083 [`Release`]: enum.Ordering.html#variant.Release
1084 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1089 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1091 let some_var = ", stringify!($atomic_type), "::new(5);
1093 assert_eq!(some_var.load(Ordering::Relaxed), 5);
1097 pub fn load(&self, order: Ordering) -> $int_type {
1098 unsafe { atomic_load(self.v.get(), order) }
1103 concat!("Stores a value into the atomic integer.
1105 `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1107 [`Ordering`]: enum.Ordering.html
1112 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1114 let some_var = ", stringify!($atomic_type), "::new(5);
1116 some_var.store(10, Ordering::Relaxed);
1117 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1122 Panics if `order` is [`Acquire`] or [`AcqRel`].
1124 [`Acquire`]: enum.Ordering.html#variant.Acquire
1125 [`AcqRel`]: enum.Ordering.html#variant.AcqRel"),
1128 pub fn store(&self, val: $int_type, order: Ordering) {
1129 unsafe { atomic_store(self.v.get(), val, order); }
1134 concat!("Stores a value into the atomic integer, returning the previous value.
1136 `swap` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1138 [`Ordering`]: enum.Ordering.html
1143 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1145 let some_var = ", stringify!($atomic_type), "::new(5);
1147 assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
1151 #[cfg(any(stage0, target_has_atomic = "cas"))]
1152 pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
1153 unsafe { atomic_swap(self.v.get(), val, order) }
1158 concat!("Stores a value into the atomic integer if the current value is the same as
1159 the `current` value.
1161 The return value is always the previous value. If it is equal to `current`, then the
1164 `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1165 ordering of this operation.
1167 [`Ordering`]: enum.Ordering.html
1172 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1174 let some_var = ", stringify!($atomic_type), "::new(5);
1176 assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
1177 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1179 assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
1180 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1184 #[cfg(any(stage0, target_has_atomic = "cas"))]
1185 pub fn compare_and_swap(&self,
1188 order: Ordering) -> $int_type {
1189 match self.compare_exchange(current,
1192 strongest_failure_ordering(order)) {
1200 concat!("Stores a value into the atomic integer if the current value is the same as
1201 the `current` value.
1203 The return value is a result indicating whether the new value was written and
1204 containing the previous value. On success this value is guaranteed to be equal to
1207 `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1208 ordering of this operation. The first describes the required ordering if
1209 the operation succeeds while the second describes the required ordering when
1210 the operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and
1211 must be equivalent or weaker than the success ordering.
1213 [`Ordering`]: enum.Ordering.html
1214 [`Release`]: enum.Ordering.html#variant.Release
1215 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1220 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1222 let some_var = ", stringify!($atomic_type), "::new(5);
1224 assert_eq!(some_var.compare_exchange(5, 10,
1228 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1230 assert_eq!(some_var.compare_exchange(6, 12,
1234 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1238 #[cfg(any(stage0, target_has_atomic = "cas"))]
1239 pub fn compare_exchange(&self,
1243 failure: Ordering) -> Result<$int_type, $int_type> {
1244 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
1249 concat!("Stores a value into the atomic integer if the current value is the same as
1250 the `current` value.
1252 Unlike [`compare_exchange`], this function is allowed to spuriously fail even
1253 when the comparison succeeds, which can result in more efficient code on some
1254 platforms. The return value is a result indicating whether the new value was
1255 written and containing the previous value.
1257 `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1258 ordering of this operation. The first describes the required ordering if the
1259 operation succeeds while the second describes the required ordering when the
1260 operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and
1261 must be equivalent or weaker than the success ordering.
1263 [`compare_exchange`]: #method.compare_exchange
1264 [`Ordering`]: enum.Ordering.html
1265 [`Release`]: enum.Ordering.html#variant.Release
1266 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1271 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1273 let val = ", stringify!($atomic_type), "::new(4);
1275 let mut old = val.load(Ordering::Relaxed);
1278 match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1286 pub fn compare_exchange_weak(&self,
1290 failure: Ordering) -> Result<$int_type, $int_type> {
1292 atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
1298 concat!("Adds to the current value, returning the previous value.
1300 This operation wraps around on overflow.
1305 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1307 let foo = ", stringify!($atomic_type), "::new(0);
1308 assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
1309 assert_eq!(foo.load(Ordering::SeqCst), 10);
1313 pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
1314 unsafe { atomic_add(self.v.get(), val, order) }
1319 concat!("Subtracts from the current value, returning the previous value.
1321 This operation wraps around on overflow.
1326 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1328 let foo = ", stringify!($atomic_type), "::new(20);
1329 assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
1330 assert_eq!(foo.load(Ordering::SeqCst), 10);
1334 pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
1335 unsafe { atomic_sub(self.v.get(), val, order) }
1340 concat!("Bitwise \"and\" with the current value.
1342 Performs a bitwise \"and\" operation on the current value and the argument `val`, and
1343 sets the new value to the result.
1345 Returns the previous value.
1350 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1352 let foo = ", stringify!($atomic_type), "::new(0b101101);
1353 assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
1354 assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
1358 pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
1359 unsafe { atomic_and(self.v.get(), val, order) }
1364 concat!("Bitwise \"nand\" with the current value.
1366 Performs a bitwise \"nand\" operation on the current value and the argument `val`, and
1367 sets the new value to the result.
1369 Returns the previous value.
1374 ", $extra_feature, "
1375 use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1377 let foo = ", stringify!($atomic_type), "::new(0x13);
1378 assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
1379 assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
1383 pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
1384 unsafe { atomic_nand(self.v.get(), val, order) }
1389 concat!("Bitwise \"or\" with the current value.
1391 Performs a bitwise \"or\" operation on the current value and the argument `val`, and
1392 sets the new value to the result.
1394 Returns the previous value.
1399 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1401 let foo = ", stringify!($atomic_type), "::new(0b101101);
1402 assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
1403 assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
1407 pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
1408 unsafe { atomic_or(self.v.get(), val, order) }
1413 concat!("Bitwise \"xor\" with the current value.
1415 Performs a bitwise \"xor\" operation on the current value and the argument `val`, and
1416 sets the new value to the result.
1418 Returns the previous value.
1423 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1425 let foo = ", stringify!($atomic_type), "::new(0b101101);
1426 assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
1427 assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
1431 pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
1432 unsafe { atomic_xor(self.v.get(), val, order) }
1437 concat!("Fetches the value, and applies a function to it that returns an optional
1438 new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
1439 `Err(previous_value)`.
1441 Note: This may call the function multiple times if the value has been changed from other threads in
1442 the meantime, as long as the function returns `Some(_)`, but the function will have been applied
1443 but once to the stored value.
1448 #![feature(no_more_cas)]
1449 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1451 let x = ", stringify!($atomic_type), "::new(7);
1452 assert_eq!(x.fetch_update(|_| None, Ordering::SeqCst, Ordering::SeqCst), Err(7));
1453 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(7));
1454 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(8));
1455 assert_eq!(x.load(Ordering::SeqCst), 9);
1458 #[unstable(feature = "no_more_cas",
1459 reason = "no more CAS loops in user code",
1461 pub fn fetch_update<F>(&self,
1463 fetch_order: Ordering,
1464 set_order: Ordering) -> Result<$int_type, $int_type>
1465 where F: FnMut($int_type) -> Option<$int_type> {
1466 let mut prev = self.load(fetch_order);
1467 while let Some(next) = f(prev) {
1468 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
1469 x @ Ok(_) => return x,
1470 Err(next_prev) => prev = next_prev
1478 concat!("Maximum with the current value.
1480 Finds the maximum of the current value and the argument `val`, and
1481 sets the new value to the result.
1483 Returns the previous value.
1488 #![feature(atomic_min_max)]
1489 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1491 let foo = ", stringify!($atomic_type), "::new(23);
1492 assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
1493 assert_eq!(foo.load(Ordering::SeqCst), 42);
1496 If you want to obtain the maximum value in one step, you can use the following:
1499 #![feature(atomic_min_max)]
1500 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1502 let foo = ", stringify!($atomic_type), "::new(23);
1504 let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
1505 assert!(max_foo == 42);
1508 #[unstable(feature = "atomic_min_max",
1509 reason = "easier and faster min/max than writing manual CAS loop",
1511 pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
1512 unsafe { $max_fn(self.v.get(), val, order) }
1517 concat!("Minimum with the current value.
1519 Finds the minimum of the current value and the argument `val`, and
1520 sets the new value to the result.
1522 Returns the previous value.
1527 #![feature(atomic_min_max)]
1528 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1530 let foo = ", stringify!($atomic_type), "::new(23);
1531 assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
1532 assert_eq!(foo.load(Ordering::Relaxed), 23);
1533 assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
1534 assert_eq!(foo.load(Ordering::Relaxed), 22);
1537 If you want to obtain the minimum value in one step, you can use the following:
1540 #![feature(atomic_min_max)]
1541 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1543 let foo = ", stringify!($atomic_type), "::new(23);
1545 let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
1546 assert_eq!(min_foo, 12);
1549 #[unstable(feature = "atomic_min_max",
1550 reason = "easier and faster min/max than writing manual CAS loop",
1552 pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
1553 unsafe { $min_fn(self.v.get(), val, order) }
1561 #[cfg(target_has_atomic = "8")]
1563 unstable(feature = "integer_atomics", issue = "32976"),
1564 unstable(feature = "integer_atomics", issue = "32976"),
1565 unstable(feature = "integer_atomics", issue = "32976"),
1566 unstable(feature = "integer_atomics", issue = "32976"),
1567 unstable(feature = "integer_atomics", issue = "32976"),
1568 unstable(feature = "integer_atomics", issue = "32976"),
1569 "i8", "../../../std/primitive.i8.html",
1570 "#![feature(integer_atomics)]\n\n",
1571 atomic_min, atomic_max,
1572 i8 AtomicI8 ATOMIC_I8_INIT
1574 #[cfg(target_has_atomic = "8")]
1576 unstable(feature = "integer_atomics", issue = "32976"),
1577 unstable(feature = "integer_atomics", issue = "32976"),
1578 unstable(feature = "integer_atomics", issue = "32976"),
1579 unstable(feature = "integer_atomics", issue = "32976"),
1580 unstable(feature = "integer_atomics", issue = "32976"),
1581 unstable(feature = "integer_atomics", issue = "32976"),
1582 "u8", "../../../std/primitive.u8.html",
1583 "#![feature(integer_atomics)]\n\n",
1584 atomic_umin, atomic_umax,
1585 u8 AtomicU8 ATOMIC_U8_INIT
1587 #[cfg(target_has_atomic = "16")]
1589 unstable(feature = "integer_atomics", issue = "32976"),
1590 unstable(feature = "integer_atomics", issue = "32976"),
1591 unstable(feature = "integer_atomics", issue = "32976"),
1592 unstable(feature = "integer_atomics", issue = "32976"),
1593 unstable(feature = "integer_atomics", issue = "32976"),
1594 unstable(feature = "integer_atomics", issue = "32976"),
1595 "i16", "../../../std/primitive.i16.html",
1596 "#![feature(integer_atomics)]\n\n",
1597 atomic_min, atomic_max,
1598 i16 AtomicI16 ATOMIC_I16_INIT
1600 #[cfg(target_has_atomic = "16")]
1602 unstable(feature = "integer_atomics", issue = "32976"),
1603 unstable(feature = "integer_atomics", issue = "32976"),
1604 unstable(feature = "integer_atomics", issue = "32976"),
1605 unstable(feature = "integer_atomics", issue = "32976"),
1606 unstable(feature = "integer_atomics", issue = "32976"),
1607 unstable(feature = "integer_atomics", issue = "32976"),
1608 "u16", "../../../std/primitive.u16.html",
1609 "#![feature(integer_atomics)]\n\n",
1610 atomic_umin, atomic_umax,
1611 u16 AtomicU16 ATOMIC_U16_INIT
1613 #[cfg(target_has_atomic = "32")]
1615 unstable(feature = "integer_atomics", issue = "32976"),
1616 unstable(feature = "integer_atomics", issue = "32976"),
1617 unstable(feature = "integer_atomics", issue = "32976"),
1618 unstable(feature = "integer_atomics", issue = "32976"),
1619 unstable(feature = "integer_atomics", issue = "32976"),
1620 unstable(feature = "integer_atomics", issue = "32976"),
1621 "i32", "../../../std/primitive.i32.html",
1622 "#![feature(integer_atomics)]\n\n",
1623 atomic_min, atomic_max,
1624 i32 AtomicI32 ATOMIC_I32_INIT
1626 #[cfg(target_has_atomic = "32")]
1628 unstable(feature = "integer_atomics", issue = "32976"),
1629 unstable(feature = "integer_atomics", issue = "32976"),
1630 unstable(feature = "integer_atomics", issue = "32976"),
1631 unstable(feature = "integer_atomics", issue = "32976"),
1632 unstable(feature = "integer_atomics", issue = "32976"),
1633 unstable(feature = "integer_atomics", issue = "32976"),
1634 "u32", "../../../std/primitive.u32.html",
1635 "#![feature(integer_atomics)]\n\n",
1636 atomic_umin, atomic_umax,
1637 u32 AtomicU32 ATOMIC_U32_INIT
1639 #[cfg(target_has_atomic = "64")]
1641 unstable(feature = "integer_atomics", issue = "32976"),
1642 unstable(feature = "integer_atomics", issue = "32976"),
1643 unstable(feature = "integer_atomics", issue = "32976"),
1644 unstable(feature = "integer_atomics", issue = "32976"),
1645 unstable(feature = "integer_atomics", issue = "32976"),
1646 unstable(feature = "integer_atomics", issue = "32976"),
1647 "i64", "../../../std/primitive.i64.html",
1648 "#![feature(integer_atomics)]\n\n",
1649 atomic_min, atomic_max,
1650 i64 AtomicI64 ATOMIC_I64_INIT
1652 #[cfg(target_has_atomic = "64")]
1654 unstable(feature = "integer_atomics", issue = "32976"),
1655 unstable(feature = "integer_atomics", issue = "32976"),
1656 unstable(feature = "integer_atomics", issue = "32976"),
1657 unstable(feature = "integer_atomics", issue = "32976"),
1658 unstable(feature = "integer_atomics", issue = "32976"),
1659 unstable(feature = "integer_atomics", issue = "32976"),
1660 "u64", "../../../std/primitive.u64.html",
1661 "#![feature(integer_atomics)]\n\n",
1662 atomic_umin, atomic_umax,
1663 u64 AtomicU64 ATOMIC_U64_INIT
1665 #[cfg(target_has_atomic = "ptr")]
1667 stable(feature = "rust1", since = "1.0.0"),
1668 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
1669 stable(feature = "atomic_debug", since = "1.3.0"),
1670 stable(feature = "atomic_access", since = "1.15.0"),
1671 stable(feature = "atomic_from", since = "1.23.0"),
1672 stable(feature = "atomic_nand", since = "1.27.0"),
1673 "isize", "../../../std/primitive.isize.html",
1675 atomic_min, atomic_max,
1676 isize AtomicIsize ATOMIC_ISIZE_INIT
1678 #[cfg(target_has_atomic = "ptr")]
1680 stable(feature = "rust1", since = "1.0.0"),
1681 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
1682 stable(feature = "atomic_debug", since = "1.3.0"),
1683 stable(feature = "atomic_access", since = "1.15.0"),
1684 stable(feature = "atomic_from", since = "1.23.0"),
1685 stable(feature = "atomic_nand", since = "1.27.0"),
1686 "usize", "../../../std/primitive.usize.html",
1688 atomic_umin, atomic_umax,
1689 usize AtomicUsize ATOMIC_USIZE_INIT
/// Maps a `compare_and_swap` success ordering to the strongest failure
/// ordering that is legal for `compare_exchange`: the failure ordering may
/// not be `Release`/`AcqRel` and may not be stronger than the success one.
#[inline]
#[cfg(any(stage0, target_has_atomic = "cas"))]
fn strongest_failure_ordering(order: Ordering) -> Ordering {
    match order {
        Release => Relaxed,
        Relaxed => Relaxed,
        SeqCst => SeqCst,
        Acquire => Acquire,
        AcqRel => Acquire,
        __Nonexhaustive => __Nonexhaustive,
    }
}
1706 unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
1708 Release => intrinsics::atomic_store_rel(dst, val),
1709 Relaxed => intrinsics::atomic_store_relaxed(dst, val),
1710 SeqCst => intrinsics::atomic_store(dst, val),
1711 Acquire => panic!("there is no such thing as an acquire store"),
1712 AcqRel => panic!("there is no such thing as an acquire/release store"),
1713 __Nonexhaustive => panic!("invalid memory ordering"),
1718 unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
1720 Acquire => intrinsics::atomic_load_acq(dst),
1721 Relaxed => intrinsics::atomic_load_relaxed(dst),
1722 SeqCst => intrinsics::atomic_load(dst),
1723 Release => panic!("there is no such thing as a release load"),
1724 AcqRel => panic!("there is no such thing as an acquire/release load"),
1725 __Nonexhaustive => panic!("invalid memory ordering"),
/// Atomically replaces the value at `*dst` with `val`, returning the
/// previous value. All five orderings are valid for an exchange.
#[inline]
#[cfg(any(stage0, target_has_atomic = "cas"))]
unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xchg_acq(dst, val),
        Release => intrinsics::atomic_xchg_rel(dst, val),
        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xchg(dst, val),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}
1742 /// Returns the previous value (like __sync_fetch_and_add).
1744 unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
1746 Acquire => intrinsics::atomic_xadd_acq(dst, val),
1747 Release => intrinsics::atomic_xadd_rel(dst, val),
1748 AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
1749 Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
1750 SeqCst => intrinsics::atomic_xadd(dst, val),
1751 __Nonexhaustive => panic!("invalid memory ordering"),
1755 /// Returns the previous value (like __sync_fetch_and_sub).
1757 unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
1759 Acquire => intrinsics::atomic_xsub_acq(dst, val),
1760 Release => intrinsics::atomic_xsub_rel(dst, val),
1761 AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
1762 Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
1763 SeqCst => intrinsics::atomic_xsub(dst, val),
1764 __Nonexhaustive => panic!("invalid memory ordering"),
/// Strong compare-and-exchange: if `*dst == old`, stores `new`.
///
/// Returns `Ok(previous)` on success and `Err(previous)` on failure. The
/// `(success, failure)` ordering pair selects the intrinsic; pairs that are
/// illegal per the C++11 memory model panic with a descriptive message.
#[inline]
#[cfg(any(stage0, target_has_atomic = "cas"))]
unsafe fn atomic_compare_exchange<T>(dst: *mut T,
                                     old: T,
                                     new: T,
                                     success: Ordering,
                                     failure: Ordering)
    -> Result<T, T> {
    // Each intrinsic yields the observed value and a success flag.
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
        (__Nonexhaustive, _) => panic!("invalid memory ordering"),
        (_, __Nonexhaustive) => panic!("invalid memory ordering"),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    if ok { Ok(val) } else { Err(val) }
}
1796 unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
1802 let (val, ok) = match (success, failure) {
1803 (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
1804 (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
1805 (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
1806 (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
1807 (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
1808 (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
1809 (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
1810 (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
1811 (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
1812 (__Nonexhaustive, _) => panic!("invalid memory ordering"),
1813 (_, __Nonexhaustive) => panic!("invalid memory ordering"),
1814 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
1815 (_, Release) => panic!("there is no such thing as a release failure ordering"),
1816 _ => panic!("a failure ordering can't be stronger than a success ordering"),
1818 if ok { Ok(val) } else { Err(val) }
1822 unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
1824 Acquire => intrinsics::atomic_and_acq(dst, val),
1825 Release => intrinsics::atomic_and_rel(dst, val),
1826 AcqRel => intrinsics::atomic_and_acqrel(dst, val),
1827 Relaxed => intrinsics::atomic_and_relaxed(dst, val),
1828 SeqCst => intrinsics::atomic_and(dst, val),
1829 __Nonexhaustive => panic!("invalid memory ordering"),
1834 unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
1836 Acquire => intrinsics::atomic_nand_acq(dst, val),
1837 Release => intrinsics::atomic_nand_rel(dst, val),
1838 AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
1839 Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
1840 SeqCst => intrinsics::atomic_nand(dst, val),
1841 __Nonexhaustive => panic!("invalid memory ordering"),
1846 unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
1848 Acquire => intrinsics::atomic_or_acq(dst, val),
1849 Release => intrinsics::atomic_or_rel(dst, val),
1850 AcqRel => intrinsics::atomic_or_acqrel(dst, val),
1851 Relaxed => intrinsics::atomic_or_relaxed(dst, val),
1852 SeqCst => intrinsics::atomic_or(dst, val),
1853 __Nonexhaustive => panic!("invalid memory ordering"),
1858 unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
1860 Acquire => intrinsics::atomic_xor_acq(dst, val),
1861 Release => intrinsics::atomic_xor_rel(dst, val),
1862 AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
1863 Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
1864 SeqCst => intrinsics::atomic_xor(dst, val),
1865 __Nonexhaustive => panic!("invalid memory ordering"),
1869 /// returns the max value (signed comparison)
1871 unsafe fn atomic_max<T>(dst: *mut T, val: T, order: Ordering) -> T {
1873 Acquire => intrinsics::atomic_max_acq(dst, val),
1874 Release => intrinsics::atomic_max_rel(dst, val),
1875 AcqRel => intrinsics::atomic_max_acqrel(dst, val),
1876 Relaxed => intrinsics::atomic_max_relaxed(dst, val),
1877 SeqCst => intrinsics::atomic_max(dst, val),
1878 __Nonexhaustive => panic!("invalid memory ordering"),
1882 /// returns the min value (signed comparison)
1884 unsafe fn atomic_min<T>(dst: *mut T, val: T, order: Ordering) -> T {
1886 Acquire => intrinsics::atomic_min_acq(dst, val),
1887 Release => intrinsics::atomic_min_rel(dst, val),
1888 AcqRel => intrinsics::atomic_min_acqrel(dst, val),
1889 Relaxed => intrinsics::atomic_min_relaxed(dst, val),
1890 SeqCst => intrinsics::atomic_min(dst, val),
1891 __Nonexhaustive => panic!("invalid memory ordering"),
1895 /// returns the max value (signed comparison)
1897 unsafe fn atomic_umax<T>(dst: *mut T, val: T, order: Ordering) -> T {
1899 Acquire => intrinsics::atomic_umax_acq(dst, val),
1900 Release => intrinsics::atomic_umax_rel(dst, val),
1901 AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
1902 Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
1903 SeqCst => intrinsics::atomic_umax(dst, val),
1904 __Nonexhaustive => panic!("invalid memory ordering"),
1908 /// returns the min value (signed comparison)
1910 unsafe fn atomic_umin<T>(dst: *mut T, val: T, order: Ordering) -> T {
1912 Acquire => intrinsics::atomic_umin_acq(dst, val),
1913 Release => intrinsics::atomic_umin_rel(dst, val),
1914 AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
1915 Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
1916 SeqCst => intrinsics::atomic_umin(dst, val),
1917 __Nonexhaustive => panic!("invalid memory ordering"),
1921 /// An atomic fence.
1923 /// Depending on the specified order, a fence prevents the compiler and CPU from
1924 /// reordering certain types of memory operations around it.
1925 /// That creates synchronizes-with relationships between it and atomic operations
1926 /// or fences in other threads.
1928 /// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
1929 /// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
1930 /// exist operations X and Y, both operating on some atomic object 'M' such
/// that A is sequenced before X, Y is sequenced before B and Y observes
1932 /// the change to M. This provides a happens-before dependence between A and B.
1935 /// Thread 1 Thread 2
1937 /// fence(Release); A --------------
1938 /// x.store(3, Relaxed); X --------- |
1941 /// -------------> Y if x.load(Relaxed) == 3 {
1942 /// |-------> B fence(Acquire);
1947 /// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
1950 /// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
1951 /// and [`Release`] semantics, participates in the global program order of the
1952 /// other [`SeqCst`] operations and/or fences.
1954 /// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
1958 /// Panics if `order` is [`Relaxed`].
1963 /// use std::sync::atomic::AtomicBool;
1964 /// use std::sync::atomic::fence;
1965 /// use std::sync::atomic::Ordering;
1967 /// // A mutual exclusion primitive based on spinlock.
1968 /// pub struct Mutex {
1969 /// flag: AtomicBool,
1973 /// pub fn new() -> Mutex {
1975 /// flag: AtomicBool::new(false),
1979 /// pub fn lock(&self) {
1980 /// while !self.flag.compare_and_swap(false, true, Ordering::Relaxed) {}
1981 /// // This fence synchronizes-with store in `unlock`.
1982 /// fence(Ordering::Acquire);
1985 /// pub fn unlock(&self) {
1986 /// self.flag.store(false, Ordering::Release);
1991 /// [`Ordering`]: enum.Ordering.html
1992 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1993 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1994 /// [`Release`]: enum.Ordering.html#variant.Release
1995 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1996 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1998 #[stable(feature = "rust1", since = "1.0.0")]
1999 pub fn fence(order: Ordering) {
2002 Acquire => intrinsics::atomic_fence_acq(),
2003 Release => intrinsics::atomic_fence_rel(),
2004 AcqRel => intrinsics::atomic_fence_acqrel(),
2005 SeqCst => intrinsics::atomic_fence(),
2006 Relaxed => panic!("there is no such thing as a relaxed fence"),
2007 __Nonexhaustive => panic!("invalid memory ordering"),
2013 /// A compiler memory fence.
2015 /// `compiler_fence` does not emit any machine code, but restricts the kinds
2016 /// of memory re-ordering the compiler is allowed to do. Specifically, depending on
2017 /// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
2018 /// or writes from before or after the call to the other side of the call to
2019 /// `compiler_fence`. Note that it does **not** prevent the *hardware*
/// from doing such re-ordering. This is not a problem in a single-threaded
2021 /// execution context, but when other threads may modify memory at the same
2022 /// time, stronger synchronization primitives such as [`fence`] are required.
2024 /// The re-ordering prevented by the different ordering semantics are:
2026 /// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
2027 /// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
2028 /// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
2029 /// - with [`AcqRel`], both of the above rules are enforced.
2031 /// `compiler_fence` is generally only useful for preventing a thread from
2032 /// racing *with itself*. That is, if a given thread is executing one piece
2033 /// of code, and is then interrupted, and starts executing code elsewhere
2034 /// (while still in the same thread, and conceptually still on the same
2035 /// core). In traditional programs, this can only occur when a signal
2036 /// handler is registered. In more low-level code, such situations can also
2037 /// arise when handling interrupts, when implementing green threads with
2038 /// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
2039 /// discussion of [memory barriers].
2043 /// Panics if `order` is [`Relaxed`].
2047 /// Without `compiler_fence`, the `assert_eq!` in following code
2048 /// is *not* guaranteed to succeed, despite everything happening in a single thread.
2049 /// To see why, remember that the compiler is free to swap the stores to
/// `IMPORTANT_VARIABLE` and `IS_READY` since they are both
2051 /// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
2052 /// after `IS_READY` is updated, then the signal handler will see
2053 /// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
2054 /// Using a `compiler_fence` remedies this situation.
2057 /// use std::sync::atomic::{AtomicBool, AtomicUsize};
2058 /// use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT};
2059 /// use std::sync::atomic::Ordering;
2060 /// use std::sync::atomic::compiler_fence;
2062 /// static IMPORTANT_VARIABLE: AtomicUsize = ATOMIC_USIZE_INIT;
2063 /// static IS_READY: AtomicBool = ATOMIC_BOOL_INIT;
2066 /// IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
2067 /// // prevent earlier writes from being moved beyond this point
2068 /// compiler_fence(Ordering::Release);
2069 /// IS_READY.store(true, Ordering::Relaxed);
2072 /// fn signal_handler() {
2073 /// if IS_READY.load(Ordering::Relaxed) {
2074 /// assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
2079 /// [`fence`]: fn.fence.html
2080 /// [`Ordering`]: enum.Ordering.html
2081 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2082 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2083 /// [`Release`]: enum.Ordering.html#variant.Release
2084 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2085 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2086 /// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
2088 #[stable(feature = "compiler_fences", since = "1.21.0")]
2089 pub fn compiler_fence(order: Ordering) {
2092 Acquire => intrinsics::atomic_singlethreadfence_acq(),
2093 Release => intrinsics::atomic_singlethreadfence_rel(),
2094 AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
2095 SeqCst => intrinsics::atomic_singlethreadfence(),
2096 Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
2097 __Nonexhaustive => panic!("invalid memory ordering"),
2103 #[cfg(target_has_atomic = "8")]
2104 #[stable(feature = "atomic_debug", since = "1.3.0")]
2105 impl fmt::Debug for AtomicBool {
2106 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2107 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2111 #[cfg(target_has_atomic = "ptr")]
2112 #[stable(feature = "atomic_debug", since = "1.3.0")]
2113 impl<T> fmt::Debug for AtomicPtr<T> {
2114 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2115 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2119 #[cfg(target_has_atomic = "ptr")]
2120 #[stable(feature = "atomic_pointer", since = "1.24.0")]
2121 impl<T> fmt::Pointer for AtomicPtr<T> {
2122 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2123 fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)