// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent
//! types.
//!
//! This module defines atomic versions of a select number of primitive
//! types, including [`AtomicBool`], [`AtomicIsize`], and [`AtomicUsize`].
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
//! [`AtomicBool`]: struct.AtomicBool.html
//! [`AtomicIsize`]: struct.AtomicIsize.html
//! [`AtomicUsize`]: struct.AtomicUsize.html
//!
//! Each method takes an [`Ordering`] which represents the strength of
//! the memory barrier for that operation. These orderings are the
//! same as [LLVM atomic orderings][1]. For more information see the [nomicon][2].
//!
//! [`Ordering`]: enum.Ordering.html
//!
//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
//! [2]: ../../../nomicon/atomics.html
//!
//! Atomic variables are safe to share between threads (they implement [`Sync`])
//! but they do not themselves provide the mechanism for sharing and follow the
//! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
//! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
//! atomically-reference-counted shared pointer).
//!
//! [`Sync`]: ../../marker/trait.Sync.html
//! [arc]: ../../../std/sync/struct.Arc.html
//!
//! Most atomic types may be stored in static variables, initialized using
//! the provided static initializers like [`ATOMIC_BOOL_INIT`]. Atomic statics
//! are often used for lazy global initialization.
//!
//! [`ATOMIC_BOOL_INIT`]: constant.ATOMIC_BOOL_INIT.html
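//!
//! For example, a static atomic counter can hand out process-wide unique IDs
//! without any locking. A minimal sketch (the `next_id` helper is
//! illustrative, not part of this module):
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
//!
//! static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
//!
//! fn next_id() -> usize {
//!     NEXT_ID.fetch_add(1, Ordering::Relaxed)
//! }
//!
//! assert_ne!(next_id(), next_id());
//! ```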
//!
//! # Examples
//!
//! A simple spinlock:
//!
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::thread;
//!
//! fn main() {
//!     let spinlock = Arc::new(AtomicUsize::new(1));
//!
//!     let spinlock_clone = spinlock.clone();
//!     let thread = thread::spawn(move|| {
//!         spinlock_clone.store(0, Ordering::SeqCst);
//!     });
//!
//!     // Wait for the other thread to release the lock
//!     while spinlock.load(Ordering::SeqCst) != 0 {}
//!
//!     if let Err(panic) = thread.join() {
//!         println!("Thread had an error: {:?}", panic);
//!     }
//! }
//! ```
//!
//! Keep a global count of live threads:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
//!
//! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
//!
//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
//! println!("live threads: {}", old_thread_count + 1);
//! ```

#![stable(feature = "rust1", since = "1.0.0")]
#![cfg_attr(not(target_has_atomic = "8"), allow(dead_code))]
#![cfg_attr(not(target_has_atomic = "8"), allow(unused_imports))]

use self::Ordering::*;

use intrinsics;
use cell::UnsafeCell;
use fmt;

/// Save power or switch hyperthreads in a busy-wait spin-loop.
///
/// This function is deliberately more primitive than
/// `std::thread::yield_now` and does not directly yield to the
/// system's scheduler. In some cases it might be useful to use a
/// combination of both functions. Careful benchmarking is advised.
///
/// On some platforms this function may not do anything at all.
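///
/// # Examples
///
/// A minimal sketch of a spin-wait loop that uses this hint while waiting on
/// a flag (assumes the unstable `hint_core_should_pause` feature is enabled):
///
/// ```ignore
/// use std::sync::atomic::{self, AtomicBool, Ordering};
///
/// fn spin_wait(ready: &AtomicBool) {
///     while !ready.load(Ordering::Acquire) {
///         // Tell the core we are busy-waiting so it can save power.
///         atomic::hint_core_should_pause();
///     }
/// }
/// ```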
#[inline]
#[unstable(feature = "hint_core_should_pause", issue = "41196")]
pub fn hint_core_should_pause()
{
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    unsafe {
        asm!("pause" ::: "memory" : "volatile");
    }

    #[cfg(target_arch = "aarch64")]
    unsafe {
        asm!("yield" ::: "memory" : "volatile");
    }
}

/// A boolean type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a `bool`.
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
    v: UnsafeCell<u8>,
}

#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
impl Default for AtomicBool {
    /// Creates an `AtomicBool` initialized to `false`.
    fn default() -> Self {
        Self::new(false)
    }
}

// Send is implicitly implemented for AtomicBool.
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicBool {}

/// A raw pointer type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a `*mut T`.
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicPtr<T> {
    p: UnsafeCell<*mut T>,
}

#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for AtomicPtr<T> {
    /// Creates a null `AtomicPtr<T>`.
    fn default() -> AtomicPtr<T> {
        AtomicPtr::new(::ptr::null_mut())
    }
}

#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Sync for AtomicPtr<T> {}

/// Atomic memory orderings
///
/// Memory orderings limit the ways that both the compiler and CPU may reorder
/// instructions around atomic operations. At its most restrictive,
/// "sequentially consistent" atomics allow neither reads nor writes
/// to be moved either before or after the atomic operation; on the other end
/// "relaxed" atomics allow all reorderings.
///
/// Rust's memory orderings are [the same as
/// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
///
/// For more information see the [nomicon].
///
/// [nomicon]: ../../../nomicon/atomics.html
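///
/// A minimal sketch of a release/acquire pairing (the `FLAG` and `DATA`
/// statics are illustrative; in real code the two halves would run on
/// different threads):
///
/// ```
/// use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
/// use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT};
///
/// static FLAG: AtomicBool = ATOMIC_BOOL_INIT;
/// static DATA: AtomicUsize = ATOMIC_USIZE_INIT;
///
/// // Writer: publish DATA, then set FLAG with Release.
/// DATA.store(42, Ordering::Relaxed);
/// FLAG.store(true, Ordering::Release);
///
/// // Reader: a load that observes FLAG with Acquire also sees DATA's value.
/// if FLAG.load(Ordering::Acquire) {
///     assert_eq!(DATA.load(Ordering::Relaxed), 42);
/// }
/// ```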
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone, Debug)]
pub enum Ordering {
    /// No ordering constraints, only atomic operations.
    ///
    /// Corresponds to LLVM's [`Monotonic`] ordering.
    ///
    /// [`Monotonic`]: http://llvm.org/docs/Atomics.html#monotonic
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous writes become visible
    /// to the other threads that perform a load with [`Acquire`] ordering
    /// on the same value.
    ///
    /// [`Acquire`]: http://llvm.org/docs/Atomics.html#acquire
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, all subsequent loads will see data
    /// written before a store with [`Release`] ordering on the same value
    /// in other threads.
    ///
    /// [`Release`]: http://llvm.org/docs/Atomics.html#release
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// When coupled with a load, uses [`Acquire`] ordering, and with a store
    /// [`Release`] ordering.
    ///
    /// [`Acquire`]: http://llvm.org/docs/Atomics.html#acquire
    /// [`Release`]: http://llvm.org/docs/Atomics.html#release
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like `AcqRel` with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
    // Prevent exhaustive matching to allow for future extension
    #[doc(hidden)]
    #[unstable(feature = "future_atomic_orderings", issue = "0")]
    __Nonexhaustive,
}

/// An [`AtomicBool`] initialized to `false`.
///
/// [`AtomicBool`]: struct.AtomicBool.html
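///
/// Typically used to initialize a static. A minimal sketch (the `STARTED`
/// static is illustrative):
///
/// ```
/// use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT};
///
/// static STARTED: AtomicBool = ATOMIC_BOOL_INIT;
/// ```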
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);

#[cfg(target_has_atomic = "8")]
impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_atomic_bool_new"))]
    pub const fn new(v: bool) -> AtomicBool {
        AtomicBool { v: UnsafeCell::new(v as u8) }
    }

    /// Returns a mutable reference to the underlying `bool`.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bool = AtomicBool::new(true);
    /// assert_eq!(*some_bool.get_mut(), true);
    /// *some_bool.get_mut() = false;
    /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn get_mut(&mut self) -> &mut bool {
        unsafe { &mut *(self.v.get() as *mut bool) }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let some_bool = AtomicBool::new(true);
    /// assert_eq!(some_bool.into_inner(), true);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn into_inner(self) -> bool {
        unsafe { self.v.into_inner() != 0 }
    }

    /// Loads a value from the bool.
    ///
    /// `load` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Release`] or [`AcqRel`].
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> bool {
        unsafe { atomic_load(self.v.get(), order) != 0 }
    }

    /// Stores a value into the bool.
    ///
    /// `store` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// some_bool.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Acquire`] or [`AcqRel`].
    ///
    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: bool, order: Ordering) {
        unsafe {
            atomic_store(self.v.get(), val as u8, order);
        }
    }

    /// Stores a value into the bool, returning the previous value.
    ///
    /// `swap` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
    }

    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the
    /// operation succeeds while the second describes the required ordering when the
    /// operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and must
    /// be equivalent to or weaker than the success ordering.
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_exchange(true,
    ///                                       false,
    ///                                       Ordering::Acquire,
    ///                                       Ordering::Relaxed),
    ///            Ok(true));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_exchange(true, true,
    ///                                       Ordering::SeqCst,
    ///                                       Ordering::Acquire),
    ///            Err(false));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange(&self,
                            current: bool,
                            new: bool,
                            success: Ordering,
                            failure: Ordering)
                            -> Result<bool, bool> {
        match unsafe {
            atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
        } {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }

    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be [`Release`] or [`AcqRel`] and must be equivalent to or
    /// weaker than the success ordering.
    ///
    /// [`compare_exchange`]: #method.compare_exchange
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let val = AtomicBool::new(false);
    ///
    /// let new = true;
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange_weak(&self,
                                 current: bool,
                                 new: bool,
                                 success: Ordering,
                                 failure: Ordering)
                                 -> Result<bool, bool> {
        match unsafe {
            atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
        } {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }

    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "nand" with a boolean value.
    ///
    /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        // We can't use atomic_nand here because it can result in a bool with
        // an invalid value. This happens because the atomic operation is done
        // with an 8-bit integer internally, which would set the upper 7 bits.
        // So we just use fetch_xor or swap instead.
        if val {
            // !(x & true) == !x
            // We must invert the bool.
            self.fetch_xor(true, order)
        } else {
            // !(x & false) == true
            // We must set the bool to true.
            self.swap(true, order)
        }
    }

    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
    /// new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
    }
}

#[cfg(target_has_atomic = "ptr")]
impl<T> AtomicPtr<T> {
    /// Creates a new `AtomicPtr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let ptr = &mut 5;
    /// let atomic_ptr = AtomicPtr::new(ptr);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_atomic_ptr_new"))]
    pub const fn new(p: *mut T) -> AtomicPtr<T> {
        AtomicPtr { p: UnsafeCell::new(p) }
    }

    /// Returns a mutable reference to the underlying pointer.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
    /// *atomic_ptr.get_mut() = &mut 5;
    /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn get_mut(&mut self) -> &mut *mut T {
        unsafe { &mut *self.p.get() }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let atomic_ptr = AtomicPtr::new(&mut 5);
    /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn into_inner(self) -> *mut T {
        unsafe { self.p.into_inner() }
    }

    /// Loads a value from the pointer.
    ///
    /// `load` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Release`] or [`AcqRel`].
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let value = some_ptr.load(Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> *mut T {
        unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
    }

    /// Stores a value into the pointer.
    ///
    /// `store` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// some_ptr.store(other_ptr, Ordering::Relaxed);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Acquire`] or [`AcqRel`].
    ///
    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, ptr: *mut T, order: Ordering) {
        unsafe {
            atomic_store(self.p.get() as *mut usize, ptr as usize, order);
        }
    }

    /// Stores a value into the pointer, returning the previous value.
    ///
    /// `swap` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
        unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    /// let another_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if
    /// the operation succeeds while the second describes the required ordering when
    /// the operation fails. The failure ordering can't be [`Release`] or [`AcqRel`]
    /// and must be equivalent to or weaker than the success ordering.
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    /// let another_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_exchange(other_ptr, another_ptr,
    ///                                       Ordering::SeqCst, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange(&self,
                            current: *mut T,
                            new: *mut T,
                            success: Ordering,
                            failure: Ordering)
                            -> Result<*mut T, *mut T> {
        unsafe {
            let res = atomic_compare_exchange(self.p.get() as *mut usize,
                                              current as usize,
                                              new as usize,
                                              success,
                                              failure);
            match res {
                Ok(x) => Ok(x as *mut T),
                Err(x) => Err(x as *mut T),
            }
        }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be [`Release`] or [`AcqRel`] and must be equivalent to or
    /// weaker than the success ordering.
    ///
    /// [`compare_exchange`]: #method.compare_exchange
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let some_ptr = AtomicPtr::new(&mut 5);
    ///
    /// let new = &mut 10;
    /// let mut old = some_ptr.load(Ordering::Relaxed);
    /// loop {
    ///     match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange_weak(&self,
                                 current: *mut T,
                                 new: *mut T,
                                 success: Ordering,
                                 failure: Ordering)
                                 -> Result<*mut T, *mut T> {
        unsafe {
            let res = atomic_compare_exchange_weak(self.p.get() as *mut usize,
                                                   current as usize,
                                                   new as usize,
                                                   success,
                                                   failure);
            match res {
                Ok(x) => Ok(x as *mut T),
                Err(x) => Err(x as *mut T),
            }
        }
    }
}

#[cfg(target_has_atomic = "ptr")]
macro_rules! atomic_int {
    ($stable:meta, $const_unstable:meta,
     $stable_cxchg:meta,
     $stable_debug:meta,
     $stable_access:meta,
     $int_type:ident $atomic_type:ident $atomic_init:ident) => {
        /// An integer type which can be safely shared between threads.
        ///
        /// This type has the same in-memory representation as the underlying integer type.
        #[$stable]
        pub struct $atomic_type {
            v: UnsafeCell<$int_type>,
        }

        /// An atomic integer initialized to `0`.
        #[$stable]
        pub const $atomic_init: $atomic_type = $atomic_type::new(0);

        #[$stable]
        impl Default for $atomic_type {
            fn default() -> Self {
                Self::new(Default::default())
            }
        }

        #[$stable_debug]
        impl fmt::Debug for $atomic_type {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                f.debug_tuple(stringify!($atomic_type))
                    .field(&self.load(Ordering::SeqCst))
                    .finish()
            }
        }

        // Send is implicitly implemented.
        #[$stable]
        unsafe impl Sync for $atomic_type {}

        impl $atomic_type {
            /// Creates a new atomic integer.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::AtomicIsize;
            ///
            /// let atomic_forty_two = AtomicIsize::new(42);
            /// ```
            #[inline]
            #[$stable]
            #[cfg_attr(not(stage0), $const_unstable)]
            pub const fn new(v: $int_type) -> Self {
                $atomic_type { v: UnsafeCell::new(v) }
            }

            /// Returns a mutable reference to the underlying integer.
            ///
            /// This is safe because the mutable reference guarantees that no other threads are
            /// concurrently accessing the atomic data.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let mut some_isize = AtomicIsize::new(10);
            /// assert_eq!(*some_isize.get_mut(), 10);
            /// *some_isize.get_mut() = 5;
            /// assert_eq!(some_isize.load(Ordering::SeqCst), 5);
            /// ```
            #[inline]
            #[$stable_access]
            pub fn get_mut(&mut self) -> &mut $int_type {
                unsafe { &mut *self.v.get() }
            }

            /// Consumes the atomic and returns the contained value.
            ///
            /// This is safe because passing `self` by value guarantees that no other threads are
            /// concurrently accessing the atomic data.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::AtomicIsize;
            ///
            /// let some_isize = AtomicIsize::new(5);
            /// assert_eq!(some_isize.into_inner(), 5);
            /// ```
            #[inline]
            #[$stable_access]
            pub fn into_inner(self) -> $int_type {
                unsafe { self.v.into_inner() }
            }

            /// Loads a value from the atomic integer.
            ///
            /// `load` takes an [`Ordering`] argument which describes the memory ordering of this
            /// operation.
            ///
            /// # Panics
            ///
            /// Panics if `order` is [`Release`] or [`AcqRel`].
            ///
            /// [`Ordering`]: enum.Ordering.html
            /// [`Release`]: enum.Ordering.html#variant.Release
            /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
            /// ```
            #[inline]
            #[$stable]
            pub fn load(&self, order: Ordering) -> $int_type {
                unsafe { atomic_load(self.v.get(), order) }
            }

            /// Stores a value into the atomic integer.
            ///
            /// `store` takes an [`Ordering`] argument which describes the memory ordering of this
            /// operation.
            ///
            /// [`Ordering`]: enum.Ordering.html
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// some_isize.store(10, Ordering::Relaxed);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            ///
            /// # Panics
            ///
            /// Panics if `order` is [`Acquire`] or [`AcqRel`].
            ///
            /// [`Acquire`]: enum.Ordering.html#variant.Acquire
            /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
            #[inline]
            #[$stable]
            pub fn store(&self, val: $int_type, order: Ordering) {
                unsafe { atomic_store(self.v.get(), val, order); }
            }

            /// Stores a value into the atomic integer, returning the previous value.
            ///
            /// `swap` takes an [`Ordering`] argument which describes the memory ordering of this
            /// operation.
            ///
            /// [`Ordering`]: enum.Ordering.html
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
            /// ```
            #[inline]
            #[$stable]
            pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_swap(self.v.get(), val, order) }
            }

            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// The return value is always the previous value. If it is equal to `current`, then the
            /// value was updated.
            ///
            /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
            /// ordering of this operation.
            ///
            /// [`Ordering`]: enum.Ordering.html
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            ///
            /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            #[inline]
            #[$stable]
            pub fn compare_and_swap(&self,
                                    current: $int_type,
                                    new: $int_type,
                                    order: Ordering) -> $int_type {
                match self.compare_exchange(current,
                                            new,
                                            order,
                                            strongest_failure_ordering(order)) {
                    Ok(x) => x,
                    Err(x) => x,
                }
            }

            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// The return value is a result indicating whether the new value was written and
            /// containing the previous value. On success this value is guaranteed to be equal to
            /// `current`.
            ///
            /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
            /// ordering of this operation. The first describes the required ordering if
            /// the operation succeeds while the second describes the required ordering when
            /// the operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and
            /// must be equivalent to or weaker than the success ordering.
            ///
            /// [`Ordering`]: enum.Ordering.html
            /// [`Release`]: enum.Ordering.html#variant.Release
            /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.compare_exchange(5, 10,
            ///                                        Ordering::Acquire,
            ///                                        Ordering::Relaxed),
            ///            Ok(5));
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            ///
            /// assert_eq!(some_isize.compare_exchange(6, 12,
            ///                                        Ordering::SeqCst,
            ///                                        Ordering::Acquire),
            ///            Err(10));
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            #[inline]
            #[$stable_cxchg]
            pub fn compare_exchange(&self,
                                    current: $int_type,
                                    new: $int_type,
                                    success: Ordering,
                                    failure: Ordering) -> Result<$int_type, $int_type> {
                unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
            }

            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even
            /// when the comparison succeeds, which can result in more efficient code on some
            /// platforms. The return value is a result indicating whether the new value was
            /// written and containing the previous value.
            ///
            /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
            /// ordering of this operation. The first describes the required ordering if the
            /// operation succeeds while the second describes the required ordering when the
            /// operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and
            /// must be equivalent to or weaker than the success ordering.
            ///
            /// [`compare_exchange`]: #method.compare_exchange
            /// [`Ordering`]: enum.Ordering.html
            /// [`Release`]: enum.Ordering.html#variant.Release
            /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let val = AtomicIsize::new(4);
            ///
            /// let mut old = val.load(Ordering::Relaxed);
            /// loop {
            ///     let new = old * 2;
            ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
            ///         Ok(_) => break,
            ///         Err(x) => old = x,
            ///     }
            /// }
            /// ```
            #[inline]
            #[$stable_cxchg]
            pub fn compare_exchange_weak(&self,
                                         current: $int_type,
                                         new: $int_type,
                                         success: Ordering,
                                         failure: Ordering) -> Result<$int_type, $int_type> {
                unsafe {
                    atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
                }
            }

            /// Adds to the current value, returning the previous value.
            ///
            /// This operation wraps around on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0);
            /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
            /// assert_eq!(foo.load(Ordering::SeqCst), 10);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_add(self.v.get(), val, order) }
            }

            /// Subtracts from the current value, returning the previous value.
            ///
            /// This operation wraps around on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0);
            /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
            /// assert_eq!(foo.load(Ordering::SeqCst), -10);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_sub(self.v.get(), val, order) }
            }

            /// Bitwise "and" with the current value.
            ///
            /// Performs a bitwise "and" operation on the current value and the argument `val`, and
            /// sets the new value to the result.
            ///
            /// Returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_and(self.v.get(), val, order) }
            }

            /// Bitwise "or" with the current value.
            ///
            /// Performs a bitwise "or" operation on the current value and the argument `val`, and
            /// sets the new value to the result.
            ///
            /// Returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_or(self.v.get(), val, order) }
            }

            /// Bitwise "xor" with the current value.
            ///
            /// Performs a bitwise "xor" operation on the current value and the argument `val`, and
            /// sets the new value to the result.
            ///
            /// Returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_xor(self.v.get(), val, order) }
            }
        }
    }
}

#[cfg(target_has_atomic = "8")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    rustc_const_unstable(feature = "const_atomic_i8_new"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i8 AtomicI8 ATOMIC_I8_INIT
}
#[cfg(target_has_atomic = "8")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    rustc_const_unstable(feature = "const_atomic_u8_new"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u8 AtomicU8 ATOMIC_U8_INIT
}
#[cfg(target_has_atomic = "16")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    rustc_const_unstable(feature = "const_atomic_i16_new"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i16 AtomicI16 ATOMIC_I16_INIT
}
#[cfg(target_has_atomic = "16")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    rustc_const_unstable(feature = "const_atomic_u16_new"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u16 AtomicU16 ATOMIC_U16_INIT
}
#[cfg(target_has_atomic = "32")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    rustc_const_unstable(feature = "const_atomic_i32_new"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i32 AtomicI32 ATOMIC_I32_INIT
}
#[cfg(target_has_atomic = "32")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    rustc_const_unstable(feature = "const_atomic_u32_new"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u32 AtomicU32 ATOMIC_U32_INIT
}
#[cfg(target_has_atomic = "64")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    rustc_const_unstable(feature = "const_atomic_i64_new"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i64 AtomicI64 ATOMIC_I64_INIT
}
#[cfg(target_has_atomic = "64")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    rustc_const_unstable(feature = "const_atomic_u64_new"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u64 AtomicU64 ATOMIC_U64_INIT
}
#[cfg(target_has_atomic = "ptr")]
atomic_int! {
    stable(feature = "rust1", since = "1.0.0"),
    rustc_const_unstable(feature = "const_atomic_isize_new"),
    stable(feature = "extended_compare_and_swap", since = "1.10.0"),
    stable(feature = "atomic_debug", since = "1.3.0"),
    stable(feature = "atomic_access", since = "1.15.0"),
    isize AtomicIsize ATOMIC_ISIZE_INIT
}
#[cfg(target_has_atomic = "ptr")]
atomic_int! {
    stable(feature = "rust1", since = "1.0.0"),
    rustc_const_unstable(feature = "const_atomic_usize_new"),
    stable(feature = "extended_compare_and_swap", since = "1.10.0"),
    stable(feature = "atomic_debug", since = "1.3.0"),
    stable(feature = "atomic_access", since = "1.15.0"),
    usize AtomicUsize ATOMIC_USIZE_INIT
}

#[inline]
fn strongest_failure_ordering(order: Ordering) -> Ordering {
    match order {
        Release => Relaxed,
        Relaxed => Relaxed,
        SeqCst => SeqCst,
        Acquire => Acquire,
        AcqRel => Acquire,
        __Nonexhaustive => __Nonexhaustive,
    }
}

#[inline]
unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
    match order {
        Release => intrinsics::atomic_store_rel(dst, val),
        Relaxed => intrinsics::atomic_store_relaxed(dst, val),
        SeqCst => intrinsics::atomic_store(dst, val),
        Acquire => panic!("there is no such thing as an acquire store"),
        AcqRel => panic!("there is no such thing as an acquire/release store"),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}

#[inline]
unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_load_acq(dst),
        Relaxed => intrinsics::atomic_load_relaxed(dst),
        SeqCst => intrinsics::atomic_load(dst),
        Release => panic!("there is no such thing as a release load"),
        AcqRel => panic!("there is no such thing as an acquire/release load"),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}

#[inline]
unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xchg_acq(dst, val),
        Release => intrinsics::atomic_xchg_rel(dst, val),
        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xchg(dst, val),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}

/// Returns the previous value (like __sync_fetch_and_add).
#[inline]
unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xadd_acq(dst, val),
        Release => intrinsics::atomic_xadd_rel(dst, val),
        AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xadd(dst, val),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}

/// Returns the previous value (like __sync_fetch_and_sub).
#[inline]
unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xsub_acq(dst, val),
        Release => intrinsics::atomic_xsub_rel(dst, val),
        AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xsub(dst, val),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}

#[inline]
unsafe fn atomic_compare_exchange<T>(dst: *mut T,
                                     old: T,
                                     new: T,
                                     success: Ordering,
                                     failure: Ordering)
                                     -> Result<T, T> {
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
        (__Nonexhaustive, _) => panic!("invalid memory ordering"),
        (_, __Nonexhaustive) => panic!("invalid memory ordering"),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    if ok { Ok(val) } else { Err(val) }
}

#[inline]
unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
                                          old: T,
                                          new: T,
                                          success: Ordering,
                                          failure: Ordering)
                                          -> Result<T, T> {
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
        (__Nonexhaustive, _) => panic!("invalid memory ordering"),
        (_, __Nonexhaustive) => panic!("invalid memory ordering"),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    if ok { Ok(val) } else { Err(val) }
}

#[inline]
unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_and_acq(dst, val),
        Release => intrinsics::atomic_and_rel(dst, val),
        AcqRel => intrinsics::atomic_and_acqrel(dst, val),
        Relaxed => intrinsics::atomic_and_relaxed(dst, val),
        SeqCst => intrinsics::atomic_and(dst, val),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}

#[inline]
unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_or_acq(dst, val),
        Release => intrinsics::atomic_or_rel(dst, val),
        AcqRel => intrinsics::atomic_or_acqrel(dst, val),
        Relaxed => intrinsics::atomic_or_relaxed(dst, val),
        SeqCst => intrinsics::atomic_or(dst, val),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}

#[inline]
unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xor_acq(dst, val),
        Release => intrinsics::atomic_xor_rel(dst, val),
        AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xor(dst, val),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}

/// An atomic fence.
///
/// Depending on the specified order, a fence prevents the compiler and CPU from
/// reordering certain types of memory operations around it.
/// That creates synchronizes-with relationships between it and atomic operations
/// or fences in other threads.
///
/// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
/// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
/// exist operations X and Y, both operating on some atomic object 'M' such
/// that A is sequenced before X, Y is sequenced before B and Y observes
/// the change to M. This provides a happens-before dependence between A and B.
///
/// ```text
/// Thread 1                                          Thread 2
///
/// fence(Release);      A --------------
/// x.store(3, Relaxed); X ---------    |
///                                |    |
///                                |    |
///                                -------------> Y  if x.load(Relaxed) == 3 {
///                                     |-------> B      fence(Acquire);
///                                                      ...
///                                                  }
/// ```
///
/// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
/// with a fence.
///
/// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
/// and [`Release`] semantics, participates in the global program order of the
/// other [`SeqCst`] operations and/or fences.
///
/// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
///
/// # Panics
///
/// Panics if `order` is [`Relaxed`].
///
/// # Examples
///
/// ```
/// use std::sync::atomic::AtomicBool;
/// use std::sync::atomic::fence;
/// use std::sync::atomic::Ordering;
///
/// // A mutual exclusion primitive based on spinlock.
/// pub struct Mutex {
///     flag: AtomicBool,
/// }
///
/// impl Mutex {
///     pub fn new() -> Mutex {
///         Mutex {
///             flag: AtomicBool::new(false),
///         }
///     }
///
///     pub fn lock(&self) {
///         // Spin until the previous value was `false`, i.e. until we
///         // are the thread that flipped the flag to `true`.
///         while self.flag.compare_and_swap(false, true, Ordering::Relaxed) {}
///         // This fence synchronizes-with the store in `unlock`.
///         fence(Ordering::Acquire);
///     }
///
///     pub fn unlock(&self) {
///         self.flag.store(false, Ordering::Release);
///     }
/// }
/// ```
///
/// [`Ordering`]: enum.Ordering.html
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn fence(order: Ordering) {
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence_acq(),
            Release => intrinsics::atomic_fence_rel(),
            AcqRel => intrinsics::atomic_fence_acqrel(),
            SeqCst => intrinsics::atomic_fence(),
            Relaxed => panic!("there is no such thing as a relaxed fence"),
            __Nonexhaustive => panic!("invalid memory ordering"),
        }
    }
}

/// A compiler memory fence.
///
/// `compiler_fence` does not emit any machine code, but prevents the compiler from re-ordering
/// memory operations across this point. Which reorderings are disallowed is dictated by the given
/// [`Ordering`]. Note that `compiler_fence` does *not* introduce inter-thread memory
/// synchronization; for that, a [`fence`] is needed.
///
/// The re-orderings prevented by the different ordering semantics are:
///
/// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
/// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
/// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
/// - with [`AcqRel`], both of the above rules are enforced.
///
/// # Panics
///
/// Panics if `order` is [`Relaxed`].
///
/// [`fence`]: fn.fence.html
/// [`Ordering`]: enum.Ordering.html
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
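///
/// # Examples
///
/// A minimal sketch of the classic use case: a flag and a value shared with an
/// interrupt or signal handler that runs on the same core (the
/// `IMPORTANT_VARIABLE` and `IS_READY` statics are illustrative):
///
/// ```
/// #![feature(compiler_fences)]
/// use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
/// use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT};
/// use std::sync::atomic::compiler_fence;
///
/// static IMPORTANT_VARIABLE: AtomicUsize = ATOMIC_USIZE_INIT;
/// static IS_READY: AtomicBool = ATOMIC_BOOL_INIT;
///
/// fn main() {
///     IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
///     // Prevent the compiler from moving the store to IS_READY
///     // above the store to IMPORTANT_VARIABLE.
///     compiler_fence(Ordering::Release);
///     IS_READY.store(true, Ordering::Relaxed);
/// }
/// ```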
#[inline]
#[unstable(feature = "compiler_fences", issue = "41091")]
pub fn compiler_fence(order: Ordering) {
    unsafe {
        match order {
            Acquire => intrinsics::atomic_singlethreadfence_acq(),
            Release => intrinsics::atomic_singlethreadfence_rel(),
            AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
            SeqCst => intrinsics::atomic_singlethreadfence(),
            Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
            __Nonexhaustive => panic!("invalid memory ordering"),
        }
    }
}

#[cfg(target_has_atomic = "8")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl fmt::Debug for AtomicBool {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("AtomicBool").field(&self.load(Ordering::SeqCst)).finish()
    }
}

#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl<T> fmt::Debug for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("AtomicPtr").field(&self.load(Ordering::SeqCst)).finish()
    }
}