1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
13 //! Atomic types provide primitive shared-memory communication between
14 //! threads, and are the building blocks of other concurrent
17 //! This module defines atomic versions of a select number of primitive
18 //! types, including [`AtomicBool`], [`AtomicIsize`], and [`AtomicUsize`].
19 //! Atomic types present operations that, when used correctly, synchronize
20 //! updates between threads.
22 //! [`AtomicBool`]: struct.AtomicBool.html
23 //! [`AtomicIsize`]: struct.AtomicIsize.html
24 //! [`AtomicUsize`]: struct.AtomicUsize.html
26 //! Each method takes an [`Ordering`] which represents the strength of
27 //! the memory barrier for that operation. These orderings are the
28 //! same as [LLVM atomic orderings][1]. For more information see the [nomicon][2].
30 //! [`Ordering`]: enum.Ordering.html
32 //! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
33 //! [2]: ../../../nomicon/atomics.html
35 //! Atomic variables are safe to share between threads (they implement [`Sync`])
36 //! but they do not themselves provide the mechanism for sharing and follow the
37 //! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
38 //! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
39 //! atomically-reference-counted shared pointer).
41 //! [`Sync`]: ../../marker/trait.Sync.html
42 //! [arc]: ../../../std/sync/struct.Arc.html
44 //! Most atomic types may be stored in static variables, initialized using
45 //! the provided static initializers like [`ATOMIC_BOOL_INIT`]. Atomic statics
46 //! are often used for lazy global initialization.
48 //! [`ATOMIC_BOOL_INIT`]: constant.ATOMIC_BOOL_INIT.html
52 //! A simple spinlock:
55 //! use std::sync::Arc;
56 //! use std::sync::atomic::{AtomicUsize, Ordering};
60 //! let spinlock = Arc::new(AtomicUsize::new(1));
62 //! let spinlock_clone = spinlock.clone();
63 //! let thread = thread::spawn(move|| {
64 //! spinlock_clone.store(0, Ordering::SeqCst);
67 //! // Wait for the other thread to release the lock
68 //! while spinlock.load(Ordering::SeqCst) != 0 {}
70 //! if let Err(panic) = thread.join() {
71 //! println!("Thread had an error: {:?}", panic);
76 //! Keep a global count of live threads:
79 //! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
81 //! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
83 //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
84 //! println!("live threads: {}", old_thread_count + 1);
87 #![stable(feature = "rust1", since = "1.0.0")]
88 #![cfg_attr(not(target_has_atomic = "8"), allow(dead_code))]
89 #![cfg_attr(not(target_has_atomic = "8"), allow(unused_imports))]
91 use self::Ordering::*;
97 /// A boolean type which can be safely shared between threads.
99 /// This type has the same in-memory representation as a `bool`.
100 #[cfg(target_has_atomic = "8")]
101 #[stable(feature = "rust1", since = "1.0.0")]
102 pub struct AtomicBool {
106 #[cfg(target_has_atomic = "8")]
107 #[stable(feature = "rust1", since = "1.0.0")]
108 impl Default for AtomicBool {
109 /// Creates an `AtomicBool` initialized to `false`.
110 fn default() -> Self {
115 // Send is implicitly implemented for AtomicBool.
116 #[cfg(target_has_atomic = "8")]
117 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: sharing `&AtomicBool` across threads is sound because every
// access to the inner cell goes through an atomic operation
// (`atomic_load`, `atomic_store`, swap/CAS, ...) — see the methods above.
118 unsafe impl Sync for AtomicBool {}
120 /// A raw pointer type which can be safely shared between threads.
122 /// This type has the same in-memory representation as a `*mut T`.
123 #[cfg(target_has_atomic = "ptr")]
124 #[stable(feature = "rust1", since = "1.0.0")]
125 pub struct AtomicPtr<T> {
126 p: UnsafeCell<*mut T>,
129 #[cfg(target_has_atomic = "ptr")]
130 #[stable(feature = "rust1", since = "1.0.0")]
131 impl<T> Default for AtomicPtr<T> {
132 /// Creates a null `AtomicPtr<T>`.
133 fn default() -> AtomicPtr<T> {
134 AtomicPtr::new(::ptr::null_mut())
138 #[cfg(target_has_atomic = "ptr")]
139 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: `AtomicPtr` contains a raw `*mut T`, which is not `Send` by
// default, so `Send` must be implemented manually. Moving the atomic to
// another thread is sound because the pointer is only ever accessed
// atomically; no `T` is dereferenced by this type itself.
140 unsafe impl<T> Send for AtomicPtr<T> {}
141 #[cfg(target_has_atomic = "ptr")]
142 #[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: `*mut T` is not `Sync` by default; sharing `&AtomicPtr<T>` is
// sound because all reads and writes of the stored pointer go through
// atomic operations (see the `load`/`store`/`swap`/CAS methods below).
143 unsafe impl<T> Sync for AtomicPtr<T> {}
145 /// Atomic memory orderings
147 /// Memory orderings limit the ways that both the compiler and CPU may reorder
148 /// instructions around atomic operations. At its most restrictive,
149 /// "sequentially consistent" atomics allow neither reads nor writes
150 /// to be moved either before or after the atomic operation; on the other end
151 /// "relaxed" atomics allow all reorderings.
153 /// Rust's memory orderings are [the same as
154 /// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
156 /// For more information see the [nomicon][1].
157 /// [1]: ../../../nomicon/atomics.html
158 #[stable(feature = "rust1", since = "1.0.0")]
159 #[derive(Copy, Clone, Debug)]
161 /// No ordering constraints, only atomic operations.
163 /// Corresponds to LLVM's [`Monotonic`] ordering.
165 /// [`Monotonic`]: http://llvm.org/docs/Atomics.html#monotonic
166 #[stable(feature = "rust1", since = "1.0.0")]
168 /// When coupled with a store, all previous writes become visible
169 /// to the other threads that perform a load with [`Acquire`] ordering
170 /// on the same value.
172 /// [`Acquire`]: http://llvm.org/docs/Atomics.html#acquire
173 #[stable(feature = "rust1", since = "1.0.0")]
175 /// When coupled with a load, all subsequent loads will see data
176 /// written before a store with [`Release`] ordering on the same value
177 /// in other threads.
179 /// [`Release`]: http://llvm.org/docs/Atomics.html#release
180 #[stable(feature = "rust1", since = "1.0.0")]
182 /// When coupled with a load, uses [`Acquire`] ordering, and with a store
183 /// [`Release`] ordering.
185 /// [`Acquire`]: http://llvm.org/docs/Atomics.html#acquire
186 /// [`Release`]: http://llvm.org/docs/Atomics.html#release
187 #[stable(feature = "rust1", since = "1.0.0")]
189 /// Like `AcqRel` with the additional guarantee that all threads see all
190 /// sequentially consistent operations in the same order.
191 #[stable(feature = "rust1", since = "1.0.0")]
193 // Prevent exhaustive matching to allow for future extension
195 #[unstable(feature = "future_atomic_orderings", issue = "0")]
199 /// An [`AtomicBool`] initialized to `false`.
///
/// Usable as a static initializer; see the module docs on initializing
/// atomic statics with the provided `ATOMIC_*_INIT` constants.
201 /// [`AtomicBool`]: struct.AtomicBool.html
202 #[cfg(target_has_atomic = "8")]
203 #[stable(feature = "rust1", since = "1.0.0")]
204 pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
206 #[cfg(target_has_atomic = "8")]
208 /// Creates a new `AtomicBool`.
213 /// use std::sync::atomic::AtomicBool;
215 /// let atomic_true = AtomicBool::new(true);
216 /// let atomic_false = AtomicBool::new(false);
219 #[stable(feature = "rust1", since = "1.0.0")]
220 pub const fn new(v: bool) -> AtomicBool {
221 AtomicBool { v: UnsafeCell::new(v as u8) }
224 /// Returns a mutable reference to the underlying `bool`.
226 /// This is safe because the mutable reference guarantees that no other threads are
227 /// concurrently accessing the atomic data.
232 /// use std::sync::atomic::{AtomicBool, Ordering};
234 /// let mut some_bool = AtomicBool::new(true);
235 /// assert_eq!(*some_bool.get_mut(), true);
236 /// *some_bool.get_mut() = false;
237 /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
240 #[stable(feature = "atomic_access", since = "1.15.0")]
241 pub fn get_mut(&mut self) -> &mut bool {
242 unsafe { &mut *(self.v.get() as *mut bool) }
245 /// Consumes the atomic and returns the contained value.
247 /// This is safe because passing `self` by value guarantees that no other threads are
248 /// concurrently accessing the atomic data.
253 /// use std::sync::atomic::AtomicBool;
255 /// let some_bool = AtomicBool::new(true);
256 /// assert_eq!(some_bool.into_inner(), true);
259 #[stable(feature = "atomic_access", since = "1.15.0")]
260 pub fn into_inner(self) -> bool {
261 unsafe { self.v.into_inner() != 0 }
264 /// Loads a value from the bool.
266 /// `load` takes an [`Ordering`] argument which describes the memory ordering
267 /// of this operation.
271 /// Panics if `order` is [`Release`] or [`AcqRel`].
273 /// [`Ordering`]: enum.Ordering.html
274 /// [`Release`]: enum.Ordering.html#variant.Release
275 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
280 /// use std::sync::atomic::{AtomicBool, Ordering};
282 /// let some_bool = AtomicBool::new(true);
284 /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
287 #[stable(feature = "rust1", since = "1.0.0")]
288 pub fn load(&self, order: Ordering) -> bool {
289 unsafe { atomic_load(self.v.get(), order) != 0 }
292 /// Stores a value into the bool.
294 /// `store` takes an [`Ordering`] argument which describes the memory ordering
295 /// of this operation.
297 /// [`Ordering`]: enum.Ordering.html
302 /// use std::sync::atomic::{AtomicBool, Ordering};
304 /// let some_bool = AtomicBool::new(true);
306 /// some_bool.store(false, Ordering::Relaxed);
307 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
312 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
314 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
315 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
317 #[stable(feature = "rust1", since = "1.0.0")]
318 pub fn store(&self, val: bool, order: Ordering) {
320 atomic_store(self.v.get(), val as u8, order);
324 /// Stores a value into the bool, returning the previous value.
326 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
327 /// of this operation.
329 /// [`Ordering`]: enum.Ordering.html
334 /// use std::sync::atomic::{AtomicBool, Ordering};
336 /// let some_bool = AtomicBool::new(true);
338 /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
339 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
342 #[stable(feature = "rust1", since = "1.0.0")]
343 pub fn swap(&self, val: bool, order: Ordering) -> bool {
344 unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
347 /// Stores a value into the `bool` if the current value is the same as the `current` value.
349 /// The return value is always the previous value. If it is equal to `current`, then the value
352 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
353 /// ordering of this operation.
355 /// [`Ordering`]: enum.Ordering.html
360 /// use std::sync::atomic::{AtomicBool, Ordering};
362 /// let some_bool = AtomicBool::new(true);
364 /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
365 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
367 /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
368 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
371 #[stable(feature = "rust1", since = "1.0.0")]
372 pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
373 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
379 /// Stores a value into the `bool` if the current value is the same as the `current` value.
381 /// The return value is a result indicating whether the new value was written and containing
382 /// the previous value. On success this value is guaranteed to be equal to `current`.
384 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
385 /// ordering of this operation. The first describes the required ordering if the
386 /// operation succeeds while the second describes the required ordering when the
387 /// operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and must
388 /// be equivalent or weaker than the success ordering.
390 /// [`Ordering`]: enum.Ordering.html
391 /// [`Release`]: enum.Ordering.html#variant.Release
392 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
397 /// use std::sync::atomic::{AtomicBool, Ordering};
399 /// let some_bool = AtomicBool::new(true);
401 /// assert_eq!(some_bool.compare_exchange(true,
403 /// Ordering::Acquire,
404 /// Ordering::Relaxed),
406 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
408 /// assert_eq!(some_bool.compare_exchange(true, true,
409 /// Ordering::SeqCst,
410 /// Ordering::Acquire),
412 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
415 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
416 pub fn compare_exchange(&self,
421 -> Result<bool, bool> {
423 atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
426 Err(x) => Err(x != 0),
430 /// Stores a value into the `bool` if the current value is the same as the `current` value.
432 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
433 /// comparison succeeds, which can result in more efficient code on some platforms. The
434 /// return value is a result indicating whether the new value was written and containing the
437 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
438 /// ordering of this operation. The first describes the required ordering if the operation
439 /// succeeds while the second describes the required ordering when the operation fails. The
440 /// failure ordering can't be [`Release`] or [`AcqRel`] and must be equivalent or
441 /// weaker than the success ordering.
443 /// [`compare_exchange`]: #method.compare_exchange
444 /// [`Ordering`]: enum.Ordering.html
445 /// [`Release`]: enum.Ordering.html#variant.Release
446 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
451 /// use std::sync::atomic::{AtomicBool, Ordering};
453 /// let val = AtomicBool::new(false);
456 /// let mut old = val.load(Ordering::Relaxed);
458 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
460 /// Err(x) => old = x,
465 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
466 pub fn compare_exchange_weak(&self,
471 -> Result<bool, bool> {
473 atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
476 Err(x) => Err(x != 0),
480 /// Logical "and" with a boolean value.
482 /// Performs a logical "and" operation on the current value and the argument `val`, and sets
483 /// the new value to the result.
485 /// Returns the previous value.
490 /// use std::sync::atomic::{AtomicBool, Ordering};
492 /// let foo = AtomicBool::new(true);
493 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
494 /// assert_eq!(foo.load(Ordering::SeqCst), false);
496 /// let foo = AtomicBool::new(true);
497 /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
498 /// assert_eq!(foo.load(Ordering::SeqCst), true);
500 /// let foo = AtomicBool::new(false);
501 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
502 /// assert_eq!(foo.load(Ordering::SeqCst), false);
505 #[stable(feature = "rust1", since = "1.0.0")]
506 pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
507 unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
510 /// Logical "nand" with a boolean value.
512 /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
513 /// the new value to the result.
515 /// Returns the previous value.
520 /// use std::sync::atomic::{AtomicBool, Ordering};
522 /// let foo = AtomicBool::new(true);
523 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
524 /// assert_eq!(foo.load(Ordering::SeqCst), true);
526 /// let foo = AtomicBool::new(true);
527 /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
528 /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
529 /// assert_eq!(foo.load(Ordering::SeqCst), false);
531 /// let foo = AtomicBool::new(false);
532 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
533 /// assert_eq!(foo.load(Ordering::SeqCst), true);
536 #[stable(feature = "rust1", since = "1.0.0")]
537 pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
538 // We can't use atomic_nand here because it can result in a bool with
539 // an invalid value. This happens because the atomic operation is done
540 // with an 8-bit integer internally, which would set the upper 7 bits.
541 // So we just use a compare-exchange loop instead, which is what the
542 // intrinsic actually expands to anyways on many platforms.
543 let mut old = self.load(Relaxed);
545 let new = !(old && val);
546 match self.compare_exchange_weak(old, new, order, Relaxed) {
554 /// Logical "or" with a boolean value.
556 /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
557 /// new value to the result.
559 /// Returns the previous value.
564 /// use std::sync::atomic::{AtomicBool, Ordering};
566 /// let foo = AtomicBool::new(true);
567 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
568 /// assert_eq!(foo.load(Ordering::SeqCst), true);
570 /// let foo = AtomicBool::new(true);
571 /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
572 /// assert_eq!(foo.load(Ordering::SeqCst), true);
574 /// let foo = AtomicBool::new(false);
575 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
576 /// assert_eq!(foo.load(Ordering::SeqCst), false);
579 #[stable(feature = "rust1", since = "1.0.0")]
580 pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
581 unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
584 /// Logical "xor" with a boolean value.
586 /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
587 /// the new value to the result.
589 /// Returns the previous value.
594 /// use std::sync::atomic::{AtomicBool, Ordering};
596 /// let foo = AtomicBool::new(true);
597 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
598 /// assert_eq!(foo.load(Ordering::SeqCst), true);
600 /// let foo = AtomicBool::new(true);
601 /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
602 /// assert_eq!(foo.load(Ordering::SeqCst), false);
604 /// let foo = AtomicBool::new(false);
605 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
606 /// assert_eq!(foo.load(Ordering::SeqCst), false);
609 #[stable(feature = "rust1", since = "1.0.0")]
610 pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
611 unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
615 #[cfg(target_has_atomic = "ptr")]
616 impl<T> AtomicPtr<T> {
617 /// Creates a new `AtomicPtr`.
622 /// use std::sync::atomic::AtomicPtr;
624 /// let ptr = &mut 5;
625 /// let atomic_ptr = AtomicPtr::new(ptr);
628 #[stable(feature = "rust1", since = "1.0.0")]
629 pub const fn new(p: *mut T) -> AtomicPtr<T> {
630 AtomicPtr { p: UnsafeCell::new(p) }
633 /// Returns a mutable reference to the underlying pointer.
635 /// This is safe because the mutable reference guarantees that no other threads are
636 /// concurrently accessing the atomic data.
641 /// use std::sync::atomic::{AtomicPtr, Ordering};
643 /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
644 /// *atomic_ptr.get_mut() = &mut 5;
645 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
648 #[stable(feature = "atomic_access", since = "1.15.0")]
649 pub fn get_mut(&mut self) -> &mut *mut T {
650 unsafe { &mut *self.p.get() }
653 /// Consumes the atomic and returns the contained value.
655 /// This is safe because passing `self` by value guarantees that no other threads are
656 /// concurrently accessing the atomic data.
661 /// use std::sync::atomic::AtomicPtr;
663 /// let atomic_ptr = AtomicPtr::new(&mut 5);
664 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
667 #[stable(feature = "atomic_access", since = "1.15.0")]
668 pub fn into_inner(self) -> *mut T {
669 unsafe { self.p.into_inner() }
672 /// Loads a value from the pointer.
674 /// `load` takes an [`Ordering`] argument which describes the memory ordering
675 /// of this operation.
679 /// Panics if `order` is [`Release`] or [`AcqRel`].
681 /// [`Ordering`]: enum.Ordering.html
682 /// [`Release`]: enum.Ordering.html#variant.Release
683 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
688 /// use std::sync::atomic::{AtomicPtr, Ordering};
690 /// let ptr = &mut 5;
691 /// let some_ptr = AtomicPtr::new(ptr);
693 /// let value = some_ptr.load(Ordering::Relaxed);
696 #[stable(feature = "rust1", since = "1.0.0")]
697 pub fn load(&self, order: Ordering) -> *mut T {
698 unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
701 /// Stores a value into the pointer.
703 /// `store` takes an [`Ordering`] argument which describes the memory ordering
704 /// of this operation.
706 /// [`Ordering`]: enum.Ordering.html
711 /// use std::sync::atomic::{AtomicPtr, Ordering};
713 /// let ptr = &mut 5;
714 /// let some_ptr = AtomicPtr::new(ptr);
716 /// let other_ptr = &mut 10;
718 /// some_ptr.store(other_ptr, Ordering::Relaxed);
723 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
725 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
726 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
728 #[stable(feature = "rust1", since = "1.0.0")]
729 pub fn store(&self, ptr: *mut T, order: Ordering) {
731 atomic_store(self.p.get() as *mut usize, ptr as usize, order);
735 /// Stores a value into the pointer, returning the previous value.
737 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
738 /// of this operation.
740 /// [`Ordering`]: enum.Ordering.html
745 /// use std::sync::atomic::{AtomicPtr, Ordering};
747 /// let ptr = &mut 5;
748 /// let some_ptr = AtomicPtr::new(ptr);
750 /// let other_ptr = &mut 10;
752 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
755 #[stable(feature = "rust1", since = "1.0.0")]
756 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
757 unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
760 /// Stores a value into the pointer if the current value is the same as the `current` value.
762 /// The return value is always the previous value. If it is equal to `current`, then the value
765 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
766 /// ordering of this operation.
768 /// [`Ordering`]: enum.Ordering.html
773 /// use std::sync::atomic::{AtomicPtr, Ordering};
775 /// let ptr = &mut 5;
776 /// let some_ptr = AtomicPtr::new(ptr);
778 /// let other_ptr = &mut 10;
779 /// let another_ptr = &mut 10;
781 /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
784 #[stable(feature = "rust1", since = "1.0.0")]
785 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
786 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
792 /// Stores a value into the pointer if the current value is the same as the `current` value.
794 /// The return value is a result indicating whether the new value was written and containing
795 /// the previous value. On success this value is guaranteed to be equal to `current`.
797 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
798 /// ordering of this operation. The first describes the required ordering if
799 /// the operation succeeds while the second describes the required ordering when
800 /// the operation fails. The failure ordering can't be [`Release`] or [`AcqRel`]
801 /// and must be equivalent or weaker than the success ordering.
803 /// [`Ordering`]: enum.Ordering.html
804 /// [`Release`]: enum.Ordering.html#variant.Release
805 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
810 /// use std::sync::atomic::{AtomicPtr, Ordering};
812 /// let ptr = &mut 5;
813 /// let some_ptr = AtomicPtr::new(ptr);
815 /// let other_ptr = &mut 10;
816 /// let another_ptr = &mut 10;
818 /// let value = some_ptr.compare_exchange(other_ptr, another_ptr,
819 /// Ordering::SeqCst, Ordering::Relaxed);
822 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
823 pub fn compare_exchange(&self,
828 -> Result<*mut T, *mut T> {
830 let res = atomic_compare_exchange(self.p.get() as *mut usize,
836 Ok(x) => Ok(x as *mut T),
837 Err(x) => Err(x as *mut T),
842 /// Stores a value into the pointer if the current value is the same as the `current` value.
844 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
845 /// comparison succeeds, which can result in more efficient code on some platforms. The
846 /// return value is a result indicating whether the new value was written and containing the
849 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
850 /// ordering of this operation. The first describes the required ordering if the operation
851 /// succeeds while the second describes the required ordering when the operation fails. The
852 /// failure ordering can't be [`Release`] or [`AcqRel`] and must be equivalent or
853 /// weaker than the success ordering.
855 /// [`compare_exchange`]: #method.compare_exchange
856 /// [`Ordering`]: enum.Ordering.html
857 /// [`Release`]: enum.Ordering.html#variant.Release
858 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
863 /// use std::sync::atomic::{AtomicPtr, Ordering};
865 /// let some_ptr = AtomicPtr::new(&mut 5);
867 /// let new = &mut 10;
868 /// let mut old = some_ptr.load(Ordering::Relaxed);
870 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
872 /// Err(x) => old = x,
877 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
878 pub fn compare_exchange_weak(&self,
883 -> Result<*mut T, *mut T> {
885 let res = atomic_compare_exchange_weak(self.p.get() as *mut usize,
891 Ok(x) => Ok(x as *mut T),
892 Err(x) => Err(x as *mut T),
898 macro_rules! atomic_int {
903 $int_type:ident $atomic_type:ident $atomic_init:ident) => {
904 /// An integer type which can be safely shared between threads.
906 /// This type has the same in-memory representation as the underlying integer type.
908 pub struct $atomic_type {
909 v: UnsafeCell<$int_type>,
912 /// An atomic integer initialized to `0`.
914 pub const $atomic_init: $atomic_type = $atomic_type::new(0);
917 impl Default for $atomic_type {
918 fn default() -> Self {
919 Self::new(Default::default())
924 impl fmt::Debug for $atomic_type {
925 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
926 f.debug_tuple(stringify!($atomic_type))
927 .field(&self.load(Ordering::SeqCst))
932 // Send is implicitly implemented.
934 unsafe impl Sync for $atomic_type {}
937 /// Creates a new atomic integer.
942 /// use std::sync::atomic::AtomicIsize;
944 /// let atomic_forty_two = AtomicIsize::new(42);
948 pub const fn new(v: $int_type) -> Self {
949 $atomic_type {v: UnsafeCell::new(v)}
952 /// Returns a mutable reference to the underlying integer.
954 /// This is safe because the mutable reference guarantees that no other threads are
955 /// concurrently accessing the atomic data.
960 /// use std::sync::atomic::{AtomicIsize, Ordering};
962 /// let mut some_isize = AtomicIsize::new(10);
963 /// assert_eq!(*some_isize.get_mut(), 10);
964 /// *some_isize.get_mut() = 5;
965 /// assert_eq!(some_isize.load(Ordering::SeqCst), 5);
969 pub fn get_mut(&mut self) -> &mut $int_type {
970 unsafe { &mut *self.v.get() }
973 /// Consumes the atomic and returns the contained value.
975 /// This is safe because passing `self` by value guarantees that no other threads are
976 /// concurrently accessing the atomic data.
981 /// use std::sync::atomic::AtomicIsize;
983 /// let some_isize = AtomicIsize::new(5);
984 /// assert_eq!(some_isize.into_inner(), 5);
988 pub fn into_inner(self) -> $int_type {
989 unsafe { self.v.into_inner() }
992 /// Loads a value from the atomic integer.
994 /// `load` takes an [`Ordering`] argument which describes the memory ordering of this
999 /// Panics if `order` is [`Release`] or [`AcqRel`].
1001 /// [`Ordering`]: enum.Ordering.html
1002 /// [`Release`]: enum.Ordering.html#variant.Release
1003 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1008 /// use std::sync::atomic::{AtomicIsize, Ordering};
1010 /// let some_isize = AtomicIsize::new(5);
1012 /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
1016 pub fn load(&self, order: Ordering) -> $int_type {
1017 unsafe { atomic_load(self.v.get(), order) }
1020 /// Stores a value into the atomic integer.
1022 /// `store` takes an [`Ordering`] argument which describes the memory ordering of this
1025 /// [`Ordering`]: enum.Ordering.html
1030 /// use std::sync::atomic::{AtomicIsize, Ordering};
1032 /// let some_isize = AtomicIsize::new(5);
1034 /// some_isize.store(10, Ordering::Relaxed);
1035 /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
1040 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
1042 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1043 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1046 pub fn store(&self, val: $int_type, order: Ordering) {
1047 unsafe { atomic_store(self.v.get(), val, order); }
1050 /// Stores a value into the atomic integer, returning the previous value.
1052 /// `swap` takes an [`Ordering`] argument which describes the memory ordering of this
1055 /// [`Ordering`]: enum.Ordering.html
1060 /// use std::sync::atomic::{AtomicIsize, Ordering};
1062 /// let some_isize = AtomicIsize::new(5);
1064 /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
1068 pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
1069 unsafe { atomic_swap(self.v.get(), val, order) }
1072 /// Stores a value into the atomic integer if the current value is the same as the
1073 /// `current` value.
1075 /// The return value is always the previous value. If it is equal to `current`, then the
1076 /// value was updated.
1078 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1079 /// ordering of this operation.
1081 /// [`Ordering`]: enum.Ordering.html
1086 /// use std::sync::atomic::{AtomicIsize, Ordering};
1088 /// let some_isize = AtomicIsize::new(5);
1090 /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
1091 /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
1093 /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
1094 /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
1098 pub fn compare_and_swap(&self,
1101 order: Ordering) -> $int_type {
1102 match self.compare_exchange(current,
1105 strongest_failure_ordering(order)) {
1111 /// Stores a value into the atomic integer if the current value is the same as the
1112 /// `current` value.
1114 /// The return value is a result indicating whether the new value was written and
1115 /// containing the previous value. On success this value is guaranteed to be equal to
1118 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1119 /// ordering of this operation. The first describes the required ordering if
1120 /// the operation succeeds while the second describes the required ordering when
1121 /// the operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and
1122 /// must be equivalent or weaker than the success ordering.
1124 /// [`Ordering`]: enum.Ordering.html
1125 /// [`Release`]: enum.Ordering.html#variant.Release
1126 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1131 /// use std::sync::atomic::{AtomicIsize, Ordering};
1133 /// let some_isize = AtomicIsize::new(5);
1135 /// assert_eq!(some_isize.compare_exchange(5, 10,
1136 /// Ordering::Acquire,
1137 /// Ordering::Relaxed),
1139 /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
1141 /// assert_eq!(some_isize.compare_exchange(6, 12,
1142 /// Ordering::SeqCst,
1143 /// Ordering::Acquire),
1145 /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
1149 pub fn compare_exchange(&self,
1153 failure: Ordering) -> Result<$int_type, $int_type> {
1154 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
1157 /// Stores a value into the atomic integer if the current value is the same as the
1158 /// `current` value.
1160 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even
1161 /// when the comparison succeeds, which can result in more efficient code on some
1162 /// platforms. The return value is a result indicating whether the new value was
1163 /// written and containing the previous value.
1165 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1166 /// ordering of this operation. The first describes the required ordering if the
1167 /// operation succeeds while the second describes the required ordering when the
1168 /// operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and
1169 /// must be equivalent or weaker than the success ordering.
1171 /// [`compare_exchange`]: #method.compare_exchange
1172 /// [`Ordering`]: enum.Ordering.html
1173 /// [`Release`]: enum.Ordering.html#variant.Release
1174 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1179 /// use std::sync::atomic::{AtomicIsize, Ordering};
1181 /// let val = AtomicIsize::new(4);
1183 /// let mut old = val.load(Ordering::Relaxed);
1185 /// let new = old * 2;
1186 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1188 /// Err(x) => old = x,
1194 pub fn compare_exchange_weak(&self,
1198 failure: Ordering) -> Result<$int_type, $int_type> {
1200 atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
1204 /// Adds to the current value, returning the previous value.
1206 /// This operation wraps around on overflow.
1211 /// use std::sync::atomic::{AtomicIsize, Ordering};
1213 /// let foo = AtomicIsize::new(0);
1214 /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
1215 /// assert_eq!(foo.load(Ordering::SeqCst), 10);
1219 pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
1220 unsafe { atomic_add(self.v.get(), val, order) }
1223 /// Subtracts from the current value, returning the previous value.
1225 /// This operation wraps around on overflow.
1230 /// use std::sync::atomic::{AtomicIsize, Ordering};
1232 /// let foo = AtomicIsize::new(0);
1233 /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
1234 /// assert_eq!(foo.load(Ordering::SeqCst), -10);
1238 pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
1239 unsafe { atomic_sub(self.v.get(), val, order) }
1242 /// Bitwise "and" with the current value.
1244 /// Performs a bitwise "and" operation on the current value and the argument `val`, and
1245 /// sets the new value to the result.
1247 /// Returns the previous value.
1252 /// use std::sync::atomic::{AtomicIsize, Ordering};
1254 /// let foo = AtomicIsize::new(0b101101);
1255 /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
1256 /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
1259 pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
1260 unsafe { atomic_and(self.v.get(), val, order) }
1263 /// Bitwise "or" with the current value.
1265 /// Performs a bitwise "or" operation on the current value and the argument `val`, and
1266 /// sets the new value to the result.
1268 /// Returns the previous value.
1273 /// use std::sync::atomic::{AtomicIsize, Ordering};
1275 /// let foo = AtomicIsize::new(0b101101);
1276 /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
1277 /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
1280 pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
1281 unsafe { atomic_or(self.v.get(), val, order) }
1284 /// Bitwise "xor" with the current value.
1286 /// Performs a bitwise "xor" operation on the current value and the argument `val`, and
1287 /// sets the new value to the result.
1289 /// Returns the previous value.
1294 /// use std::sync::atomic::{AtomicIsize, Ordering};
1296 /// let foo = AtomicIsize::new(0b101101);
1297 /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
1298 /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
1301 pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
1302 unsafe { atomic_xor(self.v.get(), val, order) }
1308 #[cfg(target_has_atomic = "8")]
1310 unstable(feature = "integer_atomics", issue = "32976"),
1311 unstable(feature = "integer_atomics", issue = "32976"),
1312 unstable(feature = "integer_atomics", issue = "32976"),
1313 unstable(feature = "integer_atomics", issue = "32976"),
1314 i8 AtomicI8 ATOMIC_I8_INIT
1316 #[cfg(target_has_atomic = "8")]
1318 unstable(feature = "integer_atomics", issue = "32976"),
1319 unstable(feature = "integer_atomics", issue = "32976"),
1320 unstable(feature = "integer_atomics", issue = "32976"),
1321 unstable(feature = "integer_atomics", issue = "32976"),
1322 u8 AtomicU8 ATOMIC_U8_INIT
1324 #[cfg(target_has_atomic = "16")]
1326 unstable(feature = "integer_atomics", issue = "32976"),
1327 unstable(feature = "integer_atomics", issue = "32976"),
1328 unstable(feature = "integer_atomics", issue = "32976"),
1329 unstable(feature = "integer_atomics", issue = "32976"),
1330 i16 AtomicI16 ATOMIC_I16_INIT
1332 #[cfg(target_has_atomic = "16")]
1334 unstable(feature = "integer_atomics", issue = "32976"),
1335 unstable(feature = "integer_atomics", issue = "32976"),
1336 unstable(feature = "integer_atomics", issue = "32976"),
1337 unstable(feature = "integer_atomics", issue = "32976"),
1338 u16 AtomicU16 ATOMIC_U16_INIT
1340 #[cfg(target_has_atomic = "32")]
1342 unstable(feature = "integer_atomics", issue = "32976"),
1343 unstable(feature = "integer_atomics", issue = "32976"),
1344 unstable(feature = "integer_atomics", issue = "32976"),
1345 unstable(feature = "integer_atomics", issue = "32976"),
1346 i32 AtomicI32 ATOMIC_I32_INIT
1348 #[cfg(target_has_atomic = "32")]
1350 unstable(feature = "integer_atomics", issue = "32976"),
1351 unstable(feature = "integer_atomics", issue = "32976"),
1352 unstable(feature = "integer_atomics", issue = "32976"),
1353 unstable(feature = "integer_atomics", issue = "32976"),
1354 u32 AtomicU32 ATOMIC_U32_INIT
1356 #[cfg(target_has_atomic = "64")]
1358 unstable(feature = "integer_atomics", issue = "32976"),
1359 unstable(feature = "integer_atomics", issue = "32976"),
1360 unstable(feature = "integer_atomics", issue = "32976"),
1361 unstable(feature = "integer_atomics", issue = "32976"),
1362 i64 AtomicI64 ATOMIC_I64_INIT
1364 #[cfg(target_has_atomic = "64")]
1366 unstable(feature = "integer_atomics", issue = "32976"),
1367 unstable(feature = "integer_atomics", issue = "32976"),
1368 unstable(feature = "integer_atomics", issue = "32976"),
1369 unstable(feature = "integer_atomics", issue = "32976"),
1370 u64 AtomicU64 ATOMIC_U64_INIT
1372 #[cfg(target_has_atomic = "ptr")]
1374 stable(feature = "rust1", since = "1.0.0"),
1375 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
1376 stable(feature = "atomic_debug", since = "1.3.0"),
1377 stable(feature = "atomic_access", since = "1.15.0"),
1378 isize AtomicIsize ATOMIC_ISIZE_INIT
1380 #[cfg(target_has_atomic = "ptr")]
1382 stable(feature = "rust1", since = "1.0.0"),
1383 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
1384 stable(feature = "atomic_debug", since = "1.3.0"),
1385 stable(feature = "atomic_access", since = "1.15.0"),
1386 usize AtomicUsize ATOMIC_USIZE_INIT
1390 fn strongest_failure_ordering(order: Ordering) -> Ordering {
1397 __Nonexhaustive => __Nonexhaustive,
1402 unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
1404 Release => intrinsics::atomic_store_rel(dst, val),
1405 Relaxed => intrinsics::atomic_store_relaxed(dst, val),
1406 SeqCst => intrinsics::atomic_store(dst, val),
1407 Acquire => panic!("there is no such thing as an acquire store"),
1408 AcqRel => panic!("there is no such thing as an acquire/release store"),
1409 __Nonexhaustive => panic!("invalid memory ordering"),
1414 unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
1416 Acquire => intrinsics::atomic_load_acq(dst),
1417 Relaxed => intrinsics::atomic_load_relaxed(dst),
1418 SeqCst => intrinsics::atomic_load(dst),
1419 Release => panic!("there is no such thing as a release load"),
1420 AcqRel => panic!("there is no such thing as an acquire/release load"),
1421 __Nonexhaustive => panic!("invalid memory ordering"),
1426 unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
1428 Acquire => intrinsics::atomic_xchg_acq(dst, val),
1429 Release => intrinsics::atomic_xchg_rel(dst, val),
1430 AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
1431 Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
1432 SeqCst => intrinsics::atomic_xchg(dst, val),
1433 __Nonexhaustive => panic!("invalid memory ordering"),
1437 /// Returns the previous value (like __sync_fetch_and_add).
1439 unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
1441 Acquire => intrinsics::atomic_xadd_acq(dst, val),
1442 Release => intrinsics::atomic_xadd_rel(dst, val),
1443 AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
1444 Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
1445 SeqCst => intrinsics::atomic_xadd(dst, val),
1446 __Nonexhaustive => panic!("invalid memory ordering"),
1450 /// Returns the previous value (like __sync_fetch_and_sub).
1452 unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
1454 Acquire => intrinsics::atomic_xsub_acq(dst, val),
1455 Release => intrinsics::atomic_xsub_rel(dst, val),
1456 AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
1457 Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
1458 SeqCst => intrinsics::atomic_xsub(dst, val),
1459 __Nonexhaustive => panic!("invalid memory ordering"),
1464 unsafe fn atomic_compare_exchange<T>(dst: *mut T,
1470 let (val, ok) = match (success, failure) {
1471 (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
1472 (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
1473 (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
1474 (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
1475 (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
1476 (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
1477 (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
1478 (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
1479 (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
1480 (__Nonexhaustive, _) => panic!("invalid memory ordering"),
1481 (_, __Nonexhaustive) => panic!("invalid memory ordering"),
1482 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
1483 (_, Release) => panic!("there is no such thing as a release failure ordering"),
1484 _ => panic!("a failure ordering can't be stronger than a success ordering"),
1486 if ok { Ok(val) } else { Err(val) }
1490 unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
1496 let (val, ok) = match (success, failure) {
1497 (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
1498 (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
1499 (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
1500 (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
1501 (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
1502 (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
1503 (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
1504 (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
1505 (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
1506 (__Nonexhaustive, _) => panic!("invalid memory ordering"),
1507 (_, __Nonexhaustive) => panic!("invalid memory ordering"),
1508 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
1509 (_, Release) => panic!("there is no such thing as a release failure ordering"),
1510 _ => panic!("a failure ordering can't be stronger than a success ordering"),
1512 if ok { Ok(val) } else { Err(val) }
1516 unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
1518 Acquire => intrinsics::atomic_and_acq(dst, val),
1519 Release => intrinsics::atomic_and_rel(dst, val),
1520 AcqRel => intrinsics::atomic_and_acqrel(dst, val),
1521 Relaxed => intrinsics::atomic_and_relaxed(dst, val),
1522 SeqCst => intrinsics::atomic_and(dst, val),
1523 __Nonexhaustive => panic!("invalid memory ordering"),
1528 unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
1530 Acquire => intrinsics::atomic_or_acq(dst, val),
1531 Release => intrinsics::atomic_or_rel(dst, val),
1532 AcqRel => intrinsics::atomic_or_acqrel(dst, val),
1533 Relaxed => intrinsics::atomic_or_relaxed(dst, val),
1534 SeqCst => intrinsics::atomic_or(dst, val),
1535 __Nonexhaustive => panic!("invalid memory ordering"),
1540 unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
1542 Acquire => intrinsics::atomic_xor_acq(dst, val),
1543 Release => intrinsics::atomic_xor_rel(dst, val),
1544 AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
1545 Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
1546 SeqCst => intrinsics::atomic_xor(dst, val),
1547 __Nonexhaustive => panic!("invalid memory ordering"),
1551 /// An atomic fence.
1553 /// A fence 'A' which has [`Release`] ordering semantics, synchronizes with a
1554 /// fence 'B' with (at least) [`Acquire`] semantics, if and only if there exists
1555 /// atomic operations X and Y, both operating on some atomic object 'M' such
1556 /// that A is sequenced before X, Y is synchronized before B and Y observes
1557 /// the change to M. This provides a happens-before dependence between A and B.
1559 /// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
1562 /// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
1563 /// and [`Release`] semantics, participates in the global program order of the
1564 /// other [`SeqCst`] operations and/or fences.
1566 /// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
1570 /// Panics if `order` is [`Relaxed`].
1572 /// [`Ordering`]: enum.Ordering.html
1573 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1574 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1575 /// [`Release`]: enum.Ordering.html#variant.Release
1576 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1577 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1579 #[stable(feature = "rust1", since = "1.0.0")]
1580 pub fn fence(order: Ordering) {
1583 Acquire => intrinsics::atomic_fence_acq(),
1584 Release => intrinsics::atomic_fence_rel(),
1585 AcqRel => intrinsics::atomic_fence_acqrel(),
1586 SeqCst => intrinsics::atomic_fence(),
1587 Relaxed => panic!("there is no such thing as a relaxed fence"),
1588 __Nonexhaustive => panic!("invalid memory ordering"),
1594 #[cfg(target_has_atomic = "8")]
1595 #[stable(feature = "atomic_debug", since = "1.3.0")]
1596 impl fmt::Debug for AtomicBool {
1597 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1598 f.debug_tuple("AtomicBool").field(&self.load(Ordering::SeqCst)).finish()
1602 #[cfg(target_has_atomic = "ptr")]
1603 #[stable(feature = "atomic_debug", since = "1.3.0")]
1604 impl<T> fmt::Debug for AtomicPtr<T> {
1605 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1606 f.debug_tuple("AtomicPtr").field(&self.load(Ordering::SeqCst)).finish()