// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Atomic types
//!
//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent types.
//!
//! This module defines atomic versions of a select number of primitive
//! types, including [`AtomicBool`], [`AtomicIsize`], and [`AtomicUsize`].
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
//! [`AtomicBool`]: struct.AtomicBool.html
//! [`AtomicIsize`]: struct.AtomicIsize.html
//! [`AtomicUsize`]: struct.AtomicUsize.html
//!
//! Each method takes an [`Ordering`] which represents the strength of
//! the memory barrier for that operation. These orderings are the
//! same as [LLVM atomic orderings][1]. For more information see the [nomicon][2].
//!
//! [`Ordering`]: enum.Ordering.html
//!
//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
//! [2]: ../../../nomicon/atomics.html
//!
//! Atomic variables are safe to share between threads (they implement [`Sync`])
//! but they do not themselves provide the mechanism for sharing and follow the
//! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
//! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
//! atomically-reference-counted shared pointer).
//!
//! [`Sync`]: ../../marker/trait.Sync.html
//! [arc]: ../../../std/sync/struct.Arc.html
//!
//! Most atomic types may be stored in static variables, initialized using
//! the provided static initializers like [`ATOMIC_BOOL_INIT`]. Atomic statics
//! are often used for lazy global initialization.
//!
//! [`ATOMIC_BOOL_INIT`]: constant.ATOMIC_BOOL_INIT.html
//!
//! # Examples
//!
//! A simple spinlock:
//!
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::thread;
//!
//! fn main() {
//!     let spinlock = Arc::new(AtomicUsize::new(1));
//!
//!     let spinlock_clone = spinlock.clone();
//!     let thread = thread::spawn(move|| {
//!         spinlock_clone.store(0, Ordering::SeqCst);
//!     });
//!
//!     // Wait for the other thread to release the lock
//!     while spinlock.load(Ordering::SeqCst) != 0 {}
//!
//!     if let Err(panic) = thread.join() {
//!         println!("Thread had an error: {:?}", panic);
//!     }
//! }
//! ```
//!
//! Keep a global count of live threads:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
//!
//! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
//!
//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
//! println!("live threads: {}", old_thread_count + 1);
//! ```
#![stable(feature = "rust1", since = "1.0.0")]
#![cfg_attr(not(target_has_atomic = "8"), allow(dead_code))]
#![cfg_attr(not(target_has_atomic = "8"), allow(unused_imports))]

use self::Ordering::*;

use intrinsics;
use cell::UnsafeCell;
use fmt;
/// Save power or switch hyperthreads in a busy-wait spin-loop.
///
/// This function is deliberately more primitive than
/// `std::thread::yield_now` and does not directly yield to the
/// system's scheduler. In some cases it might be useful to use a
/// combination of both functions. Careful benchmarking is advised.
///
/// On some platforms this function may not do anything at all.
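///
/// # Examples
///
/// A minimal sketch of the intended use: spinning on a flag while hinting
/// the core on each iteration (the `wait_until_ready` helper is
/// illustrative, not part of this API).
///
/// ```
/// #![feature(hint_core_should_pause)]
/// use std::sync::atomic::{self, AtomicBool, Ordering};
///
/// // Spin until `ready` becomes true, hinting the core on each iteration.
/// fn wait_until_ready(ready: &AtomicBool) {
///     while !ready.load(Ordering::Acquire) {
///         atomic::hint_core_should_pause();
///     }
/// }
///
/// let ready = AtomicBool::new(true);
/// wait_until_ready(&ready); // returns immediately, the flag is already set
/// ```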
#[inline]
#[unstable(feature = "hint_core_should_pause", issue = "41196")]
pub fn hint_core_should_pause()
{
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    unsafe {
        asm!("pause" ::: "memory" : "volatile");
    }

    #[cfg(target_arch = "aarch64")]
    unsafe {
        asm!("yield" ::: "memory" : "volatile");
    }
}
/// A boolean type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a `bool`.
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
    v: UnsafeCell<u8>,
}
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
impl Default for AtomicBool {
    /// Creates an `AtomicBool` initialized to `false`.
    fn default() -> Self {
        Self::new(false)
    }
}
// Send is implicitly implemented for AtomicBool.
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicBool {}
/// A raw pointer type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a `*mut T`.
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicPtr<T> {
    p: UnsafeCell<*mut T>,
}
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for AtomicPtr<T> {
    /// Creates a null `AtomicPtr<T>`.
    fn default() -> AtomicPtr<T> {
        AtomicPtr::new(::ptr::null_mut())
    }
}
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Sync for AtomicPtr<T> {}
/// Atomic memory orderings
///
/// Memory orderings limit the ways that both the compiler and CPU may reorder
/// instructions around atomic operations. At its most restrictive,
/// "sequentially consistent" atomics allow neither reads nor writes
/// to be moved either before or after the atomic operation; on the other end
/// "relaxed" atomics allow all reorderings.
///
/// Rust's memory orderings are [the same as
/// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
///
/// For more information see the [nomicon].
///
/// [nomicon]: ../../../nomicon/atomics.html
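///
/// # Examples
///
/// A minimal sketch of the release/acquire pairing these orderings enable
/// (the statics and values are illustrative, not part of this API): a writer
/// publishes data with a `Release` store, and a reader that observes the
/// flag with an `Acquire` load is guaranteed to also see the earlier write.
///
/// ```
/// use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
/// use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT};
/// use std::thread;
///
/// static DATA: AtomicUsize = ATOMIC_USIZE_INIT;
/// static READY: AtomicBool = ATOMIC_BOOL_INIT;
///
/// let writer = thread::spawn(|| {
///     DATA.store(42, Ordering::Relaxed);
///     READY.store(true, Ordering::Release); // publish DATA
/// });
///
/// while !READY.load(Ordering::Acquire) {} // wait for publication
/// assert_eq!(DATA.load(Ordering::Relaxed), 42);
/// # writer.join().unwrap();
/// ```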
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone, Debug)]
pub enum Ordering {
    /// No ordering constraints, only atomic operations.
    ///
    /// Corresponds to LLVM's [`Monotonic`] ordering.
    ///
    /// [`Monotonic`]: http://llvm.org/docs/Atomics.html#monotonic
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous writes become visible
    /// to the other threads that perform a load with [`Acquire`] ordering
    /// on the same value.
    ///
    /// [`Acquire`]: http://llvm.org/docs/Atomics.html#acquire
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, all subsequent loads will see data
    /// written before a store with [`Release`] ordering on the same value
    /// in other threads.
    ///
    /// [`Release`]: http://llvm.org/docs/Atomics.html#release
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// When coupled with a load, uses [`Acquire`] ordering, and with a store
    /// [`Release`] ordering.
    ///
    /// [`Acquire`]: http://llvm.org/docs/Atomics.html#acquire
    /// [`Release`]: http://llvm.org/docs/Atomics.html#release
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like `AcqRel` with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
    // Prevent exhaustive matching to allow for future extension
    #[doc(hidden)]
    #[unstable(feature = "future_atomic_orderings", issue = "0")]
    __Nonexhaustive,
}
/// An [`AtomicBool`] initialized to `false`.
///
/// [`AtomicBool`]: struct.AtomicBool.html
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
#[cfg(target_has_atomic = "8")]
impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: bool) -> AtomicBool {
        AtomicBool { v: UnsafeCell::new(v as u8) }
    }
    /// Returns a mutable reference to the underlying `bool`.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bool = AtomicBool::new(true);
    /// assert_eq!(*some_bool.get_mut(), true);
    /// *some_bool.get_mut() = false;
    /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn get_mut(&mut self) -> &mut bool {
        unsafe { &mut *(self.v.get() as *mut bool) }
    }
    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let some_bool = AtomicBool::new(true);
    /// assert_eq!(some_bool.into_inner(), true);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn into_inner(self) -> bool {
        unsafe { self.v.into_inner() != 0 }
    }
    /// Loads a value from the bool.
    ///
    /// `load` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Release`] or [`AcqRel`].
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> bool {
        unsafe { atomic_load(self.v.get(), order) != 0 }
    }
    /// Stores a value into the bool.
    ///
    /// `store` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// some_bool.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Acquire`] or [`AcqRel`].
    ///
    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: bool, order: Ordering) {
        unsafe {
            atomic_store(self.v.get(), val as u8, order);
        }
    }
    /// Stores a value into the bool, returning the previous value.
    ///
    /// `swap` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
    }
    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }
    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the
    /// operation succeeds while the second describes the required ordering when the
    /// operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and must
    /// be equivalent or weaker than the success ordering.
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_exchange(true,
    ///                                       false,
    ///                                       Ordering::Acquire,
    ///                                       Ordering::Relaxed),
    ///            Ok(true));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_exchange(true, true,
    ///                                       Ordering::SeqCst,
    ///                                       Ordering::Acquire),
    ///            Err(false));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange(&self,
                            current: bool,
                            new: bool,
                            success: Ordering,
                            failure: Ordering)
                            -> Result<bool, bool> {
        match unsafe {
            atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
        } {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }
    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be [`Release`] or [`AcqRel`] and must be equivalent or
    /// weaker than the success ordering.
    ///
    /// [`compare_exchange`]: #method.compare_exchange
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let val = AtomicBool::new(false);
    ///
    /// let new = true;
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange_weak(&self,
                                 current: bool,
                                 new: bool,
                                 success: Ordering,
                                 failure: Ordering)
                                 -> Result<bool, bool> {
        match unsafe {
            atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
        } {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }
    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
    }
    /// Logical "nand" with a boolean value.
    ///
    /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        // We can't use atomic_nand here because it can result in a bool with
        // an invalid value. This happens because the atomic operation is done
        // with an 8-bit integer internally, which would set the upper 7 bits.
        // So we just use fetch_xor or swap instead.
        if val {
            // !(x & true) == !x
            // We must invert the bool.
            self.fetch_xor(true, order)
        } else {
            // !(x & false) == true
            // We must set the bool to true.
            self.swap(true, order)
        }
    }
    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
    /// new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
    }
    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
    }
}
#[cfg(target_has_atomic = "ptr")]
impl<T> AtomicPtr<T> {
    /// Creates a new `AtomicPtr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let ptr = &mut 5;
    /// let atomic_ptr = AtomicPtr::new(ptr);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(p: *mut T) -> AtomicPtr<T> {
        AtomicPtr { p: UnsafeCell::new(p) }
    }
    /// Returns a mutable reference to the underlying pointer.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
    /// *atomic_ptr.get_mut() = &mut 5;
    /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn get_mut(&mut self) -> &mut *mut T {
        unsafe { &mut *self.p.get() }
    }
    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let atomic_ptr = AtomicPtr::new(&mut 5);
    /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn into_inner(self) -> *mut T {
        unsafe { self.p.into_inner() }
    }
    /// Loads a value from the pointer.
    ///
    /// `load` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Release`] or [`AcqRel`].
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let value = some_ptr.load(Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> *mut T {
        unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
    }
    /// Stores a value into the pointer.
    ///
    /// `store` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// some_ptr.store(other_ptr, Ordering::Relaxed);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Acquire`] or [`AcqRel`].
    ///
    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, ptr: *mut T, order: Ordering) {
        unsafe {
            atomic_store(self.p.get() as *mut usize, ptr as usize, order);
        }
    }
    /// Stores a value into the pointer, returning the previous value.
    ///
    /// `swap` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
        unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
    }
    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    /// let another_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }
    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if
    /// the operation succeeds while the second describes the required ordering when
    /// the operation fails. The failure ordering can't be [`Release`] or [`AcqRel`]
    /// and must be equivalent or weaker than the success ordering.
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    /// let another_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_exchange(other_ptr, another_ptr,
    ///                                       Ordering::SeqCst, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange(&self,
                            current: *mut T,
                            new: *mut T,
                            success: Ordering,
                            failure: Ordering)
                            -> Result<*mut T, *mut T> {
        unsafe {
            let res = atomic_compare_exchange(self.p.get() as *mut usize,
                                              current as usize,
                                              new as usize,
                                              success,
                                              failure);
            match res {
                Ok(x) => Ok(x as *mut T),
                Err(x) => Err(x as *mut T),
            }
        }
    }
    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be [`Release`] or [`AcqRel`] and must be equivalent or
    /// weaker than the success ordering.
    ///
    /// [`compare_exchange`]: #method.compare_exchange
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let some_ptr = AtomicPtr::new(&mut 5);
    ///
    /// let new = &mut 10;
    /// let mut old = some_ptr.load(Ordering::Relaxed);
    /// loop {
    ///     match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange_weak(&self,
                                 current: *mut T,
                                 new: *mut T,
                                 success: Ordering,
                                 failure: Ordering)
                                 -> Result<*mut T, *mut T> {
        unsafe {
            let res = atomic_compare_exchange_weak(self.p.get() as *mut usize,
                                                   current as usize,
                                                   new as usize,
                                                   success,
                                                   failure);
            match res {
                Ok(x) => Ok(x as *mut T),
                Err(x) => Err(x as *mut T),
            }
        }
    }
}
#[cfg(target_has_atomic = "ptr")]
macro_rules! atomic_int {
    ($stable:meta,
     $stable_cxchg:meta,
     $stable_debug:meta,
     $stable_access:meta,
     $int_type:ident $atomic_type:ident $atomic_init:ident) => {
        /// An integer type which can be safely shared between threads.
        ///
        /// This type has the same in-memory representation as the underlying integer type.
        #[$stable]
        pub struct $atomic_type {
            v: UnsafeCell<$int_type>,
        }

        /// An atomic integer initialized to `0`.
        #[$stable]
        pub const $atomic_init: $atomic_type = $atomic_type::new(0);

        #[$stable]
        impl Default for $atomic_type {
            fn default() -> Self {
                Self::new(Default::default())
            }
        }

        #[$stable_debug]
        impl fmt::Debug for $atomic_type {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                f.debug_tuple(stringify!($atomic_type))
                    .field(&self.load(Ordering::SeqCst))
                    .finish()
            }
        }

        // Send is implicitly implemented.
        #[$stable]
        unsafe impl Sync for $atomic_type {}
        impl $atomic_type {
            /// Creates a new atomic integer.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::AtomicIsize;
            ///
            /// let atomic_forty_two = AtomicIsize::new(42);
            /// ```
            #[inline]
            #[$stable]
            pub const fn new(v: $int_type) -> Self {
                $atomic_type {v: UnsafeCell::new(v)}
            }
            /// Returns a mutable reference to the underlying integer.
            ///
            /// This is safe because the mutable reference guarantees that no other threads are
            /// concurrently accessing the atomic data.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let mut some_isize = AtomicIsize::new(10);
            /// assert_eq!(*some_isize.get_mut(), 10);
            /// *some_isize.get_mut() = 5;
            /// assert_eq!(some_isize.load(Ordering::SeqCst), 5);
            /// ```
            #[inline]
            #[$stable_access]
            pub fn get_mut(&mut self) -> &mut $int_type {
                unsafe { &mut *self.v.get() }
            }
            /// Consumes the atomic and returns the contained value.
            ///
            /// This is safe because passing `self` by value guarantees that no other threads are
            /// concurrently accessing the atomic data.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::AtomicIsize;
            ///
            /// let some_isize = AtomicIsize::new(5);
            /// assert_eq!(some_isize.into_inner(), 5);
            /// ```
            #[inline]
            #[$stable_access]
            pub fn into_inner(self) -> $int_type {
                unsafe { self.v.into_inner() }
            }
            /// Loads a value from the atomic integer.
            ///
            /// `load` takes an [`Ordering`] argument which describes the memory ordering of this
            /// operation.
            ///
            /// # Panics
            ///
            /// Panics if `order` is [`Release`] or [`AcqRel`].
            ///
            /// [`Ordering`]: enum.Ordering.html
            /// [`Release`]: enum.Ordering.html#variant.Release
            /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
            /// ```
            #[inline]
            #[$stable]
            pub fn load(&self, order: Ordering) -> $int_type {
                unsafe { atomic_load(self.v.get(), order) }
            }
            /// Stores a value into the atomic integer.
            ///
            /// `store` takes an [`Ordering`] argument which describes the memory ordering of this
            /// operation.
            ///
            /// [`Ordering`]: enum.Ordering.html
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// some_isize.store(10, Ordering::Relaxed);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            ///
            /// # Panics
            ///
            /// Panics if `order` is [`Acquire`] or [`AcqRel`].
            ///
            /// [`Acquire`]: enum.Ordering.html#variant.Acquire
            /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
            #[inline]
            #[$stable]
            pub fn store(&self, val: $int_type, order: Ordering) {
                unsafe { atomic_store(self.v.get(), val, order); }
            }
            /// Stores a value into the atomic integer, returning the previous value.
            ///
            /// `swap` takes an [`Ordering`] argument which describes the memory ordering of this
            /// operation.
            ///
            /// [`Ordering`]: enum.Ordering.html
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
            /// ```
            #[inline]
            #[$stable]
            pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_swap(self.v.get(), val, order) }
            }
            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// The return value is always the previous value. If it is equal to `current`, then the
            /// value was updated.
            ///
            /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
            /// ordering of this operation.
            ///
            /// [`Ordering`]: enum.Ordering.html
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            ///
            /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            #[inline]
            #[$stable]
            pub fn compare_and_swap(&self,
                                    current: $int_type,
                                    new: $int_type,
                                    order: Ordering) -> $int_type {
                match self.compare_exchange(current,
                                            new,
                                            order,
                                            strongest_failure_ordering(order)) {
                    Ok(x) => x,
                    Err(x) => x,
                }
            }
            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// The return value is a result indicating whether the new value was written and
            /// containing the previous value. On success this value is guaranteed to be equal to
            /// `current`.
            ///
            /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
            /// ordering of this operation. The first describes the required ordering if
            /// the operation succeeds while the second describes the required ordering when
            /// the operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and
            /// must be equivalent or weaker than the success ordering.
            ///
            /// [`Ordering`]: enum.Ordering.html
            /// [`Release`]: enum.Ordering.html#variant.Release
            /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.compare_exchange(5, 10,
            ///                                        Ordering::Acquire,
            ///                                        Ordering::Relaxed),
            ///            Ok(5));
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            ///
            /// assert_eq!(some_isize.compare_exchange(6, 12,
            ///                                        Ordering::SeqCst,
            ///                                        Ordering::Acquire),
            ///            Err(10));
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            #[inline]
            #[$stable_cxchg]
            pub fn compare_exchange(&self,
                                    current: $int_type,
                                    new: $int_type,
                                    success: Ordering,
                                    failure: Ordering) -> Result<$int_type, $int_type> {
                unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
            }
            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even
            /// when the comparison succeeds, which can result in more efficient code on some
            /// platforms. The return value is a result indicating whether the new value was
            /// written and containing the previous value.
            ///
            /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
            /// ordering of this operation. The first describes the required ordering if the
            /// operation succeeds while the second describes the required ordering when the
            /// operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and
            /// must be equivalent or weaker than the success ordering.
            ///
            /// [`compare_exchange`]: #method.compare_exchange
            /// [`Ordering`]: enum.Ordering.html
            /// [`Release`]: enum.Ordering.html#variant.Release
            /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let val = AtomicIsize::new(4);
            ///
            /// let mut old = val.load(Ordering::Relaxed);
            /// loop {
            ///     let new = old * 2;
            ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
            ///         Ok(_) => break,
            ///         Err(x) => old = x,
            ///     }
            /// }
            /// ```
            #[inline]
            #[$stable_cxchg]
            pub fn compare_exchange_weak(&self,
                                         current: $int_type,
                                         new: $int_type,
                                         success: Ordering,
                                         failure: Ordering) -> Result<$int_type, $int_type> {
                unsafe {
                    atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
                }
            }
            /// Adds to the current value, returning the previous value.
            ///
            /// This operation wraps around on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0);
            /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
            /// assert_eq!(foo.load(Ordering::SeqCst), 10);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_add(self.v.get(), val, order) }
            }
            /// Subtracts from the current value, returning the previous value.
            ///
            /// This operation wraps around on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0);
            /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
            /// assert_eq!(foo.load(Ordering::SeqCst), -10);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_sub(self.v.get(), val, order) }
            }
            /// Bitwise "and" with the current value.
            ///
            /// Performs a bitwise "and" operation on the current value and the argument `val`, and
            /// sets the new value to the result.
            ///
            /// Returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_and(self.v.get(), val, order) }
            }
            /// Bitwise "or" with the current value.
            ///
            /// Performs a bitwise "or" operation on the current value and the argument `val`, and
            /// sets the new value to the result.
            ///
            /// Returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_or(self.v.get(), val, order) }
            }
            /// Bitwise "xor" with the current value.
            ///
            /// Performs a bitwise "xor" operation on the current value and the argument `val`, and
            /// sets the new value to the result.
            ///
            /// Returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_xor(self.v.get(), val, order) }
            }
        }
    }
}
#[cfg(target_has_atomic = "8")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i8 AtomicI8 ATOMIC_I8_INIT
}
#[cfg(target_has_atomic = "8")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u8 AtomicU8 ATOMIC_U8_INIT
}
#[cfg(target_has_atomic = "16")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i16 AtomicI16 ATOMIC_I16_INIT
}
#[cfg(target_has_atomic = "16")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u16 AtomicU16 ATOMIC_U16_INIT
}
#[cfg(target_has_atomic = "32")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i32 AtomicI32 ATOMIC_I32_INIT
}
#[cfg(target_has_atomic = "32")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u32 AtomicU32 ATOMIC_U32_INIT
}
#[cfg(target_has_atomic = "64")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i64 AtomicI64 ATOMIC_I64_INIT
}
#[cfg(target_has_atomic = "64")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u64 AtomicU64 ATOMIC_U64_INIT
}
#[cfg(target_has_atomic = "ptr")]
atomic_int! {
    stable(feature = "rust1", since = "1.0.0"),
    stable(feature = "extended_compare_and_swap", since = "1.10.0"),
    stable(feature = "atomic_debug", since = "1.3.0"),
    stable(feature = "atomic_access", since = "1.15.0"),
    isize AtomicIsize ATOMIC_ISIZE_INIT
}
#[cfg(target_has_atomic = "ptr")]
atomic_int! {
    stable(feature = "rust1", since = "1.0.0"),
    stable(feature = "extended_compare_and_swap", since = "1.10.0"),
    stable(feature = "atomic_debug", since = "1.3.0"),
    stable(feature = "atomic_access", since = "1.15.0"),
    usize AtomicUsize ATOMIC_USIZE_INIT
}
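
// Maps a success ordering to the strongest failure ordering that may be
// paired with it: the acquire half of the ordering is kept, while the
// release half is dropped, since a failed exchange performs no store.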
#[inline]
fn strongest_failure_ordering(order: Ordering) -> Ordering {
    match order {
        Release => Relaxed,
        Relaxed => Relaxed,
        SeqCst => SeqCst,
        Acquire => Acquire,
        AcqRel => Acquire,
        __Nonexhaustive => __Nonexhaustive,
    }
}
#[inline]
unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
    match order {
        Release => intrinsics::atomic_store_rel(dst, val),
        Relaxed => intrinsics::atomic_store_relaxed(dst, val),
        SeqCst => intrinsics::atomic_store(dst, val),
        Acquire => panic!("there is no such thing as an acquire store"),
        AcqRel => panic!("there is no such thing as an acquire/release store"),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}
#[inline]
unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_load_acq(dst),
        Relaxed => intrinsics::atomic_load_relaxed(dst),
        SeqCst => intrinsics::atomic_load(dst),
        Release => panic!("there is no such thing as a release load"),
        AcqRel => panic!("there is no such thing as an acquire/release load"),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}
#[inline]
unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xchg_acq(dst, val),
        Release => intrinsics::atomic_xchg_rel(dst, val),
        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xchg(dst, val),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}
/// Returns the previous value (like __sync_fetch_and_add).
#[inline]
unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xadd_acq(dst, val),
        Release => intrinsics::atomic_xadd_rel(dst, val),
        AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xadd(dst, val),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}
/// Returns the previous value (like __sync_fetch_and_sub).
#[inline]
unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xsub_acq(dst, val),
        Release => intrinsics::atomic_xsub_rel(dst, val),
        AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xsub(dst, val),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}
#[inline]
unsafe fn atomic_compare_exchange<T>(dst: *mut T,
                                     old: T,
                                     new: T,
                                     success: Ordering,
                                     failure: Ordering)
                                     -> Result<T, T> {
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
        (__Nonexhaustive, _) => panic!("invalid memory ordering"),
        (_, __Nonexhaustive) => panic!("invalid memory ordering"),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    if ok { Ok(val) } else { Err(val) }
}
#[inline]
unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
                                          old: T,
                                          new: T,
                                          success: Ordering,
                                          failure: Ordering)
                                          -> Result<T, T> {
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
        (__Nonexhaustive, _) => panic!("invalid memory ordering"),
        (_, __Nonexhaustive) => panic!("invalid memory ordering"),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    if ok { Ok(val) } else { Err(val) }
}
#[inline]
unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_and_acq(dst, val),
        Release => intrinsics::atomic_and_rel(dst, val),
        AcqRel => intrinsics::atomic_and_acqrel(dst, val),
        Relaxed => intrinsics::atomic_and_relaxed(dst, val),
        SeqCst => intrinsics::atomic_and(dst, val),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}
#[inline]
unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_or_acq(dst, val),
        Release => intrinsics::atomic_or_rel(dst, val),
        AcqRel => intrinsics::atomic_or_acqrel(dst, val),
        Relaxed => intrinsics::atomic_or_relaxed(dst, val),
        SeqCst => intrinsics::atomic_or(dst, val),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}
#[inline]
unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xor_acq(dst, val),
        Release => intrinsics::atomic_xor_rel(dst, val),
        AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xor(dst, val),
        __Nonexhaustive => panic!("invalid memory ordering"),
    }
}
/// An atomic fence.
///
/// Depending on the specified order, a fence prevents the compiler and CPU from
/// reordering certain types of memory operations around it.
/// That creates synchronizes-with relationships between it and atomic operations
/// or fences in other threads.
///
/// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
/// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
/// exist operations X and Y, both operating on some atomic object 'M' such
/// that A is sequenced before X, Y is sequenced before B and Y observes
/// the change to M. This provides a happens-before dependence between A and B.
///
/// ```text
/// Thread 1                                          Thread 2
///
/// fence(Release);      A --------------
/// x.store(3, Relaxed); X ---------    |
///                                |    |
///                                |    |
///                                -------------> Y  if x.load(Relaxed) == 3 {
///                                     |-------> B      fence(Acquire);
///                                                      ...
///                                                  }
/// ```
///
/// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
/// with a fence.
///
/// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
/// and [`Release`] semantics, participates in the global program order of the
/// other [`SeqCst`] operations and/or fences.
///
/// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
///
/// # Panics
///
/// Panics if `order` is [`Relaxed`].
///
/// # Examples
///
/// ```
/// use std::sync::atomic::AtomicBool;
/// use std::sync::atomic::fence;
/// use std::sync::atomic::Ordering;
///
/// // A mutual exclusion primitive based on spinlock.
/// pub struct Mutex {
///     flag: AtomicBool,
/// }
///
/// impl Mutex {
///     pub fn new() -> Mutex {
///         Mutex {
///             flag: AtomicBool::new(false),
///         }
///     }
///
///     pub fn lock(&self) {
///         // Wait until the old value is `false`.
///         while self.flag.compare_and_swap(false, true, Ordering::Relaxed) {}
///         // This fence synchronizes-with store in `unlock`.
///         fence(Ordering::Acquire);
///     }
///
///     pub fn unlock(&self) {
///         self.flag.store(false, Ordering::Release);
///     }
/// }
/// ```
///
/// [`Ordering`]: enum.Ordering.html
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn fence(order: Ordering) {
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence_acq(),
            Release => intrinsics::atomic_fence_rel(),
            AcqRel => intrinsics::atomic_fence_acqrel(),
            SeqCst => intrinsics::atomic_fence(),
            Relaxed => panic!("there is no such thing as a relaxed fence"),
            __Nonexhaustive => panic!("invalid memory ordering"),
        }
    }
}
/// A compiler memory fence.
///
/// `compiler_fence` does not emit any machine code, but prevents the compiler from re-ordering
/// memory operations across this point. Which reorderings are disallowed is dictated by the given
/// [`Ordering`]. Note that `compiler_fence` does *not* introduce inter-thread memory
/// synchronization; for that, a [`fence`] is needed.
///
/// The re-orderings prevented by the different ordering semantics are:
///
/// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
/// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
/// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
/// - with [`AcqRel`], both of the above rules are enforced.
///
/// # Panics
///
/// Panics if `order` is [`Relaxed`].
///
/// [`fence`]: fn.fence.html
/// [`Ordering`]: enum.Ordering.html
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
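///
/// # Examples
///
/// A minimal sketch (the statics and the signal-handler framing are
/// illustrative, not part of this API) of the classic single-threaded case:
/// code that shares state with an interrupt or signal handler running on the
/// *same* thread, where a compiler fence suffices to keep the two relaxed
/// stores in order.
///
/// ```
/// #![feature(compiler_fences)]
/// use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
/// use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT};
/// use std::sync::atomic::compiler_fence;
///
/// static IMPORTANT_VARIABLE: AtomicUsize = ATOMIC_USIZE_INIT;
/// static IS_READY: AtomicBool = ATOMIC_BOOL_INIT;
///
/// fn main() {
///     IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
///     // Prevent the compiler from moving the store to IS_READY above
///     // the store to IMPORTANT_VARIABLE.
///     compiler_fence(Ordering::Release);
///     IS_READY.store(true, Ordering::Relaxed);
/// }
///
/// fn signal_handler() {
///     if IS_READY.load(Ordering::Relaxed) {
///         assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
///     }
/// }
/// ```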
#[inline]
#[unstable(feature = "compiler_fences", issue = "41091")]
pub fn compiler_fence(order: Ordering) {
    unsafe {
        match order {
            Acquire => intrinsics::atomic_singlethreadfence_acq(),
            Release => intrinsics::atomic_singlethreadfence_rel(),
            AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
            SeqCst => intrinsics::atomic_singlethreadfence(),
            Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
            __Nonexhaustive => panic!("invalid memory ordering"),
        }
    }
}
#[cfg(target_has_atomic = "8")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl fmt::Debug for AtomicBool {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("AtomicBool").field(&self.load(Ordering::SeqCst)).finish()
    }
}

#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl<T> fmt::Debug for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("AtomicPtr").field(&self.load(Ordering::SeqCst)).finish()
    }
}