1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
13 //! Atomic types provide primitive shared-memory communication between
14 //! threads, and are the building blocks of other concurrent
17 //! This module defines atomic versions of a select number of primitive
18 //! types, including [`AtomicBool`], [`AtomicIsize`], and [`AtomicUsize`].
19 //! Atomic types present operations that, when used correctly, synchronize
20 //! updates between threads.
22 //! [`AtomicBool`]: struct.AtomicBool.html
23 //! [`AtomicIsize`]: struct.AtomicIsize.html
24 //! [`AtomicUsize`]: struct.AtomicUsize.html
26 //! Each method takes an [`Ordering`] which represents the strength of
27 //! the memory barrier for that operation. These orderings are the
28 //! same as [LLVM atomic orderings][1]. For more information see the [nomicon][2].
30 //! [`Ordering`]: enum.Ordering.html
32 //! [1]: https://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
33 //! [2]: ../../../nomicon/atomics.html
35 //! Atomic variables are safe to share between threads (they implement [`Sync`])
36 //! but they do not themselves provide the mechanism for sharing and follow the
//! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
38 //! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
39 //! atomically-reference-counted shared pointer).
41 //! [`Sync`]: ../../marker/trait.Sync.html
42 //! [arc]: ../../../std/sync/struct.Arc.html
44 //! Most atomic types may be stored in static variables, initialized using
45 //! the provided static initializers like [`ATOMIC_BOOL_INIT`]. Atomic statics
46 //! are often used for lazy global initialization.
48 //! [`ATOMIC_BOOL_INIT`]: constant.ATOMIC_BOOL_INIT.html
52 //! A simple spinlock:
55 //! use std::sync::Arc;
56 //! use std::sync::atomic::{AtomicUsize, Ordering};
60 //! let spinlock = Arc::new(AtomicUsize::new(1));
62 //! let spinlock_clone = spinlock.clone();
63 //! let thread = thread::spawn(move|| {
64 //! spinlock_clone.store(0, Ordering::SeqCst);
67 //! // Wait for the other thread to release the lock
68 //! while spinlock.load(Ordering::SeqCst) != 0 {}
70 //! if let Err(panic) = thread.join() {
71 //! println!("Thread had an error: {:?}", panic);
76 //! Keep a global count of live threads:
79 //! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
81 //! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
83 //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
84 //! println!("live threads: {}", old_thread_count + 1);
87 #![stable(feature = "rust1", since = "1.0.0")]
88 #![cfg_attr(not(target_has_atomic = "8"), allow(dead_code))]
89 #![cfg_attr(not(target_has_atomic = "8"), allow(unused_imports))]
91 use self::Ordering::*;
97 /// Save power or switch hyperthreads in a busy-wait spin-loop.
99 /// This function is deliberately more primitive than
100 /// [`std::thread::yield_now`](../../../std/thread/fn.yield_now.html) and
101 /// does not directly yield to the system's scheduler.
102 /// In some cases it might be useful to use a combination of both functions.
103 /// Careful benchmarking is advised.
105 /// On some platforms this function may not do anything at all.
107 #[stable(feature = "spin_loop_hint", since = "1.24.0")]
108 pub fn spin_loop_hint() {
109 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
111 asm!("pause" ::: "memory" : "volatile");
114 #[cfg(target_arch = "aarch64")]
116 asm!("yield" ::: "memory" : "volatile");
120 /// A boolean type which can be safely shared between threads.
122 /// This type has the same in-memory representation as a [`bool`].
124 /// [`bool`]: ../../../std/primitive.bool.html
125 #[cfg(target_has_atomic = "8")]
126 #[stable(feature = "rust1", since = "1.0.0")]
128 pub struct AtomicBool {
132 #[cfg(target_has_atomic = "8")]
133 #[stable(feature = "rust1", since = "1.0.0")]
134 impl Default for AtomicBool {
135 /// Creates an `AtomicBool` initialized to `false`.
136 fn default() -> Self {
// Send is implicitly implemented for AtomicBool.
// SAFETY(review note): sharing `&AtomicBool` across threads is sound because
// the contained value is only accessed through atomic operations — see the
// `load`/`store`/`swap` methods below, which all go through atomic intrinsics.
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicBool {}
146 /// A raw pointer type which can be safely shared between threads.
148 /// This type has the same in-memory representation as a `*mut T`.
149 #[cfg(target_has_atomic = "ptr")]
150 #[stable(feature = "rust1", since = "1.0.0")]
151 #[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
152 #[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
153 #[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
154 pub struct AtomicPtr<T> {
155 p: UnsafeCell<*mut T>,
158 #[cfg(target_has_atomic = "ptr")]
159 #[stable(feature = "rust1", since = "1.0.0")]
160 impl<T> Default for AtomicPtr<T> {
161 /// Creates a null `AtomicPtr<T>`.
162 fn default() -> AtomicPtr<T> {
163 AtomicPtr::new(::ptr::null_mut())
// SAFETY(review note): unlike `*mut T` itself, `AtomicPtr<T>` may be sent and
// shared between threads because every access to the contained pointer goes
// through the atomic operations defined below.
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Sync for AtomicPtr<T> {}
174 /// Atomic memory orderings
176 /// Memory orderings specify the way atomic operations synchronize memory.
177 /// In its weakest [`Relaxed`][Ordering::Relaxed], only the memory directly touched by the
178 /// operation is synchronized. On the other hand, a store-load pair of [`SeqCst`][Ordering::SeqCst]
179 /// operations synchronize other memory while additionally preserving a total order of such
180 /// operations across all threads.
182 /// Rust's memory orderings are [the same as
183 /// LLVM's](https://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
185 /// For more information see the [nomicon].
187 /// [nomicon]: ../../../nomicon/atomics.html
188 /// [Ordering::Relaxed]: #variant.Relaxed
189 /// [Ordering::SeqCst]: #variant.SeqCst
190 #[stable(feature = "rust1", since = "1.0.0")]
191 #[derive(Copy, Clone, Debug)]
194 /// No ordering constraints, only atomic operations.
196 /// Corresponds to LLVM's [`Monotonic`] ordering.
198 /// [`Monotonic`]: https://llvm.org/docs/Atomics.html#monotonic
199 #[stable(feature = "rust1", since = "1.0.0")]
201 /// When coupled with a store, all previous operations become ordered
202 /// before any load of this value with [`Acquire`] (or stronger) ordering.
203 /// In particular, all previous writes become visible to all threads
204 /// that perform an [`Acquire`] (or stronger) load of this value.
206 /// Notice that using this ordering for an operation that combines loads
207 /// and stores leads to a [`Relaxed`] load operation!
209 /// This ordering is only applicable for operations that can perform a store.
211 /// Corresponds to LLVM's [`Release`] ordering.
213 /// [`Release`]: https://llvm.org/docs/Atomics.html#release
214 /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire
215 /// [`Relaxed`]: https://llvm.org/docs/Atomics.html#monotonic
216 #[stable(feature = "rust1", since = "1.0.0")]
218 /// When coupled with a load, if the loaded value was written by a store operation with
219 /// [`Release`] (or stronger) ordering, then all subsequent operations
220 /// become ordered after that store. In particular, all subsequent loads will see data
221 /// written before the store.
223 /// Notice that using this ordering for an operation that combines loads
224 /// and stores leads to a [`Relaxed`] store operation!
226 /// This ordering is only applicable for operations that can perform a load.
228 /// Corresponds to LLVM's [`Acquire`] ordering.
230 /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire
231 /// [`Release`]: https://llvm.org/docs/Atomics.html#release
232 /// [`Relaxed`]: https://llvm.org/docs/Atomics.html#monotonic
233 #[stable(feature = "rust1", since = "1.0.0")]
235 /// Has the effects of both [`Acquire`] and [`Release`] together:
236 /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
238 /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
239 /// not performing any store and hence it has just [`Acquire`] ordering. However,
240 /// [`AcqRel`][`AcquireRelease`] will never perform [`Relaxed`] accesses.
242 /// This ordering is only applicable for operations that combine both loads and stores.
244 /// Corresponds to LLVM's [`AcquireRelease`] ordering.
246 /// [`AcquireRelease`]: https://llvm.org/docs/Atomics.html#acquirerelease
247 /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire
248 /// [`Release`]: https://llvm.org/docs/Atomics.html#release
249 /// [`Relaxed`]: https://llvm.org/docs/Atomics.html#monotonic
250 #[stable(feature = "rust1", since = "1.0.0")]
252 /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
253 /// operations, respectively) with the additional guarantee that all threads see all
254 /// sequentially consistent operations in the same order.
256 /// Corresponds to LLVM's [`SequentiallyConsistent`] ordering.
258 /// [`SequentiallyConsistent`]: https://llvm.org/docs/Atomics.html#sequentiallyconsistent
259 /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire
260 /// [`Release`]: https://llvm.org/docs/Atomics.html#release
261 /// [`AcqRel`]: https://llvm.org/docs/Atomics.html#acquirerelease
262 #[stable(feature = "rust1", since = "1.0.0")]
/// An [`AtomicBool`] initialized to `false`.
///
/// Usable as a `static` initializer; equivalent to `AtomicBool::new(false)`.
///
/// [`AtomicBool`]: struct.AtomicBool.html
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
273 #[cfg(target_has_atomic = "8")]
275 /// Creates a new `AtomicBool`.
280 /// use std::sync::atomic::AtomicBool;
282 /// let atomic_true = AtomicBool::new(true);
283 /// let atomic_false = AtomicBool::new(false);
286 #[stable(feature = "rust1", since = "1.0.0")]
287 pub const fn new(v: bool) -> AtomicBool {
288 AtomicBool { v: UnsafeCell::new(v as u8) }
291 /// Returns a mutable reference to the underlying [`bool`].
293 /// This is safe because the mutable reference guarantees that no other threads are
294 /// concurrently accessing the atomic data.
296 /// [`bool`]: ../../../std/primitive.bool.html
301 /// use std::sync::atomic::{AtomicBool, Ordering};
303 /// let mut some_bool = AtomicBool::new(true);
304 /// assert_eq!(*some_bool.get_mut(), true);
305 /// *some_bool.get_mut() = false;
306 /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
309 #[stable(feature = "atomic_access", since = "1.15.0")]
310 pub fn get_mut(&mut self) -> &mut bool {
311 unsafe { &mut *(self.v.get() as *mut bool) }
314 /// Consumes the atomic and returns the contained value.
316 /// This is safe because passing `self` by value guarantees that no other threads are
317 /// concurrently accessing the atomic data.
322 /// use std::sync::atomic::AtomicBool;
324 /// let some_bool = AtomicBool::new(true);
325 /// assert_eq!(some_bool.into_inner(), true);
328 #[stable(feature = "atomic_access", since = "1.15.0")]
329 pub fn into_inner(self) -> bool {
330 self.v.into_inner() != 0
333 /// Loads a value from the bool.
335 /// `load` takes an [`Ordering`] argument which describes the memory ordering
336 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
340 /// Panics if `order` is [`Release`] or [`AcqRel`].
342 /// [`Ordering`]: enum.Ordering.html
343 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
344 /// [`Release`]: enum.Ordering.html#variant.Release
345 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
346 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
347 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
352 /// use std::sync::atomic::{AtomicBool, Ordering};
354 /// let some_bool = AtomicBool::new(true);
356 /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
359 #[stable(feature = "rust1", since = "1.0.0")]
360 pub fn load(&self, order: Ordering) -> bool {
361 unsafe { atomic_load(self.v.get(), order) != 0 }
364 /// Stores a value into the bool.
366 /// `store` takes an [`Ordering`] argument which describes the memory ordering
367 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
371 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
373 /// [`Ordering`]: enum.Ordering.html
374 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
375 /// [`Release`]: enum.Ordering.html#variant.Release
376 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
377 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
378 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
383 /// use std::sync::atomic::{AtomicBool, Ordering};
385 /// let some_bool = AtomicBool::new(true);
387 /// some_bool.store(false, Ordering::Relaxed);
388 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
391 #[stable(feature = "rust1", since = "1.0.0")]
392 pub fn store(&self, val: bool, order: Ordering) {
394 atomic_store(self.v.get(), val as u8, order);
/// Stores a value into the bool, returning the previous value.
///
/// `swap` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let some_bool = AtomicBool::new(true);
///
/// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "cas")]
pub fn swap(&self, val: bool, order: Ordering) -> bool {
    unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
}
/// Stores a value into the [`bool`] if the current value is the same as the `current` value.
///
/// The return value is always the previous value. If it is equal to `current`, then the value
/// was updated.
///
/// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
/// ordering of this operation. Notice that even when using [`AcqRel`], the operation
/// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
/// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
/// happens, and using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
/// [`bool`]: ../../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let some_bool = AtomicBool::new(true);
///
/// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
///
/// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "cas")]
pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
    // Implemented in terms of `compare_exchange`; either way the previous
    // value is returned, so success and failure collapse to the same arm shape.
    match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
        Ok(x) => x,
        Err(x) => x,
    }
}
/// Stores a value into the [`bool`] if the current value is the same as the `current` value.
///
/// The return value is a result indicating whether the new value was written and containing
/// the previous value. On success this value is guaranteed to be equal to `current`.
///
/// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
/// ordering of this operation. The first describes the required ordering if the
/// operation succeeds while the second describes the required ordering when the
/// operation fails. Using [`Acquire`] as success ordering makes the store part
/// of this operation [`Relaxed`], and using [`Release`] makes the successful load
/// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
/// and must be equivalent to or weaker than the success ordering.
///
/// [`bool`]: ../../../std/primitive.bool.html
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let some_bool = AtomicBool::new(true);
///
/// assert_eq!(some_bool.compare_exchange(true,
///                                       false,
///                                       Ordering::Acquire,
///                                       Ordering::Relaxed),
///            Ok(true));
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
///
/// assert_eq!(some_bool.compare_exchange(true, true,
///                                       Ordering::SeqCst,
///                                       Ordering::Acquire),
///            Err(false));
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
/// ```
#[inline]
#[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
#[cfg(target_has_atomic = "cas")]
pub fn compare_exchange(&self,
                        current: bool,
                        new: bool,
                        success: Ordering,
                        failure: Ordering)
                        -> Result<bool, bool> {
    // Convert the `u8`-level result back to `bool` on both arms.
    match unsafe {
        atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
    } {
        Ok(x) => Ok(x != 0),
        Err(x) => Err(x != 0),
    }
}
/// Stores a value into the [`bool`] if the current value is the same as the `current` value.
///
/// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
/// comparison succeeds, which can result in more efficient code on some platforms. The
/// return value is a result indicating whether the new value was written and containing the
/// previous value.
///
/// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
/// ordering of this operation. The first describes the required ordering if the
/// operation succeeds while the second describes the required ordering when the
/// operation fails. Using [`Acquire`] as success ordering makes the store part
/// of this operation [`Relaxed`], and using [`Release`] makes the successful load
/// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
/// and must be equivalent to or weaker than the success ordering.
///
/// [`bool`]: ../../../std/primitive.bool.html
/// [`compare_exchange`]: #method.compare_exchange
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let val = AtomicBool::new(false);
///
/// let new = true;
/// let mut old = val.load(Ordering::Relaxed);
/// loop {
///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
///         Ok(_) => break,
///         Err(x) => old = x,
///     }
/// }
/// ```
#[inline]
#[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
#[cfg(target_has_atomic = "cas")]
pub fn compare_exchange_weak(&self,
                             current: bool,
                             new: bool,
                             success: Ordering,
                             failure: Ordering)
                             -> Result<bool, bool> {
    match unsafe {
        atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
    } {
        Ok(x) => Ok(x != 0),
        Err(x) => Err(x != 0),
    }
}
/// Logical "and" with a boolean value.
///
/// Performs a logical "and" operation on the current value and the argument `val`, and sets
/// the new value to the result.
///
/// Returns the previous value.
///
/// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(false);
/// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "cas")]
pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
    unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
}
/// Logical "nand" with a boolean value.
///
/// Performs a logical "nand" operation on the current value and the argument `val`, and sets
/// the new value to the result.
///
/// Returns the previous value.
///
/// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
///
/// let foo = AtomicBool::new(false);
/// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "cas")]
pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
    // We can't use atomic_nand here because it can result in a bool with
    // an invalid value. This happens because the atomic operation is done
    // with an 8-bit integer internally, which would set the upper 7 bits.
    // So we just use fetch_xor or swap instead.
    if val {
        // !(x & true) == !x
        // We must invert the bool.
        self.fetch_xor(true, order)
    } else {
        // !(x & false) == true
        // We must set the bool to true.
        self.swap(true, order)
    }
}
/// Logical "or" with a boolean value.
///
/// Performs a logical "or" operation on the current value and the argument `val`, and sets the
/// new value to the result.
///
/// Returns the previous value.
///
/// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(false);
/// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "cas")]
pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
    unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
}
/// Logical "xor" with a boolean value.
///
/// Performs a logical "xor" operation on the current value and the argument `val`, and sets
/// the new value to the result.
///
/// Returns the previous value.
///
/// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
///
/// let foo = AtomicBool::new(false);
/// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "cas")]
pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
    unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
}
#[cfg(target_has_atomic = "ptr")]
impl<T> AtomicPtr<T> {
    /// Creates a new `AtomicPtr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let ptr = &mut 5;
    /// let atomic_ptr = AtomicPtr::new(ptr);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(p: *mut T) -> AtomicPtr<T> {
        AtomicPtr { p: UnsafeCell::new(p) }
    }
778 /// Returns a mutable reference to the underlying pointer.
780 /// This is safe because the mutable reference guarantees that no other threads are
781 /// concurrently accessing the atomic data.
786 /// use std::sync::atomic::{AtomicPtr, Ordering};
788 /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
789 /// *atomic_ptr.get_mut() = &mut 5;
790 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
793 #[stable(feature = "atomic_access", since = "1.15.0")]
794 pub fn get_mut(&mut self) -> &mut *mut T {
795 unsafe { &mut *self.p.get() }
798 /// Consumes the atomic and returns the contained value.
800 /// This is safe because passing `self` by value guarantees that no other threads are
801 /// concurrently accessing the atomic data.
806 /// use std::sync::atomic::AtomicPtr;
808 /// let atomic_ptr = AtomicPtr::new(&mut 5);
809 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
812 #[stable(feature = "atomic_access", since = "1.15.0")]
813 pub fn into_inner(self) -> *mut T {
817 /// Loads a value from the pointer.
819 /// `load` takes an [`Ordering`] argument which describes the memory ordering
820 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
824 /// Panics if `order` is [`Release`] or [`AcqRel`].
826 /// [`Ordering`]: enum.Ordering.html
827 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
828 /// [`Release`]: enum.Ordering.html#variant.Release
829 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
830 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
831 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
836 /// use std::sync::atomic::{AtomicPtr, Ordering};
838 /// let ptr = &mut 5;
839 /// let some_ptr = AtomicPtr::new(ptr);
841 /// let value = some_ptr.load(Ordering::Relaxed);
844 #[stable(feature = "rust1", since = "1.0.0")]
845 pub fn load(&self, order: Ordering) -> *mut T {
846 unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
849 /// Stores a value into the pointer.
851 /// `store` takes an [`Ordering`] argument which describes the memory ordering
852 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
856 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
858 /// [`Ordering`]: enum.Ordering.html
859 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
860 /// [`Release`]: enum.Ordering.html#variant.Release
861 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
862 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
863 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
868 /// use std::sync::atomic::{AtomicPtr, Ordering};
870 /// let ptr = &mut 5;
871 /// let some_ptr = AtomicPtr::new(ptr);
873 /// let other_ptr = &mut 10;
875 /// some_ptr.store(other_ptr, Ordering::Relaxed);
878 #[stable(feature = "rust1", since = "1.0.0")]
879 pub fn store(&self, ptr: *mut T, order: Ordering) {
881 atomic_store(self.p.get() as *mut usize, ptr as usize, order);
/// Stores a value into the pointer, returning the previous value.
///
/// `swap` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
/// let some_ptr = AtomicPtr::new(ptr);
///
/// let other_ptr = &mut 10;
///
/// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "cas")]
pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
    unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
}
/// Stores a value into the pointer if the current value is the same as the `current` value.
///
/// The return value is always the previous value. If it is equal to `current`, then the value
/// was updated.
///
/// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
/// ordering of this operation. Notice that even when using [`AcqRel`], the operation
/// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
/// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
/// happens, and using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
/// let some_ptr = AtomicPtr::new(ptr);
///
/// let other_ptr = &mut 10;
/// let another_ptr = &mut 10;
///
/// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "cas")]
pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
    // Delegates to `compare_exchange`; both arms yield the previous value.
    match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
        Ok(x) => x,
        Err(x) => x,
    }
}
/// Stores a value into the pointer if the current value is the same as the `current` value.
///
/// The return value is a result indicating whether the new value was written and containing
/// the previous value. On success this value is guaranteed to be equal to `current`.
///
/// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
/// ordering of this operation. The first describes the required ordering if the
/// operation succeeds while the second describes the required ordering when the
/// operation fails. Using [`Acquire`] as success ordering makes the store part
/// of this operation [`Relaxed`], and using [`Release`] makes the successful load
/// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
/// and must be equivalent to or weaker than the success ordering.
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
/// let some_ptr = AtomicPtr::new(ptr);
///
/// let other_ptr = &mut 10;
/// let another_ptr = &mut 10;
///
/// let value = some_ptr.compare_exchange(other_ptr, another_ptr,
///                                       Ordering::SeqCst, Ordering::Relaxed);
/// ```
#[inline]
#[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
#[cfg(target_has_atomic = "cas")]
pub fn compare_exchange(&self,
                        current: *mut T,
                        new: *mut T,
                        success: Ordering,
                        failure: Ordering)
                        -> Result<*mut T, *mut T> {
    unsafe {
        // Performed at `usize` width; pointer/integer casts are lossless here.
        let res = atomic_compare_exchange(self.p.get() as *mut usize,
                                          current as usize,
                                          new as usize,
                                          success,
                                          failure);
        match res {
            Ok(x) => Ok(x as *mut T),
            Err(x) => Err(x as *mut T),
        }
    }
}
// Weak compare-exchange for AtomicPtr: identical contract to
// `compare_exchange` except it may fail spuriously, which is why it is
// intended to be called in a retry loop (see the doc example below).
1011     /// Stores a value into the pointer if the current value is the same as the `current` value.
1013     /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
1014     /// comparison succeeds, which can result in more efficient code on some platforms. The
1015     /// return value is a result indicating whether the new value was written and containing the
1018     /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1019     /// ordering of this operation. The first describes the required ordering if the
1020     /// operation succeeds while the second describes the required ordering when the
1021     /// operation fails. Using [`Acquire`] as success ordering makes the store part
1022     /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1023     /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1024     /// and must be equivalent to or weaker than the success ordering.
1026     /// [`compare_exchange`]: #method.compare_exchange
1027     /// [`Ordering`]: enum.Ordering.html
1028     /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1029     /// [`Release`]: enum.Ordering.html#variant.Release
1030     /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1031     /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1036     /// use std::sync::atomic::{AtomicPtr, Ordering};
1038     /// let some_ptr = AtomicPtr::new(&mut 5);
1040     /// let new = &mut 10;
1041     /// let mut old = some_ptr.load(Ordering::Relaxed);
1043     ///     match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1045     ///         Err(x) => old = x,
1050     #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1051     #[cfg(target_has_atomic = "cas")]
1052     pub fn compare_exchange_weak(&self,
// NOTE(review): parameter lines (orig. 1053-1056) absent from this listing.
1057                                  -> Result<*mut T, *mut T> {
// NOTE(review): orig. 1060-1064 (remaining arguments and `match res {`) absent.
1059         let res = atomic_compare_exchange_weak(self.p.get() as *mut usize,
// Map the usize result back to typed raw pointers on both arms.
1065             Ok(x) => Ok(x as *mut T),
1066             Err(x) => Err(x as *mut T),
// Conversion impl: `AtomicBool::from(b)` is shorthand for `AtomicBool::new(b)`.
// Gated on 8-bit atomic support, matching AtomicBool's own cfg.
1072 #[cfg(target_has_atomic = "8")]
1073 #[stable(feature = "atomic_bool_from", since = "1.24.0")]
1074 impl From<bool> for AtomicBool {
1076     fn from(b: bool) -> Self { Self::new(b) }
// Conversion impl: `AtomicPtr::from(p)` is shorthand for `AtomicPtr::new(p)`.
// Gated on pointer-width atomic support, matching AtomicPtr's own cfg.
1079 #[cfg(target_has_atomic = "ptr")]
1080 #[stable(feature = "atomic_from", since = "1.23.0")]
1081 impl<T> From<*mut T> for AtomicPtr<T> {
1083     fn from(p: *mut T) -> Self { Self::new(p) }
// `atomic_int!` generates one atomic integer type (struct, INIT const, trait
// impls, and the full method set) per invocation. The visible arm parameters:
// several `$…:meta` stability attributes, the primitive's name/doc-link
// strings, a doc-example feature prefix, min/max intrinsic fn names, and
// finally the `$int_type $atomic_type $atomic_init` identifier triple.
// NOTE(review): this listing is sampled — orig. lines 1088-1090, 1092-1093,
// 1097 (further `$…:meta`/`$align` parameters) are absent.
1086 #[cfg(target_has_atomic = "ptr")]
1087 macro_rules! atomic_int {
1091      $stable_access:meta,
1094      $s_int_type:expr, $int_ref:expr,
1095      $extra_feature:expr,
1096      $min_fn:ident, $max_fn:ident,
1098      $int_type:ident $atomic_type:ident $atomic_init:ident) => {
1099         /// An integer type which can be safely shared between threads.
1101         /// This type has the same in-memory representation as the underlying
1102         /// integer type, [`
1103         #[doc = $s_int_type]
1106         /// ). For more about the differences between atomic types and
1107         /// non-atomic types, please see the [module-level documentation].
1109         /// [module-level documentation]: index.html
// repr(C, align) guarantees the layout matches the plain integer type, as the
// doc comment above promises; `$align` is one of the absent macro parameters.
1111         #[repr(C, align($align))]
1112         pub struct $atomic_type {
1113             v: UnsafeCell<$int_type>,
1116         /// An atomic integer initialized to `0`.
1118         pub const $atomic_init: $atomic_type = $atomic_type::new(0);
// Default / From / Debug are thin delegations; Debug loads with SeqCst.
1121         impl Default for $atomic_type {
1122             fn default() -> Self {
1123                 Self::new(Default::default())
1128         impl From<$int_type> for $atomic_type {
1130             fn from(v: $int_type) -> Self { Self::new(v) }
1134         impl fmt::Debug for $atomic_type {
1135             fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1136                 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
1140         // Send is implicitly implemented.
// UnsafeCell is !Sync by default; this opt-in is what makes the atomic shareable.
1142         unsafe impl Sync for $atomic_type {}
// Constructor and non-atomic accessors. `get_mut` and `into_inner` need no
// atomic instructions: exclusive access (&mut self / by-value self) proves no
// other thread can observe the cell, as the generated docs state.
// Doc text is built with concat!() so each generated type gets examples that
// name itself via stringify!($atomic_type).
1146                 concat!("Creates a new atomic integer.
1151 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1153 let atomic_forty_two = ", stringify!($atomic_type), "::new(42);
1157             pub const fn new(v: $int_type) -> Self {
1158                 $atomic_type {v: UnsafeCell::new(v)}
1163                 concat!("Returns a mutable reference to the underlying integer.
1165 This is safe because the mutable reference guarantees that no other threads are
1166 concurrently accessing the atomic data.
1171 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1173 let mut some_var = ", stringify!($atomic_type), "::new(10);
1174 assert_eq!(*some_var.get_mut(), 10);
1175 *some_var.get_mut() = 5;
1176 assert_eq!(some_var.load(Ordering::SeqCst), 5);
1180             pub fn get_mut(&mut self) -> &mut $int_type {
1181                 unsafe { &mut *self.v.get() }
1186                 concat!("Consumes the atomic and returns the contained value.
1188 This is safe because passing `self` by value guarantees that no other threads are
1189 concurrently accessing the atomic data.
1194 ", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
1196 let some_var = ", stringify!($atomic_type), "::new(5);
1197 assert_eq!(some_var.into_inner(), 5);
// NOTE(review): the body of into_inner (orig. 1202, presumably
// `self.v.into_inner()` or equivalent) is absent from this sampled listing.
1201             pub fn into_inner(self) -> $int_type {
// load/store: the one-way operations. Their docs spell out the panic
// contract — load rejects Release/AcqRel, store rejects Acquire/AcqRel —
// enforced by the panicking match arms in the free `atomic_load`/`atomic_store`
// helpers at the bottom of the file.
1207                 concat!("Loads a value from the atomic integer.
1209 `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1210 Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1214 Panics if `order` is [`Release`] or [`AcqRel`].
1216 [`Ordering`]: enum.Ordering.html
1217 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1218 [`Release`]: enum.Ordering.html#variant.Release
1219 [`Acquire`]: enum.Ordering.html#variant.Acquire
1220 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1221 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1226 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1228 let some_var = ", stringify!($atomic_type), "::new(5);
1230 assert_eq!(some_var.load(Ordering::Relaxed), 5);
1234             pub fn load(&self, order: Ordering) -> $int_type {
1235                 unsafe { atomic_load(self.v.get(), order) }
1240                 concat!("Stores a value into the atomic integer.
1242 `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1243  Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
1247 Panics if `order` is [`Acquire`] or [`AcqRel`].
1249 [`Ordering`]: enum.Ordering.html
1250 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1251 [`Release`]: enum.Ordering.html#variant.Release
1252 [`Acquire`]: enum.Ordering.html#variant.Acquire
1253 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1254 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1259 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1261 let some_var = ", stringify!($atomic_type), "::new(5);
1263 some_var.store(10, Ordering::Relaxed);
1264 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1268             pub fn store(&self, val: $int_type, order: Ordering) {
1269                 unsafe { atomic_store(self.v.get(), val, order); }
// swap and the legacy single-ordering compare_and_swap. Unlike load/store,
// these are read-modify-write ops and accept every Ordering; note they are
// additionally gated on target_has_atomic = "cas".
1274                 concat!("Stores a value into the atomic integer, returning the previous value.
1276 `swap` takes an [`Ordering`] argument which describes the memory ordering
1277 of this operation. All ordering modes are possible. Note that using
1278 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1279 using [`Release`] makes the load part [`Relaxed`].
1281 [`Ordering`]: enum.Ordering.html
1282 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1283 [`Release`]: enum.Ordering.html#variant.Release
1284 [`Acquire`]: enum.Ordering.html#variant.Acquire
1289 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1291 let some_var = ", stringify!($atomic_type), "::new(5);
1293 assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
1297             #[cfg(target_has_atomic = "cas")]
1298             pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
1299                 unsafe { atomic_swap(self.v.get(), val, order) }
1304                 concat!("Stores a value into the atomic integer if the current value is the same as
1305 the `current` value.
1307 The return value is always the previous value. If it is equal to `current`, then the
1310 `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1311 ordering of this operation. Notice that even when using [`AcqRel`], the operation
1312 might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1313 Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1314 happens, and using [`Release`] makes the load part [`Relaxed`].
1316 [`Ordering`]: enum.Ordering.html
1317 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1318 [`Release`]: enum.Ordering.html#variant.Release
1319 [`Acquire`]: enum.Ordering.html#variant.Acquire
1320 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1325 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1327 let some_var = ", stringify!($atomic_type), "::new(5);
1329 assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
1330 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1332 assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
1333 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1337             #[cfg(target_has_atomic = "cas")]
1338             pub fn compare_and_swap(&self,
// Delegates to compare_exchange; failure ordering is derived from `order`.
// NOTE(review): orig. lines 1339-1340 (`current`/`new` params), 1343-1344
// (`new`/`order` args) and 1346-1348 (match arms) are absent from this listing.
1341                                     order: Ordering) -> $int_type {
1342                 match self.compare_exchange(current,
1345                                             strongest_failure_ordering(order)) {
// The two-ordering CAS pair. Strong version never fails spuriously; the weak
// version may, and its generated doc example shows the canonical retry loop.
1353                 concat!("Stores a value into the atomic integer if the current value is the same as
1354 the `current` value.
1356 The return value is a result indicating whether the new value was written and
1357 containing the previous value. On success this value is guaranteed to be equal to
1360 `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1361 ordering of this operation. The first describes the required ordering if the
1362 operation succeeds while the second describes the required ordering when the
1363 operation fails. Using [`Acquire`] as success ordering makes the store part
1364 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1365 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1366 and must be equivalent to or weaker than the success ordering.
1368 [`Ordering`]: enum.Ordering.html
1369 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1370 [`Release`]: enum.Ordering.html#variant.Release
1371 [`Acquire`]: enum.Ordering.html#variant.Acquire
1372 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1377 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1379 let some_var = ", stringify!($atomic_type), "::new(5);
1381 assert_eq!(some_var.compare_exchange(5, 10,
1385 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1387 assert_eq!(some_var.compare_exchange(6, 12,
1391 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1395             #[cfg(target_has_atomic = "cas")]
1396             pub fn compare_exchange(&self,
// NOTE(review): orig. 1397-1399 (`current`/`new`/`success` params) absent.
1400                                     failure: Ordering) -> Result<$int_type, $int_type> {
1401                 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
1406                 concat!("Stores a value into the atomic integer if the current value is the same as
1407 the `current` value.
1409 Unlike [`compare_exchange`], this function is allowed to spuriously fail even
1410 when the comparison succeeds, which can result in more efficient code on some
1411 platforms. The return value is a result indicating whether the new value was
1412 written and containing the previous value.
1414 `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1415 ordering of this operation. The first describes the required ordering if the
1416 operation succeeds while the second describes the required ordering when the
1417 operation fails. Using [`Acquire`] as success ordering makes the store part
1418 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1419 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1420 and must be equivalent to or weaker than the success ordering.
1422 [`compare_exchange`]: #method.compare_exchange
1423 [`Ordering`]: enum.Ordering.html
1424 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1425 [`Release`]: enum.Ordering.html#variant.Release
1426 [`Acquire`]: enum.Ordering.html#variant.Acquire
1427 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1432 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1434 let val = ", stringify!($atomic_type), "::new(4);
1436 let mut old = val.load(Ordering::Relaxed);
1439     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1447             #[cfg(target_has_atomic = "cas")]
1448             pub fn compare_exchange_weak(&self,
// NOTE(review): orig. 1449-1451 (`current`/`new`/`success` params) absent.
1452                                          failure: Ordering) -> Result<$int_type, $int_type> {
1454                     atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
// Wrapping arithmetic RMW ops: fetch_add / fetch_sub. Both return the
// previous value and wrap on overflow, as the generated docs state.
1460                 concat!("Adds to the current value, returning the previous value.
1462 This operation wraps around on overflow.
1464 `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
1465 of this operation. All ordering modes are possible. Note that using
1466 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1467 using [`Release`] makes the load part [`Relaxed`].
1469 [`Ordering`]: enum.Ordering.html
1470 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1471 [`Release`]: enum.Ordering.html#variant.Release
1472 [`Acquire`]: enum.Ordering.html#variant.Acquire
1477 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1479 let foo = ", stringify!($atomic_type), "::new(0);
1480 assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
1481 assert_eq!(foo.load(Ordering::SeqCst), 10);
1485             #[cfg(target_has_atomic = "cas")]
1486             pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
1487                 unsafe { atomic_add(self.v.get(), val, order) }
1492                 concat!("Subtracts from the current value, returning the previous value.
1494 This operation wraps around on overflow.
1496 `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
1497 of this operation. All ordering modes are possible. Note that using
1498 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1499 using [`Release`] makes the load part [`Relaxed`].
1501 [`Ordering`]: enum.Ordering.html
1502 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1503 [`Release`]: enum.Ordering.html#variant.Release
1504 [`Acquire`]: enum.Ordering.html#variant.Acquire
1509 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1511 let foo = ", stringify!($atomic_type), "::new(20);
1512 assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
1513 assert_eq!(foo.load(Ordering::SeqCst), 10);
1517             #[cfg(target_has_atomic = "cas")]
1518             pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
1519                 unsafe { atomic_sub(self.v.get(), val, order) }
// Bitwise RMW family: and / nand / or / xor. Each stores the bitwise result
// and returns the previous value; each delegates to the matching free
// `atomic_*` intrinsic wrapper.
1524                 concat!("Bitwise \"and\" with the current value.
1526 Performs a bitwise \"and\" operation on the current value and the argument `val`, and
1527 sets the new value to the result.
1529 Returns the previous value.
1531 `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
1532 of this operation. All ordering modes are possible. Note that using
1533 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1534 using [`Release`] makes the load part [`Relaxed`].
1536 [`Ordering`]: enum.Ordering.html
1537 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1538 [`Release`]: enum.Ordering.html#variant.Release
1539 [`Acquire`]: enum.Ordering.html#variant.Acquire
1544 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1546 let foo = ", stringify!($atomic_type), "::new(0b101101);
1547 assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
1548 assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
1552             #[cfg(target_has_atomic = "cas")]
1553             pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
1554                 unsafe { atomic_and(self.v.get(), val, order) }
1559                 concat!("Bitwise \"nand\" with the current value.
1561 Performs a bitwise \"nand\" operation on the current value and the argument `val`, and
1562 sets the new value to the result.
1564 Returns the previous value.
1566 `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
1567 of this operation. All ordering modes are possible. Note that using
1568 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1569 using [`Release`] makes the load part [`Relaxed`].
1571 [`Ordering`]: enum.Ordering.html
1572 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1573 [`Release`]: enum.Ordering.html#variant.Release
1574 [`Acquire`]: enum.Ordering.html#variant.Acquire
1579 ", $extra_feature, "
1580 use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1582 let foo = ", stringify!($atomic_type), "::new(0x13);
1583 assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
1584 assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
1588             #[cfg(target_has_atomic = "cas")]
1589             pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
1590                 unsafe { atomic_nand(self.v.get(), val, order) }
1595                 concat!("Bitwise \"or\" with the current value.
1597 Performs a bitwise \"or\" operation on the current value and the argument `val`, and
1598 sets the new value to the result.
1600 Returns the previous value.
1602 `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
1603 of this operation. All ordering modes are possible. Note that using
1604 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1605 using [`Release`] makes the load part [`Relaxed`].
1607 [`Ordering`]: enum.Ordering.html
1608 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1609 [`Release`]: enum.Ordering.html#variant.Release
1610 [`Acquire`]: enum.Ordering.html#variant.Acquire
1615 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1617 let foo = ", stringify!($atomic_type), "::new(0b101101);
1618 assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
1619 assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
1623             #[cfg(target_has_atomic = "cas")]
1624             pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
1625                 unsafe { atomic_or(self.v.get(), val, order) }
1630                 concat!("Bitwise \"xor\" with the current value.
1632 Performs a bitwise \"xor\" operation on the current value and the argument `val`, and
1633 sets the new value to the result.
1635 Returns the previous value.
1637 `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
1638 of this operation. All ordering modes are possible. Note that using
1639 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1640 using [`Release`] makes the load part [`Relaxed`].
1642 [`Ordering`]: enum.Ordering.html
1643 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1644 [`Release`]: enum.Ordering.html#variant.Release
1645 [`Acquire`]: enum.Ordering.html#variant.Acquire
1650 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1652 let foo = ", stringify!($atomic_type), "::new(0b101101);
1653 assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
1654 assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
1658             #[cfg(target_has_atomic = "cas")]
1659             pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
1660                 unsafe { atomic_xor(self.v.get(), val, order) }
// fetch_update: a CAS loop packaged as a method (unstable `no_more_cas`
// feature at this vintage). Loads with `fetch_order`, then retries
// `compare_exchange_weak(prev, f(prev), set_order, fetch_order)` until either
// the closure returns None (Err(prev)) or the exchange succeeds (Ok(prev)).
1665                 concat!("Fetches the value, and applies a function to it that returns an optional
1666 new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
1667 `Err(previous_value)`.
1669 Note: This may call the function multiple times if the value has been changed from other threads in
1670 the meantime, as long as the function returns `Some(_)`, but the function will have been applied
1671 but once to the stored value.
1673 `fetch_update` takes two [`Ordering`] arguments to describe the memory
1674 ordering of this operation. The first describes the required ordering for loads
1675 and failed updates while the second describes the required ordering when the
1676 operation finally succeeds. Beware that this is different from the two
1677 modes in [`compare_exchange`]!
1679 Using [`Acquire`] as success ordering makes the store part
1680 of this operation [`Relaxed`], and using [`Release`] makes the final successful load
1681 [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1682 and must be equivalent to or weaker than the success ordering.
1684 [`bool`]: ../../../std/primitive.bool.html
1685 [`compare_exchange`]: #method.compare_exchange
1686 [`Ordering`]: enum.Ordering.html
1687 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1688 [`Release`]: enum.Ordering.html#variant.Release
1689 [`Acquire`]: enum.Ordering.html#variant.Acquire
1690 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1695 #![feature(no_more_cas)]
1696 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1698 let x = ", stringify!($atomic_type), "::new(7);
1699 assert_eq!(x.fetch_update(|_| None, Ordering::SeqCst, Ordering::SeqCst), Err(7));
1700 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(7));
1701 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(8));
1702 assert_eq!(x.load(Ordering::SeqCst), 9);
1705             #[unstable(feature = "no_more_cas",
1706                        reason = "no more CAS loops in user code",
1708             #[cfg(target_has_atomic = "cas")]
1709             pub fn fetch_update<F>(&self,
// NOTE(review): orig. line 1710 (the `mut f: F` parameter) is absent from this listing.
1711                                    fetch_order: Ordering,
1712                                    set_order: Ordering) -> Result<$int_type, $int_type>
1713             where F: FnMut($int_type) -> Option<$int_type> {
1714                 let mut prev = self.load(fetch_order);
// Classic CAS loop: on spurious/contended failure, retry with the
// just-observed value; the loop exits with Err(prev) once f returns None.
1715                 while let Some(next) = f(prev) {
1716                     match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
1717                         x @ Ok(_) => return x,
1718                         Err(next_prev) => prev = next_prev
// Atomic max/min (unstable `atomic_min_max` feature at this vintage). These
// dispatch through the macro's `$max_fn`/`$min_fn` parameters so signed types
// get atomic_min/atomic_max and unsigned types get atomic_umin/atomic_umax
// (see the invocations below the macro).
1726                 concat!("Maximum with the current value.
1728 Finds the maximum of the current value and the argument `val`, and
1729 sets the new value to the result.
1731 Returns the previous value.
1733 `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
1734 of this operation. All ordering modes are possible. Note that using
1735 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1736 using [`Release`] makes the load part [`Relaxed`].
1738 [`Ordering`]: enum.Ordering.html
1739 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1740 [`Release`]: enum.Ordering.html#variant.Release
1741 [`Acquire`]: enum.Ordering.html#variant.Acquire
1746 #![feature(atomic_min_max)]
1747 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1749 let foo = ", stringify!($atomic_type), "::new(23);
1750 assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
1751 assert_eq!(foo.load(Ordering::SeqCst), 42);
1754 If you want to obtain the maximum value in one step, you can use the following:
1757 #![feature(atomic_min_max)]
1758 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1760 let foo = ", stringify!($atomic_type), "::new(23);
1762 let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
1763 assert!(max_foo == 42);
1766             #[unstable(feature = "atomic_min_max",
1767                        reason = "easier and faster min/max than writing manual CAS loop",
1769             #[cfg(target_has_atomic = "cas")]
1770             pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
1771                 unsafe { $max_fn(self.v.get(), val, order) }
1776                 concat!("Minimum with the current value.
1778 Finds the minimum of the current value and the argument `val`, and
1779 sets the new value to the result.
1781 Returns the previous value.
1783 `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
1784 of this operation. All ordering modes are possible. Note that using
1785 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1786 using [`Release`] makes the load part [`Relaxed`].
1788 [`Ordering`]: enum.Ordering.html
1789 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1790 [`Release`]: enum.Ordering.html#variant.Release
1791 [`Acquire`]: enum.Ordering.html#variant.Acquire
1796 #![feature(atomic_min_max)]
1797 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1799 let foo = ", stringify!($atomic_type), "::new(23);
1800 assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
1801 assert_eq!(foo.load(Ordering::Relaxed), 23);
1802 assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
1803 assert_eq!(foo.load(Ordering::Relaxed), 22);
1806 If you want to obtain the minimum value in one step, you can use the following:
1809 #![feature(atomic_min_max)]
1810 ", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1812 let foo = ", stringify!($atomic_type), "::new(23);
1814 let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
1815 assert_eq!(min_foo, 12);
1818             #[unstable(feature = "atomic_min_max",
1819                        reason = "easier and faster min/max than writing manual CAS loop",
1821             #[cfg(target_has_atomic = "cas")]
1822             pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
1823                 unsafe { $min_fn(self.v.get(), val, order) }
// One atomic_int! invocation per fixed-width integer. The six repeated
// `unstable(feature = "integer_atomics", ...)` lines fill the macro's six
// stability-attribute parameters (all unstable at this vintage for the
// fixed-width types, unlike the stable isize/usize invocations below).
// Signed types pass atomic_min/atomic_max; unsigned pass atomic_umin/atomic_umax.
// NOTE(review): the `$align` argument line of each invocation (e.g. orig.
// 1842, 1856, ...) is absent from this sampled listing.
1831 #[cfg(target_has_atomic = "8")]
1833     unstable(feature = "integer_atomics", issue = "32976"),
1834     unstable(feature = "integer_atomics", issue = "32976"),
1835     unstable(feature = "integer_atomics", issue = "32976"),
1836     unstable(feature = "integer_atomics", issue = "32976"),
1837     unstable(feature = "integer_atomics", issue = "32976"),
1838     unstable(feature = "integer_atomics", issue = "32976"),
1839     "i8", "../../../std/primitive.i8.html",
1840     "#![feature(integer_atomics)]\n\n",
1841     atomic_min, atomic_max,
1843     i8 AtomicI8 ATOMIC_I8_INIT
1845 #[cfg(target_has_atomic = "8")]
1847     unstable(feature = "integer_atomics", issue = "32976"),
1848     unstable(feature = "integer_atomics", issue = "32976"),
1849     unstable(feature = "integer_atomics", issue = "32976"),
1850     unstable(feature = "integer_atomics", issue = "32976"),
1851     unstable(feature = "integer_atomics", issue = "32976"),
1852     unstable(feature = "integer_atomics", issue = "32976"),
1853     "u8", "../../../std/primitive.u8.html",
1854     "#![feature(integer_atomics)]\n\n",
1855     atomic_umin, atomic_umax,
1857     u8 AtomicU8 ATOMIC_U8_INIT
1859 #[cfg(target_has_atomic = "16")]
1861     unstable(feature = "integer_atomics", issue = "32976"),
1862     unstable(feature = "integer_atomics", issue = "32976"),
1863     unstable(feature = "integer_atomics", issue = "32976"),
1864     unstable(feature = "integer_atomics", issue = "32976"),
1865     unstable(feature = "integer_atomics", issue = "32976"),
1866     unstable(feature = "integer_atomics", issue = "32976"),
1867     "i16", "../../../std/primitive.i16.html",
1868     "#![feature(integer_atomics)]\n\n",
1869     atomic_min, atomic_max,
1871     i16 AtomicI16 ATOMIC_I16_INIT
1873 #[cfg(target_has_atomic = "16")]
1875     unstable(feature = "integer_atomics", issue = "32976"),
1876     unstable(feature = "integer_atomics", issue = "32976"),
1877     unstable(feature = "integer_atomics", issue = "32976"),
1878     unstable(feature = "integer_atomics", issue = "32976"),
1879     unstable(feature = "integer_atomics", issue = "32976"),
1880     unstable(feature = "integer_atomics", issue = "32976"),
1881     "u16", "../../../std/primitive.u16.html",
1882     "#![feature(integer_atomics)]\n\n",
1883     atomic_umin, atomic_umax,
1885     u16 AtomicU16 ATOMIC_U16_INIT
1887 #[cfg(target_has_atomic = "32")]
1889     unstable(feature = "integer_atomics", issue = "32976"),
1890     unstable(feature = "integer_atomics", issue = "32976"),
1891     unstable(feature = "integer_atomics", issue = "32976"),
1892     unstable(feature = "integer_atomics", issue = "32976"),
1893     unstable(feature = "integer_atomics", issue = "32976"),
1894     unstable(feature = "integer_atomics", issue = "32976"),
1895     "i32", "../../../std/primitive.i32.html",
1896     "#![feature(integer_atomics)]\n\n",
1897     atomic_min, atomic_max,
1899     i32 AtomicI32 ATOMIC_I32_INIT
1901 #[cfg(target_has_atomic = "32")]
1903     unstable(feature = "integer_atomics", issue = "32976"),
1904     unstable(feature = "integer_atomics", issue = "32976"),
1905     unstable(feature = "integer_atomics", issue = "32976"),
1906     unstable(feature = "integer_atomics", issue = "32976"),
1907     unstable(feature = "integer_atomics", issue = "32976"),
1908     unstable(feature = "integer_atomics", issue = "32976"),
1909     "u32", "../../../std/primitive.u32.html",
1910     "#![feature(integer_atomics)]\n\n",
1911     atomic_umin, atomic_umax,
1913     u32 AtomicU32 ATOMIC_U32_INIT
1915 #[cfg(target_has_atomic = "64")]
1917     unstable(feature = "integer_atomics", issue = "32976"),
1918     unstable(feature = "integer_atomics", issue = "32976"),
1919     unstable(feature = "integer_atomics", issue = "32976"),
1920     unstable(feature = "integer_atomics", issue = "32976"),
1921     unstable(feature = "integer_atomics", issue = "32976"),
1922     unstable(feature = "integer_atomics", issue = "32976"),
1923     "i64", "../../../std/primitive.i64.html",
1924     "#![feature(integer_atomics)]\n\n",
1925     atomic_min, atomic_max,
1927     i64 AtomicI64 ATOMIC_I64_INIT
1929 #[cfg(target_has_atomic = "64")]
1931     unstable(feature = "integer_atomics", issue = "32976"),
1932     unstable(feature = "integer_atomics", issue = "32976"),
1933     unstable(feature = "integer_atomics", issue = "32976"),
1934     unstable(feature = "integer_atomics", issue = "32976"),
1935     unstable(feature = "integer_atomics", issue = "32976"),
1936     unstable(feature = "integer_atomics", issue = "32976"),
1937     "u64", "../../../std/primitive.u64.html",
1938     "#![feature(integer_atomics)]\n\n",
1939     atomic_umin, atomic_umax,
1941     u64 AtomicU64 ATOMIC_U64_INIT
1943 #[cfg(target_has_atomic = "128")]
1945     unstable(feature = "integer_atomics", issue = "32976"),
1946     unstable(feature = "integer_atomics", issue = "32976"),
1947     unstable(feature = "integer_atomics", issue = "32976"),
1948     unstable(feature = "integer_atomics", issue = "32976"),
1949     unstable(feature = "integer_atomics", issue = "32976"),
1950     unstable(feature = "integer_atomics", issue = "32976"),
1951     "i128", "../../../std/primitive.i128.html",
1952     "#![feature(integer_atomics)]\n\n",
1953     atomic_min, atomic_max,
1955     i128 AtomicI128 ATOMIC_I128_INIT
1957 #[cfg(target_has_atomic = "128")]
1959     unstable(feature = "integer_atomics", issue = "32976"),
1960     unstable(feature = "integer_atomics", issue = "32976"),
1961     unstable(feature = "integer_atomics", issue = "32976"),
1962     unstable(feature = "integer_atomics", issue = "32976"),
1963     unstable(feature = "integer_atomics", issue = "32976"),
1964     unstable(feature = "integer_atomics", issue = "32976"),
1965     "u128", "../../../std/primitive.u128.html",
1966     "#![feature(integer_atomics)]\n\n",
1967     atomic_umin, atomic_umax,
1969     u128 AtomicU128 ATOMIC_U128_INIT
// `ptr_width!()` expands to the size/alignment in bytes of a pointer-sized
// integer on the current target. It is consumed by the `atomic_int!`
// invocations for `AtomicIsize`/`AtomicUsize` below, where the alignment
// cannot be written as a single literal because it depends on the target.
#[cfg(target_pointer_width = "16")]
macro_rules! ptr_width {
    () => { 2 }
}
#[cfg(target_pointer_width = "32")]
macro_rules! ptr_width {
    () => { 4 }
}
#[cfg(target_pointer_width = "64")]
macro_rules! ptr_width {
    () => { 8 }
}
1983 #[cfg(target_has_atomic = "ptr")]
1985 stable(feature = "rust1", since = "1.0.0"),
1986 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
1987 stable(feature = "atomic_debug", since = "1.3.0"),
1988 stable(feature = "atomic_access", since = "1.15.0"),
1989 stable(feature = "atomic_from", since = "1.23.0"),
1990 stable(feature = "atomic_nand", since = "1.27.0"),
1991 "isize", "../../../std/primitive.isize.html",
1993 atomic_min, atomic_max,
1995 isize AtomicIsize ATOMIC_ISIZE_INIT
1997 #[cfg(target_has_atomic = "ptr")]
1999 stable(feature = "rust1", since = "1.0.0"),
2000 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2001 stable(feature = "atomic_debug", since = "1.3.0"),
2002 stable(feature = "atomic_access", since = "1.15.0"),
2003 stable(feature = "atomic_from", since = "1.23.0"),
2004 stable(feature = "atomic_nand", since = "1.27.0"),
2005 "usize", "../../../std/primitive.usize.html",
2007 atomic_umin, atomic_umax,
2009 usize AtomicUsize ATOMIC_USIZE_INIT
/// Maps a success ordering to the strongest failure ordering that
/// `compare_exchange` is allowed to pair with it: the failure ordering may
/// not be stronger than the success ordering and may not contain a release
/// component (a failed CAS performs no store).
#[inline]
#[cfg(target_has_atomic = "cas")]
fn strongest_failure_ordering(order: Ordering) -> Ordering {
    match order {
        Release => Relaxed,
        Relaxed => Relaxed,
        SeqCst => SeqCst,
        Acquire => Acquire,
        AcqRel => Acquire,
    }
}
2025 unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
2027 Release => intrinsics::atomic_store_rel(dst, val),
2028 Relaxed => intrinsics::atomic_store_relaxed(dst, val),
2029 SeqCst => intrinsics::atomic_store(dst, val),
2030 Acquire => panic!("there is no such thing as an acquire store"),
2031 AcqRel => panic!("there is no such thing as an acquire/release store"),
2036 unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
2038 Acquire => intrinsics::atomic_load_acq(dst),
2039 Relaxed => intrinsics::atomic_load_relaxed(dst),
2040 SeqCst => intrinsics::atomic_load(dst),
2041 Release => panic!("there is no such thing as a release load"),
2042 AcqRel => panic!("there is no such thing as an acquire/release load"),
/// Atomically replaces `*dst` with `val`, returning the previous value.
/// All five orderings are valid for an exchange.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xchg_acq(dst, val),
        Release => intrinsics::atomic_xchg_rel(dst, val),
        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xchg(dst, val),
    }
}
/// Atomically adds `val` to `*dst` (wrapping on overflow).
/// Returns the previous value (like __sync_fetch_and_add).
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xadd_acq(dst, val),
        Release => intrinsics::atomic_xadd_rel(dst, val),
        AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xadd(dst, val),
    }
}
/// Atomically subtracts `val` from `*dst` (wrapping on overflow).
/// Returns the previous value (like __sync_fetch_and_sub).
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xsub_acq(dst, val),
        Release => intrinsics::atomic_xsub_rel(dst, val),
        AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xsub(dst, val),
    }
}
/// Atomic compare-and-exchange: if `*dst == old`, stores `new` into `*dst`.
///
/// Returns `Ok(previous)` when the store happened and `Err(current)` when it
/// did not. The `(success, failure)` ordering pairs accepted here mirror the
/// combinations LLVM supports; a failure ordering may not contain a release
/// component and may not be stronger than the success ordering.
///
/// # Panics
///
/// Panics on an invalid failure ordering (see above).
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_compare_exchange<T>(dst: *mut T,
                                     old: T,
                                     new: T,
                                     success: Ordering,
                                     failure: Ordering)
                                     -> Result<T, T> {
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    if ok { Ok(val) } else { Err(val) }
}
/// Weak form of [`atomic_compare_exchange`]: may fail spuriously even when
/// `*dst == old`, which allows more efficient code on LL/SC architectures.
/// Callers must therefore retry in a loop.
///
/// Returns `Ok(previous)` on a successful store, `Err(current)` otherwise.
///
/// # Panics
///
/// Panics on an invalid failure ordering (same rules as the strong form).
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
                                          old: T,
                                          new: T,
                                          success: Ordering,
                                          failure: Ordering)
                                          -> Result<T, T> {
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    if ok { Ok(val) } else { Err(val) }
}
/// Atomically replaces `*dst` with `*dst & val`, returning the previous value.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_and_acq(dst, val),
        Release => intrinsics::atomic_and_rel(dst, val),
        AcqRel => intrinsics::atomic_and_acqrel(dst, val),
        Relaxed => intrinsics::atomic_and_relaxed(dst, val),
        SeqCst => intrinsics::atomic_and(dst, val),
    }
}
/// Atomically replaces `*dst` with `!(*dst & val)`, returning the previous value.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_nand_acq(dst, val),
        Release => intrinsics::atomic_nand_rel(dst, val),
        AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
        Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
        SeqCst => intrinsics::atomic_nand(dst, val),
    }
}
/// Atomically replaces `*dst` with `*dst | val`, returning the previous value.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_or_acq(dst, val),
        Release => intrinsics::atomic_or_rel(dst, val),
        AcqRel => intrinsics::atomic_or_acqrel(dst, val),
        Relaxed => intrinsics::atomic_or_relaxed(dst, val),
        SeqCst => intrinsics::atomic_or(dst, val),
    }
}
/// Atomically replaces `*dst` with `*dst ^ val`, returning the previous value.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xor_acq(dst, val),
        Release => intrinsics::atomic_xor_rel(dst, val),
        AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xor(dst, val),
    }
}
/// Atomically stores `max(*dst, val)` into `*dst`;
/// returns the previous value (signed comparison).
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_max<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_max_acq(dst, val),
        Release => intrinsics::atomic_max_rel(dst, val),
        AcqRel => intrinsics::atomic_max_acqrel(dst, val),
        Relaxed => intrinsics::atomic_max_relaxed(dst, val),
        SeqCst => intrinsics::atomic_max(dst, val),
    }
}
/// Atomically stores `min(*dst, val)` into `*dst`;
/// returns the previous value (signed comparison).
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_min<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_min_acq(dst, val),
        Release => intrinsics::atomic_min_rel(dst, val),
        AcqRel => intrinsics::atomic_min_acqrel(dst, val),
        Relaxed => intrinsics::atomic_min_relaxed(dst, val),
        SeqCst => intrinsics::atomic_min(dst, val),
    }
}
/// Atomically stores `max(*dst, val)` into `*dst`;
/// returns the previous value (unsigned comparison — the comment previously
/// said "signed", but the `umax` intrinsics compare unsigned).
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_umax<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_umax_acq(dst, val),
        Release => intrinsics::atomic_umax_rel(dst, val),
        AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
        Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
        SeqCst => intrinsics::atomic_umax(dst, val),
    }
}
/// Atomically stores `min(*dst, val)` into `*dst`;
/// returns the previous value (unsigned comparison — the comment previously
/// said "signed", but the `umin` intrinsics compare unsigned).
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_umin<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_umin_acq(dst, val),
        Release => intrinsics::atomic_umin_rel(dst, val),
        AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
        Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
        SeqCst => intrinsics::atomic_umin(dst, val),
    }
}
2234 /// An atomic fence.
2236 /// Depending on the specified order, a fence prevents the compiler and CPU from
2237 /// reordering certain types of memory operations around it.
2238 /// That creates synchronizes-with relationships between it and atomic operations
2239 /// or fences in other threads.
2241 /// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
2242 /// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
2243 /// exist operations X and Y, both operating on some atomic object 'M' such
2244 /// that A is sequenced before X, Y is synchronized before B and Y observes
2245 /// the change to M. This provides a happens-before dependence between A and B.
2248 /// Thread 1 Thread 2
2250 /// fence(Release); A --------------
2251 /// x.store(3, Relaxed); X --------- |
2254 /// -------------> Y if x.load(Relaxed) == 3 {
2255 /// |-------> B fence(Acquire);
2260 /// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
2263 /// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
2264 /// and [`Release`] semantics, participates in the global program order of the
2265 /// other [`SeqCst`] operations and/or fences.
2267 /// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
2271 /// Panics if `order` is [`Relaxed`].
2276 /// use std::sync::atomic::AtomicBool;
2277 /// use std::sync::atomic::fence;
2278 /// use std::sync::atomic::Ordering;
2280 /// // A mutual exclusion primitive based on spinlock.
2281 /// pub struct Mutex {
2282 /// flag: AtomicBool,
2286 /// pub fn new() -> Mutex {
2288 /// flag: AtomicBool::new(false),
2292 /// pub fn lock(&self) {
2293 /// while !self.flag.compare_and_swap(false, true, Ordering::Relaxed) {}
2294 /// // This fence synchronizes-with store in `unlock`.
2295 /// fence(Ordering::Acquire);
2298 /// pub fn unlock(&self) {
2299 /// self.flag.store(false, Ordering::Release);
2304 /// [`Ordering`]: enum.Ordering.html
2305 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2306 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2307 /// [`Release`]: enum.Ordering.html#variant.Release
2308 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2309 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2311 #[stable(feature = "rust1", since = "1.0.0")]
2312 #[cfg_attr(target_arch = "wasm32", allow(unused_variables))]
2313 pub fn fence(order: Ordering) {
2314 // On wasm32 it looks like fences aren't implemented in LLVM yet in that
2315 // they will cause LLVM to abort. The wasm instruction set doesn't have
2316 // fences right now. There's discussion online about the best way for tools
2317 // to conventionally implement fences at
2318 // https://github.com/WebAssembly/tool-conventions/issues/59. We should
2319 // follow that discussion and implement a solution when one comes about!
2320 #[cfg(not(target_arch = "wasm32"))]
2323 Acquire => intrinsics::atomic_fence_acq(),
2324 Release => intrinsics::atomic_fence_rel(),
2325 AcqRel => intrinsics::atomic_fence_acqrel(),
2326 SeqCst => intrinsics::atomic_fence(),
2327 Relaxed => panic!("there is no such thing as a relaxed fence"),
2333 /// A compiler memory fence.
2335 /// `compiler_fence` does not emit any machine code, but restricts the kinds
2336 /// of memory re-ordering the compiler is allowed to do. Specifically, depending on
2337 /// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
2338 /// or writes from before or after the call to the other side of the call to
2339 /// `compiler_fence`. Note that it does **not** prevent the *hardware*
2340 /// from doing such re-ordering. This is not a problem in a single-threaded,
2341 /// execution context, but when other threads may modify memory at the same
2342 /// time, stronger synchronization primitives such as [`fence`] are required.
2344 /// The re-ordering prevented by the different ordering semantics are:
2346 /// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
2347 /// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
2348 /// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
2349 /// - with [`AcqRel`], both of the above rules are enforced.
2351 /// `compiler_fence` is generally only useful for preventing a thread from
2352 /// racing *with itself*. That is, if a given thread is executing one piece
2353 /// of code, and is then interrupted, and starts executing code elsewhere
2354 /// (while still in the same thread, and conceptually still on the same
2355 /// core). In traditional programs, this can only occur when a signal
2356 /// handler is registered. In more low-level code, such situations can also
2357 /// arise when handling interrupts, when implementing green threads with
2358 /// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
2359 /// discussion of [memory barriers].
2363 /// Panics if `order` is [`Relaxed`].
2367 /// Without `compiler_fence`, the `assert_eq!` in following code
2368 /// is *not* guaranteed to succeed, despite everything happening in a single thread.
2369 /// To see why, remember that the compiler is free to swap the stores to
2370 /// `IMPORTANT_VARIABLE` and `IS_READ` since they are both
2371 /// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
2372 /// after `IS_READY` is updated, then the signal handler will see
2373 /// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
2374 /// Using a `compiler_fence` remedies this situation.
2377 /// use std::sync::atomic::{AtomicBool, AtomicUsize};
2378 /// use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT};
2379 /// use std::sync::atomic::Ordering;
2380 /// use std::sync::atomic::compiler_fence;
2382 /// static IMPORTANT_VARIABLE: AtomicUsize = ATOMIC_USIZE_INIT;
2383 /// static IS_READY: AtomicBool = ATOMIC_BOOL_INIT;
2386 /// IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
2387 /// // prevent earlier writes from being moved beyond this point
2388 /// compiler_fence(Ordering::Release);
2389 /// IS_READY.store(true, Ordering::Relaxed);
2392 /// fn signal_handler() {
2393 /// if IS_READY.load(Ordering::Relaxed) {
2394 /// assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
2399 /// [`fence`]: fn.fence.html
2400 /// [`Ordering`]: enum.Ordering.html
2401 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2402 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2403 /// [`Release`]: enum.Ordering.html#variant.Release
2404 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2405 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2406 /// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
2408 #[stable(feature = "compiler_fences", since = "1.21.0")]
2409 pub fn compiler_fence(order: Ordering) {
2412 Acquire => intrinsics::atomic_singlethreadfence_acq(),
2413 Release => intrinsics::atomic_singlethreadfence_rel(),
2414 AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
2415 SeqCst => intrinsics::atomic_singlethreadfence(),
2416 Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
2422 #[cfg(target_has_atomic = "8")]
2423 #[stable(feature = "atomic_debug", since = "1.3.0")]
2424 impl fmt::Debug for AtomicBool {
2425 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2426 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2430 #[cfg(target_has_atomic = "ptr")]
2431 #[stable(feature = "atomic_debug", since = "1.3.0")]
2432 impl<T> fmt::Debug for AtomicPtr<T> {
2433 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2434 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2438 #[cfg(target_has_atomic = "ptr")]
2439 #[stable(feature = "atomic_pointer", since = "1.24.0")]
2440 impl<T> fmt::Pointer for AtomicPtr<T> {
2441 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2442 fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)