// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent types.
//!
//! This module defines atomic versions of a select number of primitive
//! types, including `AtomicBool`, `AtomicIsize`, and `AtomicUsize`.
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
//! Each method takes an `Ordering` which represents the strength of
//! the memory barrier for that operation. These orderings are the
//! same as [LLVM atomic orderings][1].
//!
//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
//!
//! Atomic variables are safe to share between threads (they implement `Sync`)
//! but they do not themselves provide the mechanism for sharing. The most
//! common way to share an atomic variable is to put it into an `Arc` (an
//! atomically-reference-counted shared pointer).
//!
//! Most atomic types may be stored in static variables, initialized using
//! the provided static initializers like `ATOMIC_BOOL_INIT`. Atomic statics
//! are often used for lazy global initialization.
//!
//! # Examples
//!
//! A simple spinlock:
//!
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::thread;
//!
//! fn main() {
//!     let spinlock = Arc::new(AtomicUsize::new(1));
//!
//!     let spinlock_clone = spinlock.clone();
//!     thread::spawn(move|| {
//!         spinlock_clone.store(0, Ordering::SeqCst);
//!     });
//!
//!     // Wait for the other thread to release the lock
//!     while spinlock.load(Ordering::SeqCst) != 0 {}
//! }
//! ```
//!
//! Keep a global count of live threads:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
//!
//! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
//!
//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
//! println!("live threads: {}", old_thread_count + 1);
//! ```
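//!
//! A sketch of lazy one-time global initialization, using a static atomic as
//! the "already initialized" flag (illustrative only; the `STATE`,
//! `UNINITIALIZED`, and `INITIALIZED` names are not part of this module):
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
//!
//! const UNINITIALIZED: usize = 0;
//! const INITIALIZED: usize = 1;
//!
//! static STATE: AtomicUsize = ATOMIC_USIZE_INIT;
//!
//! fn init_once() {
//!     // Only the first caller to swap in INITIALIZED runs the setup code.
//!     if STATE.compare_and_swap(UNINITIALIZED, INITIALIZED, Ordering::SeqCst)
//!            == UNINITIALIZED {
//!         // ... perform one-time setup here ...
//!     }
//! }
//!
//! init_once();
//! ```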
#![stable(feature = "rust1", since = "1.0.0")]

use self::Ordering::*;

use marker::{Send, Sync};
use cell::UnsafeCell;
use default::Default;
use fmt;
use intrinsics;
/// A boolean type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
    v: UnsafeCell<usize>,
}

impl Default for AtomicBool {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

// Send is implicitly implemented for AtomicBool.
unsafe impl Sync for AtomicBool {}
/// A signed integer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicIsize {
    v: UnsafeCell<isize>,
}

impl Default for AtomicIsize {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

// Send is implicitly implemented for AtomicIsize.
unsafe impl Sync for AtomicIsize {}
/// An unsigned integer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicUsize {
    v: UnsafeCell<usize>,
}

impl Default for AtomicUsize {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

// Send is implicitly implemented for AtomicUsize.
unsafe impl Sync for AtomicUsize {}
/// A raw pointer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicPtr<T> {
    p: UnsafeCell<*mut T>,
}

impl<T> Default for AtomicPtr<T> {
    fn default() -> AtomicPtr<T> {
        AtomicPtr::new(::ptr::null_mut())
    }
}

unsafe impl<T> Send for AtomicPtr<T> {}
unsafe impl<T> Sync for AtomicPtr<T> {}
/// Atomic memory orderings
///
/// Memory orderings limit the ways that both the compiler and CPU may reorder
/// instructions around atomic operations. At its most restrictive,
/// "sequentially consistent" atomics allow neither reads nor writes
/// to be moved either before or after the atomic operation; on the other end
/// "relaxed" atomics allow all reorderings.
///
/// Rust's memory orderings are [the same as
/// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
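///
/// For example, a store tagged `Release` pairs with a load tagged `Acquire`
/// to pass data between threads. A minimal sketch (the `DATA` and `READY`
/// names below are illustrative, not part of this module):
///
/// ```
/// use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
///
/// static DATA: AtomicUsize = ATOMIC_USIZE_INIT;
/// static READY: AtomicUsize = ATOMIC_USIZE_INIT;
///
/// // Writer thread: publish DATA, then signal readiness with Release.
/// DATA.store(42, Ordering::Relaxed);
/// READY.store(1, Ordering::Release);
///
/// // Reader thread: an Acquire load that observes READY == 1 is also
/// // guaranteed to observe the earlier write to DATA.
/// if READY.load(Ordering::Acquire) == 1 {
///     assert_eq!(DATA.load(Ordering::Relaxed), 42);
/// }
/// ```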
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone)]
pub enum Ordering {
    /// No ordering constraints, only atomic operations.
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous writes become visible
    /// to another thread that performs a load with `Acquire` ordering
    /// on the same value.
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, all subsequent loads will see data
    /// written before a store with `Release` ordering on the same value
    /// in another thread.
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// When coupled with a load, uses `Acquire` ordering, and with a store
    /// `Release` ordering.
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like `AcqRel` with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
}
/// An `AtomicBool` initialized to `false`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
/// An `AtomicIsize` initialized to `0`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0);
/// An `AtomicUsize` initialized to `0`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0);

// NB: Needs to be -1 (0b11111111...) to make `fetch_nand` work correctly.
const UINT_TRUE: usize = !0;
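// A quick check of that claim, assuming `atomic_nand` computes `!(old & val)`:
// with true stored as !0, !(UINT_TRUE & UINT_TRUE) == 0 (true NAND true is
// false) and !(0 & UINT_TRUE) == UINT_TRUE (false NAND true is true), so the
// stored value stays canonical (all-ones or all-zeroes). Had true been stored
// as 1, !(1 & 1) would be neither 0 nor 1. The same encoding is why
// `AtomicBool::new` below maps `v` through `-(v as isize) as usize`
// (1 -> !0, 0 -> 0).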
impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: bool) -> AtomicBool {
        AtomicBool { v: UnsafeCell::new(-(v as isize) as usize) }
    }
    /// Loads a value from the bool.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> bool {
        unsafe { atomic_load(self.v.get(), order) > 0 }
    }
    /// Stores a value into the bool.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// some_bool.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: bool, order: Ordering) {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_store(self.v.get(), val, order); }
    }
    /// Stores a value into the bool, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_swap(self.v.get(), val, order) > 0 }
    }
    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the
    /// value was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        let current = if current { UINT_TRUE } else { 0 };
        let new = if new { UINT_TRUE } else { 0 };

        unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) > 0 }
    }
    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_and(self.v.get(), val, order) > 0 }
    }
    /// Logical "nand" with a boolean value.
    ///
    /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_nand(self.v.get(), val, order) > 0 }
    }
    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_or(self.v.get(), val, order) > 0 }
    }
    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_xor(self.v.get(), val, order) > 0 }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl AtomicIsize {
    /// Creates a new `AtomicIsize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicIsize;
    ///
    /// let atomic_forty_two = AtomicIsize::new(42);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: isize) -> AtomicIsize {
        AtomicIsize { v: UnsafeCell::new(v) }
    }
    /// Loads a value from the isize.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> isize {
        unsafe { atomic_load(self.v.get(), order) }
    }
    /// Stores a value into the isize.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// some_isize.store(10, Ordering::Relaxed);
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: isize, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }
    /// Stores a value into the isize, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }
    /// Stores a value into the `isize` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the
    /// value was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    ///
    /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: isize, new: isize, order: Ordering) -> isize {
        unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) }
    }
    /// Add an isize to the current value, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0);
    /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), 10);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_add(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_add(self.v.get(), val, order) }
    }
    /// Subtract an isize from the current value, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0);
    /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), -10);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_sub(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }
    /// Bitwise and with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_and(self.v.get(), val, order) }
    }
    /// Bitwise or with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_or(self.v.get(), val, order) }
    }
    /// Bitwise xor with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl AtomicUsize {
    /// Creates a new `AtomicUsize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicUsize;
    ///
    /// let atomic_forty_two = AtomicUsize::new(42);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: usize) -> AtomicUsize {
        AtomicUsize { v: UnsafeCell::new(v) }
    }
    /// Loads a value from the usize.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 5);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> usize {
        unsafe { atomic_load(self.v.get(), order) }
    }
    /// Stores a value into the usize.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// some_usize.store(10, Ordering::Relaxed);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: usize, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }
    /// Stores a value into the usize, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.swap(10, Ordering::Relaxed), 5);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }
    /// Stores a value into the `usize` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the
    /// value was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    ///
    /// assert_eq!(some_usize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: usize, new: usize, order: Ordering) -> usize {
        unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) }
    }
    /// Add to the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0);
    /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), 10);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_add(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_add(self.v.get(), val, order) }
    }
    /// Subtract from the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(10);
    /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 10);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_sub(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }
    /// Bitwise and with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_and(self.v.get(), val, order) }
    }
    /// Bitwise or with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_or(self.v.get(), val, order) }
    }
    /// Bitwise xor with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}
impl<T> AtomicPtr<T> {
    /// Creates a new `AtomicPtr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let ptr = &mut 5;
    /// let atomic_ptr = AtomicPtr::new(ptr);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(p: *mut T) -> AtomicPtr<T> {
        AtomicPtr { p: UnsafeCell::new(p) }
    }
    /// Loads a value from the pointer.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let value = some_ptr.load(Ordering::Relaxed);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> *mut T {
        unsafe {
            atomic_load(self.p.get() as *mut usize, order) as *mut T
        }
    }
    /// Stores a value into the pointer.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// some_ptr.store(other_ptr, Ordering::Relaxed);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, ptr: *mut T, order: Ordering) {
        unsafe { atomic_store(self.p.get() as *mut usize, ptr as usize, order); }
    }
    /// Stores a value into the pointer, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
        unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
    }
    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the
    /// value was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    /// let another_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
        unsafe {
            atomic_compare_and_swap(self.p.get() as *mut usize, current as usize,
                                    new as usize, order) as *mut T
        }
    }
}
unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
    match order {
        Release => intrinsics::atomic_store_rel(dst, val),
        Relaxed => intrinsics::atomic_store_relaxed(dst, val),
        SeqCst => intrinsics::atomic_store(dst, val),
        Acquire => panic!("there is no such thing as an acquire store"),
        AcqRel => panic!("there is no such thing as an acquire/release store"),
    }
}
unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_load_acq(dst),
        Relaxed => intrinsics::atomic_load_relaxed(dst),
        SeqCst => intrinsics::atomic_load(dst),
        Release => panic!("there is no such thing as a release load"),
        AcqRel => panic!("there is no such thing as an acquire/release load"),
    }
}
unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xchg_acq(dst, val),
        Release => intrinsics::atomic_xchg_rel(dst, val),
        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xchg(dst, val),
    }
}
/// Returns the old value (like __sync_fetch_and_add).
unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xadd_acq(dst, val),
        Release => intrinsics::atomic_xadd_rel(dst, val),
        AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xadd(dst, val),
    }
}
/// Returns the old value (like __sync_fetch_and_sub).
unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xsub_acq(dst, val),
        Release => intrinsics::atomic_xsub_rel(dst, val),
        AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xsub(dst, val),
    }
}
unsafe fn atomic_compare_and_swap<T>(dst: *mut T, old: T, new: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_cxchg_acq(dst, old, new),
        Release => intrinsics::atomic_cxchg_rel(dst, old, new),
        AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        SeqCst => intrinsics::atomic_cxchg(dst, old, new),
    }
}
unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_and_acq(dst, val),
        Release => intrinsics::atomic_and_rel(dst, val),
        AcqRel => intrinsics::atomic_and_acqrel(dst, val),
        Relaxed => intrinsics::atomic_and_relaxed(dst, val),
        SeqCst => intrinsics::atomic_and(dst, val),
    }
}
unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_nand_acq(dst, val),
        Release => intrinsics::atomic_nand_rel(dst, val),
        AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
        Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
        SeqCst => intrinsics::atomic_nand(dst, val),
    }
}
unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_or_acq(dst, val),
        Release => intrinsics::atomic_or_rel(dst, val),
        AcqRel => intrinsics::atomic_or_acqrel(dst, val),
        Relaxed => intrinsics::atomic_or_relaxed(dst, val),
        SeqCst => intrinsics::atomic_or(dst, val),
    }
}
unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xor_acq(dst, val),
        Release => intrinsics::atomic_xor_rel(dst, val),
        AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xor(dst, val),
    }
}
/// An atomic fence.
///
/// A fence 'A' which has `Release` ordering semantics, synchronizes with a
/// fence 'B' with (at least) `Acquire` semantics, if and only if there exist
/// atomic operations X and Y, both operating on some atomic object 'M' such
/// that A is sequenced before X, Y is synchronized before B and Y observes
/// the change to M. This provides a happens-before dependence between A and B.
///
/// Atomic operations with `Release` or `Acquire` semantics can also synchronize
/// with a fence.
///
/// A fence which has `SeqCst` ordering, in addition to having both `Acquire`
/// and `Release` semantics, participates in the global program order of the
/// other `SeqCst` operations and/or fences.
///
/// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings.
///
/// # Panics
///
/// Panics if `order` is `Relaxed`.
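///
/// # Examples
///
/// A sketch of how a fence can stand in for per-operation `Release`/`Acquire`
/// ordering (the `DATA` and `FLAG` names are illustrative, not part of this
/// module):
///
/// ```
/// use std::sync::atomic::{fence, AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
///
/// static DATA: AtomicUsize = ATOMIC_USIZE_INIT;
/// static FLAG: AtomicUsize = ATOMIC_USIZE_INIT;
///
/// // Writer: the Release fence orders the DATA write before the FLAG write.
/// DATA.store(42, Ordering::Relaxed);
/// fence(Ordering::Release);
/// FLAG.store(1, Ordering::Relaxed);
///
/// // Reader: the Acquire fence ensures that once FLAG == 1 is observed,
/// // the write to DATA is visible too.
/// if FLAG.load(Ordering::Relaxed) == 1 {
///     fence(Ordering::Acquire);
///     assert_eq!(DATA.load(Ordering::Relaxed), 42);
/// }
/// ```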
#[stable(feature = "rust1", since = "1.0.0")]
pub fn fence(order: Ordering) {
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence_acq(),
            Release => intrinsics::atomic_fence_rel(),
            AcqRel => intrinsics::atomic_fence_acqrel(),
            SeqCst => intrinsics::atomic_fence(),
            Relaxed => panic!("there is no such thing as a relaxed fence"),
        }
    }
}
macro_rules! impl_Debug {
    ($($t:ident)*) => ($(
        #[stable(feature = "atomic_debug", since = "1.3.0")]
        impl fmt::Debug for $t {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                f.debug_tuple(stringify!($t)).field(&self.load(Ordering::SeqCst)).finish()
            }
        }
    )*);
}

impl_Debug!{ AtomicUsize AtomicIsize AtomicBool }
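
// As an illustration (not part of the macro above): with these impls,
// `format!("{:?}", AtomicUsize::new(5))` renders as `AtomicUsize(5)`,
// since `debug_tuple` prints the type name followed by its fields.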
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl<T> fmt::Debug for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("AtomicPtr").field(&self.load(Ordering::SeqCst)).finish()
    }
}