// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent types.
//!
//! This module defines atomic versions of a select number of primitive
//! types, including `AtomicBool`, `AtomicIsize`, and `AtomicUsize`.
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
//! Each method takes an `Ordering` which represents the strength of
//! the memory barrier for that operation. These orderings are the
//! same as [C++11 atomic orderings][1].
//!
//! [1]: http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync
//!
//! Atomic variables are safe to share between threads (they implement `Sync`)
//! but they do not themselves provide the mechanism for sharing. The most
//! common way to share an atomic variable is to put it into an `Arc` (an
//! atomically-reference-counted shared pointer).
//!
//! Most atomic types may be stored in static variables, initialized using
//! the provided static initializers like `ATOMIC_BOOL_INIT`. Atomic statics
//! are often used for lazy global initialization.
//!
//! # Examples
//!
//! A simple spinlock:
//!
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::thread::Thread;
//!
//! fn main() {
//!     let spinlock = Arc::new(AtomicUsize::new(1));
//!
//!     let spinlock_clone = spinlock.clone();
//!     Thread::spawn(move|| {
//!         spinlock_clone.store(0, Ordering::SeqCst);
//!     });
//!
//!     // Wait for the other task to release the lock
//!     while spinlock.load(Ordering::SeqCst) != 0 {}
//! }
//! ```
//!
//! Keep a global count of live tasks:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
//!
//! static GLOBAL_TASK_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
//!
//! let old_task_count = GLOBAL_TASK_COUNT.fetch_add(1, Ordering::SeqCst);
//! println!("live tasks: {}", old_task_count + 1);
//! ```

use self::Ordering::*;

use intrinsics;
use cell::UnsafeCell;

/// A boolean type which can be safely shared between threads.
pub struct AtomicBool {
    v: UnsafeCell<usize>,
}

unsafe impl Sync for AtomicBool {}

/// A signed integer type which can be safely shared between threads.
pub struct AtomicIsize {
    v: UnsafeCell<isize>,
}

unsafe impl Sync for AtomicIsize {}

/// An unsigned integer type which can be safely shared between threads.
pub struct AtomicUsize {
    v: UnsafeCell<usize>,
}

unsafe impl Sync for AtomicUsize {}

/// A raw pointer type which can be safely shared between threads.
pub struct AtomicPtr<T> {
    p: UnsafeCell<usize>,
}

unsafe impl<T> Sync for AtomicPtr<T> {}

/// Atomic memory orderings
///
/// Memory orderings limit the ways that both the compiler and CPU may reorder
/// instructions around atomic operations. At its most restrictive,
/// "sequentially consistent" atomics allow neither reads nor writes
/// to be moved either before or after the atomic operation; on the other end
/// "relaxed" atomics allow all reorderings.
///
/// Rust's memory orderings are [the same as
/// C++'s](http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync).
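///
/// # Examples
///
/// A minimal sketch of a release/acquire handoff, using the same `Arc` and
/// `Thread` APIs as the module-level spinlock example:
///
/// ```
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicUsize, Ordering};
/// use std::thread::Thread;
///
/// let flag = Arc::new(AtomicUsize::new(0));
/// let flag_clone = flag.clone();
///
/// Thread::spawn(move|| {
///     // Writes made by this thread before the `Release` store...
///     flag_clone.store(1, Ordering::Release);
/// });
///
/// // ...become visible here once the `Acquire` load observes the store.
/// while flag.load(Ordering::Acquire) == 0 {}
/// ```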
pub enum Ordering {
    /// No ordering constraints, only atomic operations.
    Relaxed,
    /// When coupled with a store, all previous writes become visible
    /// to another thread that performs a load with `Acquire` ordering
    /// on the same value.
    Release,
    /// When coupled with a load, all subsequent loads will see data
    /// written before a store with `Release` ordering on the same value
    /// in another thread.
    Acquire,
    /// When coupled with a load, uses `Acquire` ordering, and with a store
    /// `Release` ordering.
    AcqRel,
    /// Like `AcqRel` with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    SeqCst,
}

/// An `AtomicBool` initialized to `false`.
pub const ATOMIC_BOOL_INIT: AtomicBool =
        AtomicBool { v: UnsafeCell { value: 0 } };
/// An `AtomicIsize` initialized to `0`.
pub const ATOMIC_ISIZE_INIT: AtomicIsize =
        AtomicIsize { v: UnsafeCell { value: 0 } };
/// An `AtomicUsize` initialized to `0`.
pub const ATOMIC_USIZE_INIT: AtomicUsize =
        AtomicUsize { v: UnsafeCell { value: 0 } };

// NB: Needs to be all one-bits (0b11111111...) to make fetch_nand work correctly
const UINT_TRUE: usize = !0;
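//
// With that representation the nand identities hold for both boolean values:
//     !(UINT_TRUE & UINT_TRUE) == 0            (true NAND true  == false)
//     !(UINT_TRUE & 0)         == UINT_TRUE    (true NAND false == true)
// whereas representing `true` as `1` would give !(1 & 1) != 0, a value that
// would still read back as `true`.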

impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    pub fn new(v: bool) -> AtomicBool {
        let val = if v { UINT_TRUE } else { 0 };
        AtomicBool { v: UnsafeCell::new(val) }
    }

    /// Loads a value from the bool.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// let value = some_bool.load(Ordering::Relaxed);
    /// ```
    pub fn load(&self, order: Ordering) -> bool {
        unsafe { atomic_load(self.v.get() as *const usize, order) > 0 }
    }

    /// Stores a value into the bool.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// some_bool.store(false, Ordering::Relaxed);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    pub fn store(&self, val: bool, order: Ordering) {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Stores a value into the bool, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// let value = some_bool.swap(false, Ordering::Relaxed);
    /// ```
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_swap(self.v.get(), val, order) > 0 }
    }

    /// Stores a value into the bool if the current value is the same as the expected value.
    ///
    /// If the return value is equal to `old` then the value was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// let value = some_bool.compare_and_swap(true, false, Ordering::Relaxed);
    /// ```
    pub fn compare_and_swap(&self, old: bool, new: bool, order: Ordering) -> bool {
        let old = if old { UINT_TRUE } else { 0 };
        let new = if new { UINT_TRUE } else { 0 };

        unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) > 0 }
    }

    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_and(false, Ordering::SeqCst));
    /// assert_eq!(false, foo.load(Ordering::SeqCst));
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_and(true, Ordering::SeqCst));
    /// assert_eq!(true, foo.load(Ordering::SeqCst));
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(false, foo.fetch_and(false, Ordering::SeqCst));
    /// assert_eq!(false, foo.load(Ordering::SeqCst));
    /// ```
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_and(self.v.get(), val, order) > 0 }
    }

    /// Logical "nand" with a boolean value.
    ///
    /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_nand(false, Ordering::SeqCst));
    /// assert_eq!(true, foo.load(Ordering::SeqCst));
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_nand(true, Ordering::SeqCst));
    /// assert_eq!(false, foo.load(Ordering::SeqCst));
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(false, foo.fetch_nand(false, Ordering::SeqCst));
    /// assert_eq!(true, foo.load(Ordering::SeqCst));
    /// ```
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_nand(self.v.get(), val, order) > 0 }
    }

    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
    /// new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_or(false, Ordering::SeqCst));
    /// assert_eq!(true, foo.load(Ordering::SeqCst));
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_or(true, Ordering::SeqCst));
    /// assert_eq!(true, foo.load(Ordering::SeqCst));
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(false, foo.fetch_or(false, Ordering::SeqCst));
    /// assert_eq!(false, foo.load(Ordering::SeqCst));
    /// ```
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_or(self.v.get(), val, order) > 0 }
    }

    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_xor(false, Ordering::SeqCst));
    /// assert_eq!(true, foo.load(Ordering::SeqCst));
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_xor(true, Ordering::SeqCst));
    /// assert_eq!(false, foo.load(Ordering::SeqCst));
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(false, foo.fetch_xor(false, Ordering::SeqCst));
    /// assert_eq!(false, foo.load(Ordering::SeqCst));
    /// ```
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_xor(self.v.get(), val, order) > 0 }
    }
}

impl AtomicIsize {
    /// Creates a new `AtomicIsize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicIsize;
    ///
    /// let atomic_forty_two = AtomicIsize::new(42);
    /// ```
    pub fn new(v: isize) -> AtomicIsize {
        AtomicIsize { v: UnsafeCell::new(v) }
    }

    /// Loads a value from the isize.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// let value = some_isize.load(Ordering::Relaxed);
    /// ```
    pub fn load(&self, order: Ordering) -> isize {
        unsafe { atomic_load(self.v.get() as *const isize, order) }
    }

    /// Stores a value into the isize.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// some_isize.store(10, Ordering::Relaxed);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    pub fn store(&self, val: isize, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Stores a value into the isize, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// let value = some_isize.swap(10, Ordering::Relaxed);
    /// ```
    pub fn swap(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }

    /// Stores a value into the isize if the current value is the same as the expected value.
    ///
    /// If the return value is equal to `old` then the value was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// let value = some_isize.compare_and_swap(5, 10, Ordering::Relaxed);
    /// ```
    pub fn compare_and_swap(&self, old: isize, new: isize, order: Ordering) -> isize {
        unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) }
    }

    /// Add an isize to the current value, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0);
    /// assert_eq!(0, foo.fetch_add(10, Ordering::SeqCst));
    /// assert_eq!(10, foo.load(Ordering::SeqCst));
    /// ```
    pub fn fetch_add(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_add(self.v.get(), val, order) }
    }

    /// Subtract an isize from the current value, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0);
    /// assert_eq!(0, foo.fetch_sub(10, Ordering::SeqCst));
    /// assert_eq!(-10, foo.load(Ordering::SeqCst));
    /// ```
    pub fn fetch_sub(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }

    /// Bitwise and with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(0b101101, foo.fetch_and(0b110011, Ordering::SeqCst));
    /// assert_eq!(0b100001, foo.load(Ordering::SeqCst));
    /// ```
    pub fn fetch_and(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_and(self.v.get(), val, order) }
    }

    /// Bitwise or with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(0b101101, foo.fetch_or(0b110011, Ordering::SeqCst));
    /// assert_eq!(0b111111, foo.load(Ordering::SeqCst));
    /// ```
    pub fn fetch_or(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_or(self.v.get(), val, order) }
    }

    /// Bitwise xor with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(0b101101, foo.fetch_xor(0b110011, Ordering::SeqCst));
    /// assert_eq!(0b011110, foo.load(Ordering::SeqCst));
    /// ```
    pub fn fetch_xor(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}

impl AtomicUsize {
    /// Creates a new `AtomicUsize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicUsize;
    ///
    /// let atomic_forty_two = AtomicUsize::new(42);
    /// ```
    pub fn new(v: usize) -> AtomicUsize {
        AtomicUsize { v: UnsafeCell::new(v) }
    }

    /// Loads a value from the usize.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// let value = some_usize.load(Ordering::Relaxed);
    /// ```
    pub fn load(&self, order: Ordering) -> usize {
        unsafe { atomic_load(self.v.get() as *const usize, order) }
    }

    /// Stores a value into the usize.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// some_usize.store(10, Ordering::Relaxed);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    pub fn store(&self, val: usize, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Stores a value into the usize, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// let value = some_usize.swap(10, Ordering::Relaxed);
    /// ```
    pub fn swap(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }

    /// Stores a value into the usize if the current value is the same as the expected value.
    ///
    /// If the return value is equal to `old` then the value was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// let value = some_usize.compare_and_swap(5, 10, Ordering::Relaxed);
    /// ```
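    ///
    /// A compare-and-swap retry loop is the usual way to build a
    /// read-modify-write that has no dedicated `fetch_*` method; a minimal
    /// sketch:
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let a = AtomicUsize::new(5);
    ///
    /// // Atomically double the value, retrying if another thread races us.
    /// let mut old = a.load(Ordering::Relaxed);
    /// loop {
    ///     let seen = a.compare_and_swap(old, old * 2, Ordering::Relaxed);
    ///     if seen == old { break; } // success: the value was updated
    ///     old = seen;               // lost the race: retry with the value we saw
    /// }
    /// assert_eq!(10, a.load(Ordering::Relaxed));
    /// ```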
    pub fn compare_and_swap(&self, old: usize, new: usize, order: Ordering) -> usize {
        unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) }
    }

    /// Add to the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0);
    /// assert_eq!(0, foo.fetch_add(10, Ordering::SeqCst));
    /// assert_eq!(10, foo.load(Ordering::SeqCst));
    /// ```
    pub fn fetch_add(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_add(self.v.get(), val, order) }
    }

    /// Subtract from the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(10);
    /// assert_eq!(10, foo.fetch_sub(10, Ordering::SeqCst));
    /// assert_eq!(0, foo.load(Ordering::SeqCst));
    /// ```
    pub fn fetch_sub(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }

    /// Bitwise and with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(0b101101, foo.fetch_and(0b110011, Ordering::SeqCst));
    /// assert_eq!(0b100001, foo.load(Ordering::SeqCst));
    /// ```
    pub fn fetch_and(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_and(self.v.get(), val, order) }
    }

    /// Bitwise or with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(0b101101, foo.fetch_or(0b110011, Ordering::SeqCst));
    /// assert_eq!(0b111111, foo.load(Ordering::SeqCst));
    /// ```
    pub fn fetch_or(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_or(self.v.get(), val, order) }
    }

    /// Bitwise xor with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(0b101101, foo.fetch_xor(0b110011, Ordering::SeqCst));
    /// assert_eq!(0b011110, foo.load(Ordering::SeqCst));
    /// ```
    pub fn fetch_xor(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}

impl<T> AtomicPtr<T> {
    /// Creates a new `AtomicPtr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let ptr = &mut 5;
    /// let atomic_ptr = AtomicPtr::new(ptr);
    /// ```
    pub fn new(p: *mut T) -> AtomicPtr<T> {
        AtomicPtr { p: UnsafeCell::new(p as usize) }
    }

    /// Loads a value from the pointer.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let value = some_ptr.load(Ordering::Relaxed);
    /// ```
    pub fn load(&self, order: Ordering) -> *mut T {
        unsafe {
            atomic_load(self.p.get() as *const *mut T, order) as *mut T
        }
    }

    /// Stores a value into the pointer.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// some_ptr.store(other_ptr, Ordering::Relaxed);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    pub fn store(&self, ptr: *mut T, order: Ordering) {
        unsafe { atomic_store(self.p.get(), ptr as usize, order); }
    }

    /// Stores a value into the pointer, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
    /// ```
    pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
        unsafe { atomic_swap(self.p.get(), ptr as usize, order) as *mut T }
    }

    /// Stores a value into the pointer if the current value is the same as the expected value.
    ///
    /// If the return value is equal to `old` then the value was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    /// let another_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
    /// ```
    pub fn compare_and_swap(&self, old: *mut T, new: *mut T, order: Ordering) -> *mut T {
        unsafe {
            atomic_compare_and_swap(self.p.get(), old as usize,
                                    new as usize, order) as *mut T
        }
    }
}

unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
    match order {
        Release => intrinsics::atomic_store_rel(dst, val),
        Relaxed => intrinsics::atomic_store_relaxed(dst, val),
        SeqCst => intrinsics::atomic_store(dst, val),
        Acquire => panic!("there is no such thing as an acquire store"),
        AcqRel => panic!("there is no such thing as an acquire/release store"),
    }
}

unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_load_acq(dst),
        Relaxed => intrinsics::atomic_load_relaxed(dst),
        SeqCst => intrinsics::atomic_load(dst),
        Release => panic!("there is no such thing as a release load"),
        AcqRel => panic!("there is no such thing as an acquire/release load"),
    }
}

unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xchg_acq(dst, val),
        Release => intrinsics::atomic_xchg_rel(dst, val),
        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xchg(dst, val)
    }
}

/// Returns the old value (like __sync_fetch_and_add).
unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xadd_acq(dst, val),
        Release => intrinsics::atomic_xadd_rel(dst, val),
        AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xadd(dst, val)
    }
}

/// Returns the old value (like __sync_fetch_and_sub).
unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xsub_acq(dst, val),
        Release => intrinsics::atomic_xsub_rel(dst, val),
        AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xsub(dst, val)
    }
}

unsafe fn atomic_compare_and_swap<T>(dst: *mut T, old: T, new: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_cxchg_acq(dst, old, new),
        Release => intrinsics::atomic_cxchg_rel(dst, old, new),
        AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        SeqCst => intrinsics::atomic_cxchg(dst, old, new),
    }
}

unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_and_acq(dst, val),
        Release => intrinsics::atomic_and_rel(dst, val),
        AcqRel => intrinsics::atomic_and_acqrel(dst, val),
        Relaxed => intrinsics::atomic_and_relaxed(dst, val),
        SeqCst => intrinsics::atomic_and(dst, val)
    }
}

unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_nand_acq(dst, val),
        Release => intrinsics::atomic_nand_rel(dst, val),
        AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
        Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
        SeqCst => intrinsics::atomic_nand(dst, val)
    }
}

unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_or_acq(dst, val),
        Release => intrinsics::atomic_or_rel(dst, val),
        AcqRel => intrinsics::atomic_or_acqrel(dst, val),
        Relaxed => intrinsics::atomic_or_relaxed(dst, val),
        SeqCst => intrinsics::atomic_or(dst, val)
    }
}

unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xor_acq(dst, val),
        Release => intrinsics::atomic_xor_rel(dst, val),
        AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xor(dst, val)
    }
}

/// An atomic fence.
///
/// A fence 'A' which has `Release` ordering semantics synchronizes with a
/// fence 'B' with (at least) `Acquire` semantics if and only if there exist
/// atomic operations X and Y, both operating on some atomic object 'M', such
/// that A is sequenced before X, Y is sequenced before B, and Y observes
/// the change to M. This provides a happens-before dependence between A and B.
///
/// Atomic operations with `Release` or `Acquire` semantics can also synchronize
/// with a fence.
///
/// A fence which has `SeqCst` ordering, in addition to having both `Acquire`
/// and `Release` semantics, participates in the global program order of the
/// other `SeqCst` operations and/or fences.
///
/// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings.
///
/// # Panics
///
/// Panics if `order` is `Relaxed`.
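///
/// # Examples
///
/// A minimal sketch of fence-based publication, pairing a release fence in a
/// writer with an acquire fence in a reader (the flag itself only needs
/// `Relaxed` operations):
///
/// ```
/// use std::sync::atomic::{fence, AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
///
/// static READY: AtomicUsize = ATOMIC_USIZE_INIT;
///
/// // Writer: the fence keeps earlier writes from moving past the flag store.
/// fence(Ordering::Release);
/// READY.store(1, Ordering::Relaxed);
///
/// // Reader: after observing the flag, the fence orders subsequent reads.
/// if READY.load(Ordering::Relaxed) == 1 {
///     fence(Ordering::Acquire);
/// }
/// ```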
pub fn fence(order: Ordering) {
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence_acq(),
            Release => intrinsics::atomic_fence_rel(),
            AcqRel => intrinsics::atomic_fence_acqrel(),
            SeqCst => intrinsics::atomic_fence(),
            Relaxed => panic!("there is no such thing as a relaxed fence")
        }
    }
}

#[deprecated = "renamed to AtomicIsize"]
#[allow(missing_docs)]
pub struct AtomicInt {
    v: UnsafeCell<int>,
}

unsafe impl Sync for AtomicInt {}

#[deprecated = "renamed to AtomicUsize"]
#[allow(missing_docs)]
pub struct AtomicUint {
    v: UnsafeCell<uint>,
}

unsafe impl Sync for AtomicUint {}

#[deprecated = "use ATOMIC_ISIZE_INIT instead"]
#[allow(missing_docs, deprecated)]
pub const ATOMIC_INT_INIT: AtomicInt =
        AtomicInt { v: UnsafeCell { value: 0 } };
#[deprecated = "use ATOMIC_USIZE_INIT instead"]
#[allow(missing_docs, deprecated)]
pub const ATOMIC_UINT_INIT: AtomicUint =
        AtomicUint { v: UnsafeCell { value: 0 } };

#[allow(missing_docs, deprecated)]
impl AtomicInt {
    pub fn new(v: int) -> AtomicInt {
        AtomicInt { v: UnsafeCell::new(v) }
    }

    pub fn load(&self, order: Ordering) -> int {
        unsafe { atomic_load(self.v.get() as *const int, order) }
    }

    pub fn store(&self, val: int, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }

    pub fn swap(&self, val: int, order: Ordering) -> int {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }

    pub fn compare_and_swap(&self, old: int, new: int, order: Ordering) -> int {
        unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) }
    }

    pub fn fetch_add(&self, val: int, order: Ordering) -> int {
        unsafe { atomic_add(self.v.get(), val, order) }
    }

    pub fn fetch_sub(&self, val: int, order: Ordering) -> int {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }

    pub fn fetch_and(&self, val: int, order: Ordering) -> int {
        unsafe { atomic_and(self.v.get(), val, order) }
    }

    pub fn fetch_or(&self, val: int, order: Ordering) -> int {
        unsafe { atomic_or(self.v.get(), val, order) }
    }

    pub fn fetch_xor(&self, val: int, order: Ordering) -> int {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}

#[allow(missing_docs, deprecated)]
impl AtomicUint {
    pub fn new(v: uint) -> AtomicUint {
        AtomicUint { v: UnsafeCell::new(v) }
    }

    pub fn load(&self, order: Ordering) -> uint {
        unsafe { atomic_load(self.v.get() as *const uint, order) }
    }

    pub fn store(&self, val: uint, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }

    pub fn swap(&self, val: uint, order: Ordering) -> uint {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }

    pub fn compare_and_swap(&self, old: uint, new: uint, order: Ordering) -> uint {
        unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) }
    }

    pub fn fetch_add(&self, val: uint, order: Ordering) -> uint {
        unsafe { atomic_add(self.v.get(), val, order) }
    }

    pub fn fetch_sub(&self, val: uint, order: Ordering) -> uint {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }

    pub fn fetch_and(&self, val: uint, order: Ordering) -> uint {
        unsafe { atomic_and(self.v.get(), val, order) }
    }

    pub fn fetch_or(&self, val: uint, order: Ordering) -> uint {
        unsafe { atomic_or(self.v.get(), val, order) }
    }

    pub fn fetch_xor(&self, val: uint, order: Ordering) -> uint {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}