1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Core atomic primitives
pub use self::Ordering::*;

use intrinsics;
use cell::UnsafeCell;
use kinds::Sync;
22 /// A boolean type which can be safely shared between threads.
pub struct AtomicBool {
    v: UnsafeCell<uint>,
}
28 unsafe impl Sync for AtomicBool {}
30 /// A signed integer type which can be safely shared between threads.
pub struct AtomicInt {
    v: UnsafeCell<int>,
}
36 unsafe impl Sync for AtomicInt {}
38 /// An unsigned integer type which can be safely shared between threads.
pub struct AtomicUint {
    v: UnsafeCell<uint>,
}
44 unsafe impl Sync for AtomicUint {}
46 /// A raw pointer type which can be safely shared between threads.
pub struct AtomicPtr<T> {
    p: UnsafeCell<uint>,
}
52 unsafe impl<T> Sync for AtomicPtr<T> {}
54 /// Atomic memory orderings
56 /// Memory orderings limit the ways that both the compiler and CPU may reorder
57 /// instructions around atomic operations. At its most restrictive,
58 /// "sequentially consistent" atomics allow neither reads nor writes
59 /// to be moved either before or after the atomic operation; on the other end
60 /// "relaxed" atomics allow all reorderings.
62 /// Rust's memory orderings are [the same as
63 /// C++'s](http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync).
pub enum Ordering {
    /// No ordering constraints, only atomic operations.
    Relaxed,
    /// When coupled with a store, all previous writes become visible
    /// to another thread that performs a load with `Acquire` ordering
    /// on the same value.
    Release,
    /// When coupled with a load, all subsequent loads will see data
    /// written before a store with `Release` ordering on the same value
    /// in another thread.
    Acquire,
    /// When coupled with a load, uses `Acquire` ordering, and with a store
    /// `Release` ordering.
    AcqRel,
    /// Like `AcqRel` with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    SeqCst,
}
90 /// An `AtomicBool` initialized to `false`.
91 #[unstable = "may be renamed, pending conventions for static initalizers"]
92 pub const INIT_ATOMIC_BOOL: AtomicBool =
93 AtomicBool { v: UnsafeCell { value: 0 } };
94 /// An `AtomicInt` initialized to `0`.
95 #[unstable = "may be renamed, pending conventions for static initalizers"]
96 pub const INIT_ATOMIC_INT: AtomicInt =
97 AtomicInt { v: UnsafeCell { value: 0 } };
98 /// An `AtomicUint` initialized to `0`.
99 #[unstable = "may be renamed, pending conventions for static initalizers"]
100 pub const INIT_ATOMIC_UINT: AtomicUint =
    AtomicUint { v: UnsafeCell { value: 0 } };
103 // NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly
104 const UINT_TRUE: uint = -1;
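// Worked example of the note above: `fetch_nand` computes `!(old & val)`
// bitwise. With UINT_TRUE == 0b111...1, nand(true, true) yields
// !(UINT_TRUE & UINT_TRUE) == 0, which `load` (testing `> 0`) reports as
// `false`. Had `true` been stored as 1, the result would be
// !(1 & 1) == 0b111...10: still non-zero, so it would incorrectly read
// back as `true`.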
impl AtomicBool {
    /// Creates a new `AtomicBool`.
112 /// use std::sync::atomic::AtomicBool;
114 /// let atomic_true = AtomicBool::new(true);
115 /// let atomic_false = AtomicBool::new(false);
119 pub fn new(v: bool) -> AtomicBool {
120 let val = if v { UINT_TRUE } else { 0 };
121 AtomicBool { v: UnsafeCell::new(val) }
124 /// Loads a value from the bool.
126 /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
130 /// Panics if `order` is `Release` or `AcqRel`.
135 /// use std::sync::atomic::{AtomicBool, Ordering};
137 /// let some_bool = AtomicBool::new(true);
139 /// let value = some_bool.load(Ordering::Relaxed);
143 pub fn load(&self, order: Ordering) -> bool {
144 unsafe { atomic_load(self.v.get() as *const uint, order) > 0 }
147 /// Stores a value into the bool.
149 /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
154 /// use std::sync::atomic::{AtomicBool, Ordering};
156 /// let some_bool = AtomicBool::new(true);
158 /// some_bool.store(false, Ordering::Relaxed);
163 /// Panics if `order` is `Acquire` or `AcqRel`.
166 pub fn store(&self, val: bool, order: Ordering) {
167 let val = if val { UINT_TRUE } else { 0 };
169 unsafe { atomic_store(self.v.get(), val, order); }
172 /// Stores a value into the bool, returning the old value.
174 /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
179 /// use std::sync::atomic::{AtomicBool, Ordering};
181 /// let some_bool = AtomicBool::new(true);
183 /// let value = some_bool.swap(false, Ordering::Relaxed);
187 pub fn swap(&self, val: bool, order: Ordering) -> bool {
188 let val = if val { UINT_TRUE } else { 0 };
190 unsafe { atomic_swap(self.v.get(), val, order) > 0 }
193 /// Stores a value into the bool if the current value is the same as the expected value.
195 /// If the return value is equal to `old` then the value was updated.
/// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
/// this operation.
203 /// use std::sync::atomic::{AtomicBool, Ordering};
205 /// let some_bool = AtomicBool::new(true);
/// let value = some_bool.compare_and_swap(true, false, Ordering::Relaxed);
211 pub fn compare_and_swap(&self, old: bool, new: bool, order: Ordering) -> bool {
212 let old = if old { UINT_TRUE } else { 0 };
213 let new = if new { UINT_TRUE } else { 0 };
215 unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) > 0 }
218 /// Logical "and" with a boolean value.
220 /// Performs a logical "and" operation on the current value and the argument `val`, and sets
221 /// the new value to the result.
223 /// Returns the previous value.
228 /// use std::sync::atomic::{AtomicBool, SeqCst};
230 /// let foo = AtomicBool::new(true);
231 /// assert_eq!(true, foo.fetch_and(false, SeqCst));
232 /// assert_eq!(false, foo.load(SeqCst));
234 /// let foo = AtomicBool::new(true);
235 /// assert_eq!(true, foo.fetch_and(true, SeqCst));
236 /// assert_eq!(true, foo.load(SeqCst));
238 /// let foo = AtomicBool::new(false);
239 /// assert_eq!(false, foo.fetch_and(false, SeqCst));
240 /// assert_eq!(false, foo.load(SeqCst));
244 pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
245 let val = if val { UINT_TRUE } else { 0 };
247 unsafe { atomic_and(self.v.get(), val, order) > 0 }
250 /// Logical "nand" with a boolean value.
252 /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
253 /// the new value to the result.
255 /// Returns the previous value.
260 /// use std::sync::atomic::{AtomicBool, SeqCst};
262 /// let foo = AtomicBool::new(true);
263 /// assert_eq!(true, foo.fetch_nand(false, SeqCst));
264 /// assert_eq!(true, foo.load(SeqCst));
266 /// let foo = AtomicBool::new(true);
267 /// assert_eq!(true, foo.fetch_nand(true, SeqCst));
268 /// assert_eq!(0, foo.load(SeqCst) as int);
269 /// assert_eq!(false, foo.load(SeqCst));
271 /// let foo = AtomicBool::new(false);
272 /// assert_eq!(false, foo.fetch_nand(false, SeqCst));
273 /// assert_eq!(true, foo.load(SeqCst));
277 pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
278 let val = if val { UINT_TRUE } else { 0 };
280 unsafe { atomic_nand(self.v.get(), val, order) > 0 }
283 /// Logical "or" with a boolean value.
285 /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
286 /// new value to the result.
288 /// Returns the previous value.
293 /// use std::sync::atomic::{AtomicBool, SeqCst};
295 /// let foo = AtomicBool::new(true);
296 /// assert_eq!(true, foo.fetch_or(false, SeqCst));
297 /// assert_eq!(true, foo.load(SeqCst));
299 /// let foo = AtomicBool::new(true);
300 /// assert_eq!(true, foo.fetch_or(true, SeqCst));
301 /// assert_eq!(true, foo.load(SeqCst));
303 /// let foo = AtomicBool::new(false);
304 /// assert_eq!(false, foo.fetch_or(false, SeqCst));
305 /// assert_eq!(false, foo.load(SeqCst));
309 pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
310 let val = if val { UINT_TRUE } else { 0 };
312 unsafe { atomic_or(self.v.get(), val, order) > 0 }
315 /// Logical "xor" with a boolean value.
317 /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
318 /// the new value to the result.
320 /// Returns the previous value.
325 /// use std::sync::atomic::{AtomicBool, SeqCst};
327 /// let foo = AtomicBool::new(true);
328 /// assert_eq!(true, foo.fetch_xor(false, SeqCst));
329 /// assert_eq!(true, foo.load(SeqCst));
331 /// let foo = AtomicBool::new(true);
332 /// assert_eq!(true, foo.fetch_xor(true, SeqCst));
333 /// assert_eq!(false, foo.load(SeqCst));
335 /// let foo = AtomicBool::new(false);
336 /// assert_eq!(false, foo.fetch_xor(false, SeqCst));
337 /// assert_eq!(false, foo.load(SeqCst));
341 pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
342 let val = if val { UINT_TRUE } else { 0 };
344 unsafe { atomic_xor(self.v.get(), val, order) > 0 }
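// Illustrative sketch: `compare_and_swap` as a try-lock over an
// `AtomicBool` (the `try_lock`/`unlock` helpers are hypothetical, not part
// of this module):
//
//     // Succeeds for exactly one caller while the lock is free.
//     fn try_lock(lock: &AtomicBool) -> bool {
//         // An old value of `false` means we made the false -> true
//         // transition and therefore own the lock.
//         lock.compare_and_swap(false, true, SeqCst) == false
//     }
//
//     fn unlock(lock: &AtomicBool) {
//         lock.store(false, SeqCst);
//     }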
impl AtomicInt {
    /// Creates a new `AtomicInt`.
354 /// use std::sync::atomic::AtomicInt;
356 /// let atomic_forty_two = AtomicInt::new(42);
360 pub fn new(v: int) -> AtomicInt {
361 AtomicInt {v: UnsafeCell::new(v)}
364 /// Loads a value from the int.
366 /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
370 /// Panics if `order` is `Release` or `AcqRel`.
375 /// use std::sync::atomic::{AtomicInt, Ordering};
377 /// let some_int = AtomicInt::new(5);
379 /// let value = some_int.load(Ordering::Relaxed);
383 pub fn load(&self, order: Ordering) -> int {
384 unsafe { atomic_load(self.v.get() as *const int, order) }
387 /// Stores a value into the int.
389 /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
394 /// use std::sync::atomic::{AtomicInt, Ordering};
396 /// let some_int = AtomicInt::new(5);
398 /// some_int.store(10, Ordering::Relaxed);
403 /// Panics if `order` is `Acquire` or `AcqRel`.
406 pub fn store(&self, val: int, order: Ordering) {
407 unsafe { atomic_store(self.v.get(), val, order); }
410 /// Stores a value into the int, returning the old value.
412 /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
417 /// use std::sync::atomic::{AtomicInt, Ordering};
419 /// let some_int = AtomicInt::new(5);
421 /// let value = some_int.swap(10, Ordering::Relaxed);
425 pub fn swap(&self, val: int, order: Ordering) -> int {
426 unsafe { atomic_swap(self.v.get(), val, order) }
429 /// Stores a value into the int if the current value is the same as the expected value.
431 /// If the return value is equal to `old` then the value was updated.
/// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
/// this operation.
439 /// use std::sync::atomic::{AtomicInt, Ordering};
441 /// let some_int = AtomicInt::new(5);
443 /// let value = some_int.compare_and_swap(5, 10, Ordering::Relaxed);
447 pub fn compare_and_swap(&self, old: int, new: int, order: Ordering) -> int {
448 unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) }
451 /// Add an int to the current value, returning the previous value.
456 /// use std::sync::atomic::{AtomicInt, SeqCst};
458 /// let foo = AtomicInt::new(0);
459 /// assert_eq!(0, foo.fetch_add(10, SeqCst));
460 /// assert_eq!(10, foo.load(SeqCst));
464 pub fn fetch_add(&self, val: int, order: Ordering) -> int {
465 unsafe { atomic_add(self.v.get(), val, order) }
468 /// Subtract an int from the current value, returning the previous value.
473 /// use std::sync::atomic::{AtomicInt, SeqCst};
475 /// let foo = AtomicInt::new(0);
476 /// assert_eq!(0, foo.fetch_sub(10, SeqCst));
477 /// assert_eq!(-10, foo.load(SeqCst));
481 pub fn fetch_sub(&self, val: int, order: Ordering) -> int {
482 unsafe { atomic_sub(self.v.get(), val, order) }
485 /// Bitwise and with the current int, returning the previous value.
490 /// use std::sync::atomic::{AtomicInt, SeqCst};
492 /// let foo = AtomicInt::new(0b101101);
493 /// assert_eq!(0b101101, foo.fetch_and(0b110011, SeqCst));
494 /// assert_eq!(0b100001, foo.load(SeqCst));
497 pub fn fetch_and(&self, val: int, order: Ordering) -> int {
498 unsafe { atomic_and(self.v.get(), val, order) }
501 /// Bitwise or with the current int, returning the previous value.
506 /// use std::sync::atomic::{AtomicInt, SeqCst};
508 /// let foo = AtomicInt::new(0b101101);
509 /// assert_eq!(0b101101, foo.fetch_or(0b110011, SeqCst));
510 /// assert_eq!(0b111111, foo.load(SeqCst));
513 pub fn fetch_or(&self, val: int, order: Ordering) -> int {
514 unsafe { atomic_or(self.v.get(), val, order) }
517 /// Bitwise xor with the current int, returning the previous value.
522 /// use std::sync::atomic::{AtomicInt, SeqCst};
524 /// let foo = AtomicInt::new(0b101101);
525 /// assert_eq!(0b101101, foo.fetch_xor(0b110011, SeqCst));
526 /// assert_eq!(0b011110, foo.load(SeqCst));
529 pub fn fetch_xor(&self, val: int, order: Ordering) -> int {
530 unsafe { atomic_xor(self.v.get(), val, order) }
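// Illustrative sketch: because `fetch_sub` returns the *previous* value,
// it can detect the final decrement of a reference count (hypothetical
// `refs: &AtomicInt`, in the style of a shared-ownership drop):
//
//     if refs.fetch_sub(1, SeqCst) == 1 {
//         // The old count was 1, so this caller released the last
//         // reference and may free the shared data.
//     }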
impl AtomicUint {
    /// Creates a new `AtomicUint`.
540 /// use std::sync::atomic::AtomicUint;
542 /// let atomic_forty_two = AtomicUint::new(42u);
546 pub fn new(v: uint) -> AtomicUint {
547 AtomicUint { v: UnsafeCell::new(v) }
550 /// Loads a value from the uint.
552 /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
556 /// Panics if `order` is `Release` or `AcqRel`.
561 /// use std::sync::atomic::{AtomicUint, Ordering};
563 /// let some_uint = AtomicUint::new(5);
565 /// let value = some_uint.load(Ordering::Relaxed);
569 pub fn load(&self, order: Ordering) -> uint {
570 unsafe { atomic_load(self.v.get() as *const uint, order) }
573 /// Stores a value into the uint.
575 /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
580 /// use std::sync::atomic::{AtomicUint, Ordering};
582 /// let some_uint = AtomicUint::new(5);
584 /// some_uint.store(10, Ordering::Relaxed);
589 /// Panics if `order` is `Acquire` or `AcqRel`.
592 pub fn store(&self, val: uint, order: Ordering) {
593 unsafe { atomic_store(self.v.get(), val, order); }
596 /// Stores a value into the uint, returning the old value.
598 /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
603 /// use std::sync::atomic::{AtomicUint, Ordering};
605 /// let some_uint = AtomicUint::new(5);
607 /// let value = some_uint.swap(10, Ordering::Relaxed);
611 pub fn swap(&self, val: uint, order: Ordering) -> uint {
612 unsafe { atomic_swap(self.v.get(), val, order) }
615 /// Stores a value into the uint if the current value is the same as the expected value.
617 /// If the return value is equal to `old` then the value was updated.
/// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
/// this operation.
625 /// use std::sync::atomic::{AtomicUint, Ordering};
627 /// let some_uint = AtomicUint::new(5);
629 /// let value = some_uint.compare_and_swap(5, 10, Ordering::Relaxed);
633 pub fn compare_and_swap(&self, old: uint, new: uint, order: Ordering) -> uint {
634 unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) }
637 /// Add to the current uint, returning the previous value.
642 /// use std::sync::atomic::{AtomicUint, SeqCst};
644 /// let foo = AtomicUint::new(0);
645 /// assert_eq!(0, foo.fetch_add(10, SeqCst));
646 /// assert_eq!(10, foo.load(SeqCst));
650 pub fn fetch_add(&self, val: uint, order: Ordering) -> uint {
651 unsafe { atomic_add(self.v.get(), val, order) }
654 /// Subtract from the current uint, returning the previous value.
659 /// use std::sync::atomic::{AtomicUint, SeqCst};
661 /// let foo = AtomicUint::new(10);
662 /// assert_eq!(10, foo.fetch_sub(10, SeqCst));
663 /// assert_eq!(0, foo.load(SeqCst));
667 pub fn fetch_sub(&self, val: uint, order: Ordering) -> uint {
668 unsafe { atomic_sub(self.v.get(), val, order) }
671 /// Bitwise and with the current uint, returning the previous value.
676 /// use std::sync::atomic::{AtomicUint, SeqCst};
678 /// let foo = AtomicUint::new(0b101101);
679 /// assert_eq!(0b101101, foo.fetch_and(0b110011, SeqCst));
680 /// assert_eq!(0b100001, foo.load(SeqCst));
683 pub fn fetch_and(&self, val: uint, order: Ordering) -> uint {
684 unsafe { atomic_and(self.v.get(), val, order) }
687 /// Bitwise or with the current uint, returning the previous value.
692 /// use std::sync::atomic::{AtomicUint, SeqCst};
694 /// let foo = AtomicUint::new(0b101101);
695 /// assert_eq!(0b101101, foo.fetch_or(0b110011, SeqCst));
696 /// assert_eq!(0b111111, foo.load(SeqCst));
699 pub fn fetch_or(&self, val: uint, order: Ordering) -> uint {
700 unsafe { atomic_or(self.v.get(), val, order) }
703 /// Bitwise xor with the current uint, returning the previous value.
708 /// use std::sync::atomic::{AtomicUint, SeqCst};
710 /// let foo = AtomicUint::new(0b101101);
711 /// assert_eq!(0b101101, foo.fetch_xor(0b110011, SeqCst));
712 /// assert_eq!(0b011110, foo.load(SeqCst));
715 pub fn fetch_xor(&self, val: uint, order: Ordering) -> uint {
716 unsafe { atomic_xor(self.v.get(), val, order) }
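// Illustrative sketch: `fetch_add` as a wait-free unique-ID generator
// (the `NEXT_ID` static and `next_id` function are hypothetical):
//
//     static NEXT_ID: AtomicUint = INIT_ATOMIC_UINT;
//
//     fn next_id() -> uint {
//         // Each caller receives a distinct previous value, even when
//         // many threads race on this counter.
//         NEXT_ID.fetch_add(1, SeqCst)
//     }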
720 impl<T> AtomicPtr<T> {
721 /// Creates a new `AtomicPtr`.
726 /// use std::sync::atomic::AtomicPtr;
728 /// let ptr = &mut 5i;
729 /// let atomic_ptr = AtomicPtr::new(ptr);
733 pub fn new(p: *mut T) -> AtomicPtr<T> {
734 AtomicPtr { p: UnsafeCell::new(p as uint) }
737 /// Loads a value from the pointer.
739 /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
743 /// Panics if `order` is `Release` or `AcqRel`.
748 /// use std::sync::atomic::{AtomicPtr, Ordering};
750 /// let ptr = &mut 5i;
751 /// let some_ptr = AtomicPtr::new(ptr);
753 /// let value = some_ptr.load(Ordering::Relaxed);
    pub fn load(&self, order: Ordering) -> *mut T {
        unsafe {
            atomic_load(self.p.get() as *const *mut T, order) as *mut T
        }
    }
763 /// Stores a value into the pointer.
765 /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
770 /// use std::sync::atomic::{AtomicPtr, Ordering};
772 /// let ptr = &mut 5i;
773 /// let some_ptr = AtomicPtr::new(ptr);
775 /// let other_ptr = &mut 10i;
777 /// some_ptr.store(other_ptr, Ordering::Relaxed);
782 /// Panics if `order` is `Acquire` or `AcqRel`.
785 pub fn store(&self, ptr: *mut T, order: Ordering) {
786 unsafe { atomic_store(self.p.get(), ptr as uint, order); }
789 /// Stores a value into the pointer, returning the old value.
791 /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
796 /// use std::sync::atomic::{AtomicPtr, Ordering};
798 /// let ptr = &mut 5i;
799 /// let some_ptr = AtomicPtr::new(ptr);
801 /// let other_ptr = &mut 10i;
803 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
807 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
808 unsafe { atomic_swap(self.p.get(), ptr as uint, order) as *mut T }
811 /// Stores a value into the pointer if the current value is the same as the expected value.
813 /// If the return value is equal to `old` then the value was updated.
/// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
/// this operation.
821 /// use std::sync::atomic::{AtomicPtr, Ordering};
823 /// let ptr = &mut 5i;
824 /// let some_ptr = AtomicPtr::new(ptr);
826 /// let other_ptr = &mut 10i;
827 /// let another_ptr = &mut 10i;
829 /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
    pub fn compare_and_swap(&self, old: *mut T, new: *mut T, order: Ordering) -> *mut T {
        unsafe {
            atomic_compare_and_swap(self.p.get(), old as uint,
                                    new as uint, order) as *mut T
        }
    }
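// Illustrative sketch: the retry-loop idiom for lock-free updates through
// an `AtomicPtr` (hypothetical `head: &AtomicPtr<Node>` and a freshly
// allocated `node: *mut Node`, as in a Treiber-style stack push):
//
//     loop {
//         let old = head.load(Relaxed);
//         unsafe { (*node).next = old; }
//         // Publish `node`; if another thread changed `head` in the
//         // meantime, the CAS returns a different old value and we retry.
//         if head.compare_and_swap(old, node, Release) == old { break }
//     }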
unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
    match order {
        Release => intrinsics::atomic_store_rel(dst, val),
        Relaxed => intrinsics::atomic_store_relaxed(dst, val),
        SeqCst => intrinsics::atomic_store(dst, val),
        Acquire => panic!("there is no such thing as an acquire store"),
        AcqRel => panic!("there is no such thing as an acquire/release store"),
    }
}
unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_load_acq(dst),
        Relaxed => intrinsics::atomic_load_relaxed(dst),
        SeqCst => intrinsics::atomic_load(dst),
        Release => panic!("there is no such thing as a release load"),
        AcqRel => panic!("there is no such thing as an acquire/release load"),
    }
}
unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xchg_acq(dst, val),
        Release => intrinsics::atomic_xchg_rel(dst, val),
        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xchg(dst, val),
    }
}
876 /// Returns the old value (like __sync_fetch_and_add).
unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xadd_acq(dst, val),
        Release => intrinsics::atomic_xadd_rel(dst, val),
        AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xadd(dst, val),
    }
}
889 /// Returns the old value (like __sync_fetch_and_sub).
unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xsub_acq(dst, val),
        Release => intrinsics::atomic_xsub_rel(dst, val),
        AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xsub(dst, val),
    }
}
unsafe fn atomic_compare_and_swap<T>(dst: *mut T, old: T, new: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_cxchg_acq(dst, old, new),
        Release => intrinsics::atomic_cxchg_rel(dst, old, new),
        AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        SeqCst => intrinsics::atomic_cxchg(dst, old, new),
    }
}
unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_and_acq(dst, val),
        Release => intrinsics::atomic_and_rel(dst, val),
        AcqRel => intrinsics::atomic_and_acqrel(dst, val),
        Relaxed => intrinsics::atomic_and_relaxed(dst, val),
        SeqCst => intrinsics::atomic_and(dst, val),
    }
}
unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_nand_acq(dst, val),
        Release => intrinsics::atomic_nand_rel(dst, val),
        AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
        Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
        SeqCst => intrinsics::atomic_nand(dst, val),
    }
}
unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_or_acq(dst, val),
        Release => intrinsics::atomic_or_rel(dst, val),
        AcqRel => intrinsics::atomic_or_acqrel(dst, val),
        Relaxed => intrinsics::atomic_or_relaxed(dst, val),
        SeqCst => intrinsics::atomic_or(dst, val),
    }
}
unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xor_acq(dst, val),
        Release => intrinsics::atomic_xor_rel(dst, val),
        AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xor(dst, val),
    }
}
967 /// A fence 'A' which has `Release` ordering semantics, synchronizes with a
968 /// fence 'B' with (at least) `Acquire` semantics, if and only if there exists
969 /// atomic operations X and Y, both operating on some atomic object 'M' such
/// that A is sequenced before X, Y is sequenced before B and Y observes
971 /// the change to M. This provides a happens-before dependence between A and B.
/// Atomic operations with `Release` or `Acquire` semantics can also synchronize
/// with a fence.
976 /// A fence which has `SeqCst` ordering, in addition to having both `Acquire`
977 /// and `Release` semantics, participates in the global program order of the
978 /// other `SeqCst` operations and/or fences.
980 /// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings.
984 /// Panics if `order` is `Relaxed`.
pub fn fence(order: Ordering) {
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence_acq(),
            Release => intrinsics::atomic_fence_rel(),
            AcqRel => intrinsics::atomic_fence_acqrel(),
            SeqCst => intrinsics::atomic_fence(),
            Relaxed => panic!("there is no such thing as a relaxed fence"),
        }
    }
}