// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Core atomic primitives
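//!
//! # Examples
//!
//! A sketch of the simplest use, a shared counter; the names here are
//! illustrative, and the initializer constant is defined later in this
//! module:
//!
//! ```
//! use std::sync::atomic::{ATOMIC_UINT_INIT, AtomicUint, Ordering};
//!
//! static GLOBAL_COUNT: AtomicUint = ATOMIC_UINT_INIT;
//!
//! // `fetch_add` returns the value *before* the add, so concurrent callers
//! // each observe a distinct count.
//! let a = GLOBAL_COUNT.fetch_add(1, Ordering::SeqCst);
//! let b = GLOBAL_COUNT.fetch_add(1, Ordering::SeqCst);
//! assert!(a != b);
//! ```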
use self::Ordering::*;

use intrinsics;
use cell::UnsafeCell;
/// A boolean type which can be safely shared between threads.
pub struct AtomicBool {
    v: UnsafeCell<uint>,
}

unsafe impl Sync for AtomicBool {}
/// A signed integer type which can be safely shared between threads.
pub struct AtomicInt {
    v: UnsafeCell<int>,
}

unsafe impl Sync for AtomicInt {}
/// An unsigned integer type which can be safely shared between threads.
pub struct AtomicUint {
    v: UnsafeCell<uint>,
}

unsafe impl Sync for AtomicUint {}
/// A raw pointer type which can be safely shared between threads.
pub struct AtomicPtr<T> {
    p: UnsafeCell<uint>,
}

unsafe impl<T> Sync for AtomicPtr<T> {}
/// Atomic memory orderings
///
/// Memory orderings limit the ways that both the compiler and CPU may reorder
/// instructions around atomic operations. At its most restrictive,
/// "sequentially consistent" atomics allow neither reads nor writes
/// to be moved either before or after the atomic operation; on the other end
/// "relaxed" atomics allow all reorderings.
///
/// Rust's memory orderings are [the same as
/// C++'s](http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync).
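///
/// # Examples
///
/// A sketch of the release/acquire pairing; the two halves would run on two
/// different threads (the spawning mechanism is omitted, and the names are
/// illustrative):
///
/// ```
/// use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_UINT_INIT};
/// use std::sync::atomic::{AtomicBool, AtomicUint, Ordering};
///
/// static DATA: AtomicUint = ATOMIC_UINT_INIT;
/// static READY: AtomicBool = ATOMIC_BOOL_INIT;
///
/// // Producer thread: publish the data, then set the flag with `Release`.
/// DATA.store(42, Ordering::Relaxed);
/// READY.store(true, Ordering::Release);
///
/// // Consumer thread: an `Acquire` load that observes `true` also observes
/// // every write made before the `Release` store, so DATA is ready.
/// if READY.load(Ordering::Acquire) {
///     assert_eq!(42, DATA.load(Ordering::Relaxed));
/// }
/// ```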
pub enum Ordering {
    /// No ordering constraints, only atomic operations.
    Relaxed,
    /// When coupled with a store, all previous writes become visible
    /// to another thread that performs a load with `Acquire` ordering
    /// on the same value.
    Release,
    /// When coupled with a load, all subsequent loads will see data
    /// written before a store with `Release` ordering on the same value
    /// in another thread.
    Acquire,
    /// When coupled with a load, uses `Acquire` ordering, and with a store
    /// `Release` ordering.
    AcqRel,
    /// Like `AcqRel` with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    SeqCst,
}
/// An `AtomicBool` initialized to `false`.
#[unstable = "may be renamed, pending conventions for static initializers"]
pub const ATOMIC_BOOL_INIT: AtomicBool =
        AtomicBool { v: UnsafeCell { value: 0 } };
/// An `AtomicInt` initialized to `0`.
#[unstable = "may be renamed, pending conventions for static initializers"]
pub const ATOMIC_INT_INIT: AtomicInt =
        AtomicInt { v: UnsafeCell { value: 0 } };
/// An `AtomicUint` initialized to `0`.
#[unstable = "may be renamed, pending conventions for static initializers"]
pub const ATOMIC_UINT_INIT: AtomicUint =
        AtomicUint { v: UnsafeCell { value: 0 } };
#[deprecated = "renamed to ATOMIC_BOOL_INIT"]
pub const INIT_ATOMIC_BOOL: AtomicBool = ATOMIC_BOOL_INIT;

#[deprecated = "renamed to ATOMIC_INT_INIT"]
pub const INIT_ATOMIC_INT: AtomicInt = ATOMIC_INT_INIT;

#[deprecated = "renamed to ATOMIC_UINT_INIT"]
pub const INIT_ATOMIC_UINT: AtomicUint = ATOMIC_UINT_INIT;
// NB: Needs to be all ones (0b11111111...) to make `fetch_nand` work
// correctly.
const UINT_TRUE: uint = !0;
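//
// A sketch of why, with bit widths abbreviated: if `true` were stored as 1,
// a `fetch_nand(true)` on a true bool would compute `!(1 & 1) ==
// 0b111...110`, which is still non-zero and would read back as `true`. With
// all ones, `!(UINT_TRUE & UINT_TRUE) == 0`, which correctly reads back as
// `false`.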
impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    #[inline]
    pub fn new(v: bool) -> AtomicBool {
        let val = if v { UINT_TRUE } else { 0 };
        AtomicBool { v: UnsafeCell::new(val) }
    }
    /// Loads a value from the bool.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// let value = some_bool.load(Ordering::Relaxed);
    /// ```
    #[inline]
    pub fn load(&self, order: Ordering) -> bool {
        unsafe { atomic_load(self.v.get() as *const uint, order) > 0 }
    }
    /// Stores a value into the bool.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// some_bool.store(false, Ordering::Relaxed);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    pub fn store(&self, val: bool, order: Ordering) {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_store(self.v.get(), val, order); }
    }
    /// Stores a value into the bool, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// let value = some_bool.swap(false, Ordering::Relaxed);
    /// ```
    #[inline]
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_swap(self.v.get(), val, order) > 0 }
    }
    /// Stores a value into the bool if the current value is the same as the expected value.
    ///
    /// If the return value is equal to `old` then the value was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// let value = some_bool.compare_and_swap(true, false, Ordering::Relaxed);
    /// ```
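    ///
    /// A sketch of the same method used as a simple test-and-set lock (the
    /// contending thread and the critical section are left as comments):
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let lock = AtomicBool::new(false);
    ///
    /// // Acquire: succeeds exactly when the previous value was `false`.
    /// while lock.compare_and_swap(false, true, Ordering::Acquire) {
    ///     // the lock was already held; spin and retry
    /// }
    /// // ... critical section ...
    /// lock.store(false, Ordering::Release); // release the lock
    /// ```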
    #[inline]
    pub fn compare_and_swap(&self, old: bool, new: bool, order: Ordering) -> bool {
        let old = if old { UINT_TRUE } else { 0 };
        let new = if new { UINT_TRUE } else { 0 };

        unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) > 0 }
    }
    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, SeqCst};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_and(false, SeqCst));
    /// assert_eq!(false, foo.load(SeqCst));
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_and(true, SeqCst));
    /// assert_eq!(true, foo.load(SeqCst));
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(false, foo.fetch_and(false, SeqCst));
    /// assert_eq!(false, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_and(self.v.get(), val, order) > 0 }
    }
    /// Logical "nand" with a boolean value.
    ///
    /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, SeqCst};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_nand(false, SeqCst));
    /// assert_eq!(true, foo.load(SeqCst));
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_nand(true, SeqCst));
    /// assert_eq!(false, foo.load(SeqCst));
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(false, foo.fetch_nand(false, SeqCst));
    /// assert_eq!(true, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_nand(self.v.get(), val, order) > 0 }
    }
    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
    /// new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, SeqCst};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_or(false, SeqCst));
    /// assert_eq!(true, foo.load(SeqCst));
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_or(true, SeqCst));
    /// assert_eq!(true, foo.load(SeqCst));
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(false, foo.fetch_or(false, SeqCst));
    /// assert_eq!(false, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_or(self.v.get(), val, order) > 0 }
    }
    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, SeqCst};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_xor(false, SeqCst));
    /// assert_eq!(true, foo.load(SeqCst));
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_xor(true, SeqCst));
    /// assert_eq!(false, foo.load(SeqCst));
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(false, foo.fetch_xor(false, SeqCst));
    /// assert_eq!(false, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_xor(self.v.get(), val, order) > 0 }
    }
}
impl AtomicInt {
    /// Creates a new `AtomicInt`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicInt;
    ///
    /// let atomic_forty_two = AtomicInt::new(42);
    /// ```
    #[inline]
    pub fn new(v: int) -> AtomicInt {
        AtomicInt { v: UnsafeCell::new(v) }
    }
    /// Loads a value from the int.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicInt, Ordering};
    ///
    /// let some_int = AtomicInt::new(5);
    ///
    /// let value = some_int.load(Ordering::Relaxed);
    /// ```
    #[inline]
    pub fn load(&self, order: Ordering) -> int {
        unsafe { atomic_load(self.v.get() as *const int, order) }
    }
    /// Stores a value into the int.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicInt, Ordering};
    ///
    /// let some_int = AtomicInt::new(5);
    ///
    /// some_int.store(10, Ordering::Relaxed);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    pub fn store(&self, val: int, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }
    /// Stores a value into the int, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicInt, Ordering};
    ///
    /// let some_int = AtomicInt::new(5);
    ///
    /// let value = some_int.swap(10, Ordering::Relaxed);
    /// ```
    #[inline]
    pub fn swap(&self, val: int, order: Ordering) -> int {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }
    /// Stores a value into the int if the current value is the same as the expected value.
    ///
    /// If the return value is equal to `old` then the value was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicInt, Ordering};
    ///
    /// let some_int = AtomicInt::new(5);
    ///
    /// let value = some_int.compare_and_swap(5, 10, Ordering::Relaxed);
    /// ```
    #[inline]
    pub fn compare_and_swap(&self, old: int, new: int, order: Ordering) -> int {
        unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) }
    }
    /// Add an int to the current value, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicInt, SeqCst};
    ///
    /// let foo = AtomicInt::new(0);
    /// assert_eq!(0, foo.fetch_add(10, SeqCst));
    /// assert_eq!(10, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_add(&self, val: int, order: Ordering) -> int {
        unsafe { atomic_add(self.v.get(), val, order) }
    }
    /// Subtract an int from the current value, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicInt, SeqCst};
    ///
    /// let foo = AtomicInt::new(0);
    /// assert_eq!(0, foo.fetch_sub(10, SeqCst));
    /// assert_eq!(-10, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_sub(&self, val: int, order: Ordering) -> int {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }
    /// Bitwise and with the current int, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicInt, SeqCst};
    ///
    /// let foo = AtomicInt::new(0b101101);
    /// assert_eq!(0b101101, foo.fetch_and(0b110011, SeqCst));
    /// assert_eq!(0b100001, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_and(&self, val: int, order: Ordering) -> int {
        unsafe { atomic_and(self.v.get(), val, order) }
    }
    /// Bitwise or with the current int, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicInt, SeqCst};
    ///
    /// let foo = AtomicInt::new(0b101101);
    /// assert_eq!(0b101101, foo.fetch_or(0b110011, SeqCst));
    /// assert_eq!(0b111111, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_or(&self, val: int, order: Ordering) -> int {
        unsafe { atomic_or(self.v.get(), val, order) }
    }
    /// Bitwise xor with the current int, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicInt, SeqCst};
    ///
    /// let foo = AtomicInt::new(0b101101);
    /// assert_eq!(0b101101, foo.fetch_xor(0b110011, SeqCst));
    /// assert_eq!(0b011110, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_xor(&self, val: int, order: Ordering) -> int {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}
impl AtomicUint {
    /// Creates a new `AtomicUint`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicUint;
    ///
    /// let atomic_forty_two = AtomicUint::new(42u);
    /// ```
    #[inline]
    pub fn new(v: uint) -> AtomicUint {
        AtomicUint { v: UnsafeCell::new(v) }
    }
    /// Loads a value from the uint.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUint, Ordering};
    ///
    /// let some_uint = AtomicUint::new(5);
    ///
    /// let value = some_uint.load(Ordering::Relaxed);
    /// ```
    #[inline]
    pub fn load(&self, order: Ordering) -> uint {
        unsafe { atomic_load(self.v.get() as *const uint, order) }
    }
    /// Stores a value into the uint.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUint, Ordering};
    ///
    /// let some_uint = AtomicUint::new(5);
    ///
    /// some_uint.store(10, Ordering::Relaxed);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    pub fn store(&self, val: uint, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }
    /// Stores a value into the uint, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUint, Ordering};
    ///
    /// let some_uint = AtomicUint::new(5);
    ///
    /// let value = some_uint.swap(10, Ordering::Relaxed);
    /// ```
    #[inline]
    pub fn swap(&self, val: uint, order: Ordering) -> uint {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }
    /// Stores a value into the uint if the current value is the same as the expected value.
    ///
    /// If the return value is equal to `old` then the value was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUint, Ordering};
    ///
    /// let some_uint = AtomicUint::new(5);
    ///
    /// let value = some_uint.compare_and_swap(5, 10, Ordering::Relaxed);
    /// ```
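    ///
    /// A sketch of the usual retry loop built on `compare_and_swap`: reread
    /// on failure and retry until our update wins (here, an increment by 2):
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUint, Ordering};
    ///
    /// let some_uint = AtomicUint::new(5);
    ///
    /// let mut old = some_uint.load(Ordering::Relaxed);
    /// loop {
    ///     let seen = some_uint.compare_and_swap(old, old + 2, Ordering::Relaxed);
    ///     if seen == old { break; } // our write landed
    ///     old = seen;               // lost a race; retry against the new value
    /// }
    /// assert_eq!(7, some_uint.load(Ordering::Relaxed));
    /// ```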
    #[inline]
    pub fn compare_and_swap(&self, old: uint, new: uint, order: Ordering) -> uint {
        unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) }
    }
    /// Add to the current uint, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUint, SeqCst};
    ///
    /// let foo = AtomicUint::new(0);
    /// assert_eq!(0, foo.fetch_add(10, SeqCst));
    /// assert_eq!(10, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_add(&self, val: uint, order: Ordering) -> uint {
        unsafe { atomic_add(self.v.get(), val, order) }
    }
    /// Subtract from the current uint, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUint, SeqCst};
    ///
    /// let foo = AtomicUint::new(10);
    /// assert_eq!(10, foo.fetch_sub(10, SeqCst));
    /// assert_eq!(0, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_sub(&self, val: uint, order: Ordering) -> uint {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }
    /// Bitwise and with the current uint, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUint, SeqCst};
    ///
    /// let foo = AtomicUint::new(0b101101);
    /// assert_eq!(0b101101, foo.fetch_and(0b110011, SeqCst));
    /// assert_eq!(0b100001, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_and(&self, val: uint, order: Ordering) -> uint {
        unsafe { atomic_and(self.v.get(), val, order) }
    }
    /// Bitwise or with the current uint, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUint, SeqCst};
    ///
    /// let foo = AtomicUint::new(0b101101);
    /// assert_eq!(0b101101, foo.fetch_or(0b110011, SeqCst));
    /// assert_eq!(0b111111, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_or(&self, val: uint, order: Ordering) -> uint {
        unsafe { atomic_or(self.v.get(), val, order) }
    }
    /// Bitwise xor with the current uint, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUint, SeqCst};
    ///
    /// let foo = AtomicUint::new(0b101101);
    /// assert_eq!(0b101101, foo.fetch_xor(0b110011, SeqCst));
    /// assert_eq!(0b011110, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_xor(&self, val: uint, order: Ordering) -> uint {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}
impl<T> AtomicPtr<T> {
    /// Creates a new `AtomicPtr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let ptr = &mut 5i;
    /// let atomic_ptr = AtomicPtr::new(ptr);
    /// ```
    #[inline]
    pub fn new(p: *mut T) -> AtomicPtr<T> {
        AtomicPtr { p: UnsafeCell::new(p as uint) }
    }
    /// Loads a value from the pointer.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5i;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let value = some_ptr.load(Ordering::Relaxed);
    /// ```
    #[inline]
    pub fn load(&self, order: Ordering) -> *mut T {
        unsafe {
            atomic_load(self.p.get() as *const *mut T, order) as *mut T
        }
    }
    /// Stores a value into the pointer.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5i;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10i;
    ///
    /// some_ptr.store(other_ptr, Ordering::Relaxed);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    pub fn store(&self, ptr: *mut T, order: Ordering) {
        unsafe { atomic_store(self.p.get(), ptr as uint, order); }
    }
    /// Stores a value into the pointer, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5i;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10i;
    ///
    /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
        unsafe { atomic_swap(self.p.get(), ptr as uint, order) as *mut T }
    }
    /// Stores a value into the pointer if the current value is the same as the expected value.
    ///
    /// If the return value is equal to `old` then the value was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5i;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10i;
    /// let another_ptr = &mut 10i;
    ///
    /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    pub fn compare_and_swap(&self, old: *mut T, new: *mut T, order: Ordering) -> *mut T {
        unsafe {
            atomic_compare_and_swap(self.p.get(), old as uint,
                                    new as uint, order) as *mut T
        }
    }
}
#[inline]
unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
    match order {
        Release => intrinsics::atomic_store_rel(dst, val),
        Relaxed => intrinsics::atomic_store_relaxed(dst, val),
        SeqCst => intrinsics::atomic_store(dst, val),
        Acquire => panic!("there is no such thing as an acquire store"),
        AcqRel => panic!("there is no such thing as an acquire/release store"),
    }
}
#[inline]
unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_load_acq(dst),
        Relaxed => intrinsics::atomic_load_relaxed(dst),
        SeqCst => intrinsics::atomic_load(dst),
        Release => panic!("there is no such thing as a release load"),
        AcqRel => panic!("there is no such thing as an acquire/release load"),
    }
}
#[inline]
unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xchg_acq(dst, val),
        Release => intrinsics::atomic_xchg_rel(dst, val),
        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xchg(dst, val)
    }
}
/// Returns the old value (like __sync_fetch_and_add).
#[inline]
unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xadd_acq(dst, val),
        Release => intrinsics::atomic_xadd_rel(dst, val),
        AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xadd(dst, val)
    }
}
/// Returns the old value (like __sync_fetch_and_sub).
#[inline]
unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xsub_acq(dst, val),
        Release => intrinsics::atomic_xsub_rel(dst, val),
        AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xsub(dst, val)
    }
}
#[inline]
unsafe fn atomic_compare_and_swap<T>(dst: *mut T, old: T, new: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_cxchg_acq(dst, old, new),
        Release => intrinsics::atomic_cxchg_rel(dst, old, new),
        AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        SeqCst => intrinsics::atomic_cxchg(dst, old, new),
    }
}
#[inline]
unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_and_acq(dst, val),
        Release => intrinsics::atomic_and_rel(dst, val),
        AcqRel => intrinsics::atomic_and_acqrel(dst, val),
        Relaxed => intrinsics::atomic_and_relaxed(dst, val),
        SeqCst => intrinsics::atomic_and(dst, val)
    }
}
#[inline]
unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_nand_acq(dst, val),
        Release => intrinsics::atomic_nand_rel(dst, val),
        AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
        Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
        SeqCst => intrinsics::atomic_nand(dst, val)
    }
}
#[inline]
unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_or_acq(dst, val),
        Release => intrinsics::atomic_or_rel(dst, val),
        AcqRel => intrinsics::atomic_or_acqrel(dst, val),
        Relaxed => intrinsics::atomic_or_relaxed(dst, val),
        SeqCst => intrinsics::atomic_or(dst, val)
    }
}
#[inline]
unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xor_acq(dst, val),
        Release => intrinsics::atomic_xor_rel(dst, val),
        AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xor(dst, val)
    }
}
/// An atomic fence.
///
/// A fence 'A' with `Release` ordering semantics synchronizes with a fence
/// 'B' with (at least) `Acquire` semantics if and only if there exist atomic
/// operations X and Y, both operating on some atomic object 'M', such that A
/// is sequenced before X, Y is sequenced before B, and Y observes the change
/// to M. This provides a happens-before dependence between A and B.
///
/// Atomic operations with `Release` or `Acquire` semantics can also
/// synchronize with a fence.
///
/// A fence which has `SeqCst` ordering, in addition to having both `Acquire`
/// and `Release` semantics, participates in the global program order of the
/// other `SeqCst` operations and/or fences.
///
/// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings.
///
/// # Panics
///
/// Panics if `order` is `Relaxed`.
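///
/// # Examples
///
/// A sketch of the A/X/Y/B pairing described above; the two halves would run
/// on two different threads, and the names are illustrative:
///
/// ```
/// use std::sync::atomic::{fence, AtomicBool, AtomicUint, Ordering};
/// use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_UINT_INIT};
///
/// static DATA: AtomicUint = ATOMIC_UINT_INIT;
/// static FLAG: AtomicBool = ATOMIC_BOOL_INIT;
///
/// // Thread 1
/// DATA.store(1, Ordering::Relaxed);
/// fence(Ordering::Release);            // fence A
/// FLAG.store(true, Ordering::Relaxed); // operation X
///
/// // Thread 2
/// if FLAG.load(Ordering::Relaxed) {    // operation Y, observes X
///     fence(Ordering::Acquire);        // fence B
///     assert_eq!(1, DATA.load(Ordering::Relaxed));
/// }
/// ```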
#[inline]
pub fn fence(order: Ordering) {
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence_acq(),
            Release => intrinsics::atomic_fence_rel(),
            AcqRel => intrinsics::atomic_fence_acqrel(),
            SeqCst => intrinsics::atomic_fence(),
            Relaxed => panic!("there is no such thing as a relaxed fence")
        }
    }
}