1 //@compile-flags: -Zmiri-strict-provenance
2 #![feature(strict_provenance, strict_provenance_atomic_ptr)]
3 use std::sync::atomic::{
4 compiler_fence, fence, AtomicBool, AtomicIsize, AtomicPtr, AtomicU64, Ordering::*,
// Fragment of the test driver (the enclosing `fn main` header is elided in
// this view): exercise the spurious-failure behavior of
// `compare_exchange_weak` defined further down.
13 weak_sometimes_fails();
// `AtomicBool` smoke test (enclosing function header and the surrounding
// `unsafe` block are elided in this view; `get_mut` on a `static mut`
// requires unsafe). Each RMW op is followed by a `get_mut` read that checks
// the value now stored in the atomic.
17 static mut ATOMIC: AtomicBool = AtomicBool::new(false);
20 assert_eq!(*ATOMIC.get_mut(), false);
21 ATOMIC.store(true, SeqCst);
22 assert_eq!(*ATOMIC.get_mut(), true);
// true | false == true: value unchanged.
23 ATOMIC.fetch_or(false, SeqCst);
24 assert_eq!(*ATOMIC.get_mut(), true);
// true & false == false.
25 ATOMIC.fetch_and(false, SeqCst);
26 assert_eq!(*ATOMIC.get_mut(), false);
// !(false & true) == true.
27 ATOMIC.fetch_nand(true, SeqCst);
28 assert_eq!(*ATOMIC.get_mut(), true);
// true ^ true == false.
29 ATOMIC.fetch_xor(true, SeqCst);
30 assert_eq!(*ATOMIC.get_mut(), false);
34 // There isn't a trait to use to make this generic, so just use a macro
// Retries `compare_exchange_weak` until it does not fail spuriously, then
// checks the result. (The loop scaffolding and the success arm are elided
// in this view.)
35 macro_rules! compare_exchange_weak_loop {
36 ($atom:expr, $from:expr, $to:expr, $succ_order:expr, $fail_order:expr) => {
38 match $atom.compare_exchange_weak($from, $to, $succ_order, $fail_order) {
// On failure, the value returned must still equal the expected one —
// i.e. the failure was spurious, not a real mismatch — so retry.
43 Err(n) => assert_eq!(n, $from),
49 /// Make sure we can handle all the intrinsics
// Exercises every atomic operation with every memory ordering that is legal
// for it, on identity-like operands (0) so the stored value never changes.
// (Enclosing function header and some loop bodies are elided in this view.)
51 static ATOMIC: AtomicIsize = AtomicIsize::new(0);
52 static ATOMIC_UNSIGNED: AtomicU64 = AtomicU64::new(0);
// Orderings legal for loads, stores, and RMW operations, respectively.
54 let load_orders = [Relaxed, Acquire, SeqCst];
55 let stored_orders = [Relaxed, Release, SeqCst];
56 let rmw_orders = [Relaxed, Release, Acquire, AcqRel, SeqCst];
59 for o in load_orders {
64 for o in stored_orders {
// Every RMW intrinsic, with operand 0 so the value stays 0 throughout.
// Both signed (AtomicIsize) and unsigned (AtomicU64) min/max are covered,
// since they lower to different intrinsics.
71 ATOMIC.fetch_or(0, o);
72 ATOMIC.fetch_xor(0, o);
73 ATOMIC.fetch_and(0, o);
74 ATOMIC.fetch_nand(0, o);
75 ATOMIC.fetch_add(0, o);
76 ATOMIC.fetch_sub(0, o);
77 ATOMIC.fetch_min(0, o);
78 ATOMIC.fetch_max(0, o);
79 ATOMIC_UNSIGNED.fetch_min(0, o);
80 ATOMIC_UNSIGNED.fetch_max(0, o);
83 // RMWs with separate failure ordering
// compare_exchange takes a success ordering (any RMW ordering) and a
// failure ordering (load orderings only); test the full cross product.
84 for o1 in rmw_orders {
85 for o2 in load_orders {
86 let _res = ATOMIC.compare_exchange(0, 0, o1, o2);
87 let _res = ATOMIC.compare_exchange_weak(0, 0, o1, o2);
// `AtomicU64` value tests (enclosing function header elided in this view).
93 static ATOMIC: AtomicU64 = AtomicU64::new(0);
95 ATOMIC.store(1, SeqCst);
// Value is 1, so exchanges expecting 0 fail and report the actual value.
96 assert_eq!(ATOMIC.compare_exchange(0, 0x100, AcqRel, Acquire), Err(1));
97 assert_eq!(ATOMIC.compare_exchange(0, 1, Release, Relaxed), Err(1));
// Expecting 1 succeeds: 1 -> 0; then expecting 0 succeeds: 0 -> 1.
98 assert_eq!(ATOMIC.compare_exchange(1, 0, AcqRel, Relaxed), Ok(1));
99 assert_eq!(ATOMIC.compare_exchange(0, 1, Relaxed, Relaxed), Ok(0));
// Weak CAS may fail spuriously, so loop until it succeeds: 1 -> 0x100.
100 compare_exchange_weak_loop!(ATOMIC, 1, 0x100, AcqRel, Acquire);
// Value is now 0x100; weak exchanges expecting 0 must report a real
// mismatch (Err(0x100)), regardless of spurious-failure behavior.
101 assert_eq!(ATOMIC.compare_exchange_weak(0, 2, Acquire, Relaxed), Err(0x100));
102 assert_eq!(ATOMIC.compare_exchange_weak(0, 1, Release, Relaxed), Err(0x100));
103 assert_eq!(ATOMIC.load(Relaxed), 0x100);
// fetch_max returns the PREVIOUS value; it only stores when the operand is
// larger. Start at 0x100: stays 0x100 twice, then climbs to 0x1000, 0x2000.
105 assert_eq!(ATOMIC.fetch_max(0x10, SeqCst), 0x100);
106 assert_eq!(ATOMIC.fetch_max(0x100, SeqCst), 0x100);
107 assert_eq!(ATOMIC.fetch_max(0x1000, SeqCst), 0x100);
108 assert_eq!(ATOMIC.fetch_max(0x1000, SeqCst), 0x1000);
109 assert_eq!(ATOMIC.fetch_max(0x2000, SeqCst), 0x1000);
110 assert_eq!(ATOMIC.fetch_max(0x2000, SeqCst), 0x2000);
// Mirror image with fetch_min: start at 0x2000 and descend to 0x10.
112 assert_eq!(ATOMIC.fetch_min(0x2000, SeqCst), 0x2000);
113 assert_eq!(ATOMIC.fetch_min(0x2000, SeqCst), 0x2000);
114 assert_eq!(ATOMIC.fetch_min(0x1000, SeqCst), 0x2000);
115 assert_eq!(ATOMIC.fetch_min(0x1000, SeqCst), 0x1000);
116 assert_eq!(ATOMIC.fetch_min(0x100, SeqCst), 0x1000);
117 assert_eq!(ATOMIC.fetch_min(0x10, SeqCst), 0x100);
// Fence test fragment: `compiler_fence` accepted with every legal ordering.
// (The enclosing function header — and, presumably, matching `fence` calls,
// since `fence` is imported at the top — are elided in this view.)
125 compiler_fence(SeqCst);
126 compiler_fence(Release);
127 compiler_fence(Acquire);
128 compiler_fence(AcqRel);
// `AtomicPtr` strict-provenance tests (enclosing function header and the
// argument lists / result checks of the two `compare_exchange` calls are
// elided in this view).
133 let array: Vec<i32> = (0..100).into_iter().collect(); // a target to point to, to test provenance things
134 let x = array.as_ptr() as *mut i32;
136 let ptr = AtomicPtr::<i32>::new(ptr::null_mut());
137 assert!(ptr.load(Relaxed).addr() == 0);
// An invalid (address-only, no provenance) pointer round-trips through the
// atomic: swap returns it with its address 13 intact.
138 ptr.store(ptr::invalid_mut(13), SeqCst);
139 assert!(ptr.swap(x, Relaxed).addr() == 13);
// `ptr` now holds `x` (start of the array), so it dereferences to array[0].
140 unsafe { assert!(*ptr.load(Acquire) == 0) };
142 // comparison ignores provenance
// The expected pointer has a DIFFERENT provenance (a fresh local) but the
// same address as `x` — the compare is by address only.
144 ptr.compare_exchange(
145 (&mut 0 as *mut i32).with_addr(x.addr()),
155 ptr.compare_exchange(
156 (&mut 0 as *mut i32).with_addr(x.addr()),
// Pointer arithmetic: fetch_ptr_add/sub count in ELEMENTS (i32 = 4 bytes),
// as the `* 4` in the address assertions shows. Each returns the previous
// pointer.
165 ptr.store(x, Relaxed);
167 assert_eq!(ptr.fetch_ptr_add(13, AcqRel).addr(), x.addr());
168 unsafe { assert_eq!(*ptr.load(SeqCst), 13) }; // points to index 13 now
169 assert_eq!(ptr.fetch_ptr_sub(4, AcqRel).addr(), x.addr() + 13 * 4);
170 unsafe { assert_eq!(*ptr.load(SeqCst), 9) };
// Bitwise ops on the address bits: set/clear the low 2 alignment bits, and
// XOR twice with the same mask to get the original pointer back. Each call
// returns the previous pointer, hence the staggered expected addresses.
171 assert_eq!(ptr.fetch_or(3, AcqRel).addr(), x.addr() + 9 * 4); // ptr is 4-aligned, so set the last 2 bits
172 assert_eq!(ptr.fetch_and(!3, AcqRel).addr(), (x.addr() + 9 * 4) | 3); // and unset them again
173 unsafe { assert_eq!(*ptr.load(SeqCst), 9) };
174 assert_eq!(ptr.fetch_xor(0xdeadbeef, AcqRel).addr(), x.addr() + 9 * 4);
175 assert_eq!(ptr.fetch_xor(0xdeadbeef, AcqRel).addr(), (x.addr() + 9 * 4) ^ 0xdeadbeef);
176 unsafe { assert_eq!(*ptr.load(SeqCst), 9) }; // after XORing twice with the same thing, we get our ptr back
// Checks that `compare_exchange_weak` does fail spuriously: repeatedly try a
// CAS that cannot mismatch (expected value freshly loaded), and bail out as
// soon as one attempt fails. If every attempt succeeds, panic — the test
// expects the weak CAS to fail at least once within the retry budget.
// (The retry loop header and the `tries` counter declaration are elided in
// this view — presumably a bounded loop; TODO confirm against the full file.)
179 fn weak_sometimes_fails() {
180 let atomic = AtomicBool::new(false);
// Load the current value, then try to swap in its negation — the expected
// value is always correct, so any Err is a spurious failure.
183 let cur = atomic.load(Relaxed);
184 // Try (weakly) to flip the flag.
185 if atomic.compare_exchange_weak(cur, !cur, Relaxed, Relaxed).is_err() {
186 // We failed, so return and skip the panic.
190 panic!("compare_exchange_weak succeeded {} tries in a row", tries);