//@ignore-target-windows: Concurrency on Windows is not supported yet.
//@compile-flags: -Zmiri-ignore-leaks -Zmiri-disable-stacked-borrows

// The following tests check whether our weak memory emulation produces
// any inconsistent execution outcomes.
//
// Due to the random nature of choosing valid stores, it is always
// possible that our tests spuriously succeed: even though our weak
// memory emulation code has incorrectly identified a store in
// modification order as being valid, it may never be chosen by
// the RNG and thus never observed in our tests.
//
// To mitigate this, each test is run enough times that the chance
// of spurious success is very low. These tests never spuriously fail.

// Test cases and their consistent outcomes are from
// http://svr-pes20-cppmem.cl.cam.ac.uk/cppmem/
// Based on
// M. Batty, S. Owens, S. Sarkar, P. Sewell and T. Weber,
// "Mathematizing C++ concurrency", ACM SIGPLAN Notices, vol. 46, no. 1, pp. 55-66, 2011.
// Available: https://ss265.host.cs.st-andrews.ac.uk/papers/n3132.pdf.

use std::sync::atomic::Ordering::*;
use std::sync::atomic::{fence, AtomicBool, AtomicI32};
use std::thread::spawn;

#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);

unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}
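// `EvilSend` exists so the tests can move a raw pointer into spawned threads:
// `*mut T` is neither `Send` nor `Sync`, so we wrap it and unsafely assert both.
// The atomics in each test provide the synchronization that keeps those
// unsafe accesses free of data races.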

// We can't create static items because we need to run each test multiple times.
fn static_atomic(val: i32) -> &'static AtomicI32 {
    let ret = Box::leak(Box::new(AtomicI32::new(val)));
    ret.store(val, Relaxed); // work around https://github.com/rust-lang/miri/issues/2164
    ret
}
fn static_atomic_bool(val: bool) -> &'static AtomicBool {
    let ret = Box::leak(Box::new(AtomicBool::new(val)));
    ret.store(val, Relaxed); // work around https://github.com/rust-lang/miri/issues/2164
    ret
}

// Spins until it acquires a pre-determined value.
fn acquires_value(loc: &AtomicI32, val: i32) -> i32 {
    while loc.load(Acquire) != val {
        std::hint::spin_loop();
    }
    val
}
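
// Note: once `acquires_value(&a, v)` returns, its Acquire load has read a value
// written by a Release store of `v`, so that store synchronizes-with the load.
// The arrow diagrams below mark these edges explicitly.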

fn test_corr() {
    let x = static_atomic(0);
    let y = static_atomic(0);

    let j1 = spawn(move || {
        x.store(1, Relaxed);
        x.store(2, Relaxed);
    });
    #[rustfmt::skip]
    let j2 = spawn(move || {
        let r2 = x.load(Relaxed); // -------------------------------------+
        y.store(1, Release); // ---------------------+                    |
        r2 //                                        |                    |
    }); //                                           |                    |
    #[rustfmt::skip] //                              |synchronizes-with   |happens-before
    let j3 = spawn(move || { //                      |                    |
        acquires_value(&y, 1); // <------------------+                    |
        x.load(Relaxed) // <----------------------------------------------+
        // The two reads on x are ordered by hb, so they cannot observe values
        // differently from the modification order. If the first read observed
        // 2, then the second read must observe 2 as well.
    });

    j1.join().unwrap();
    let r2 = j2.join().unwrap();
    let r3 = j3.join().unwrap();
    if r2 == 2 {
        assert_eq!(r3, 2);
    }
}

fn test_wrc() {
    let x = static_atomic(0);
    let y = static_atomic(0);

    #[rustfmt::skip]
    let j1 = spawn(move || {
        x.store(1, Release); // ---------------------+---------------------+
    }); //                                           |                     |
    #[rustfmt::skip] //                              |synchronizes-with    |
    let j2 = spawn(move || { //                      |                     |
        acquires_value(&x, 1); // <------------------+                     |
        y.store(1, Release); // ---------------------+                     |happens-before
    }); //                                           |                     |
    #[rustfmt::skip] //                              |synchronizes-with    |
    let j3 = spawn(move || { //                      |                     |
        acquires_value(&y, 1); // <------------------+                     |
        x.load(Relaxed) // <-----------------------------------------------+
    });

    j1.join().unwrap();
    j2.join().unwrap();
    let r3 = j3.join().unwrap();

    assert_eq!(r3, 1); // the two synchronizes-with edges chain into happens-before
}

fn test_message_passing() {
    let mut var = 0u32;
    let ptr = &mut var as *mut u32;
    let x = EvilSend(ptr);
    let y = static_atomic(0);

    #[rustfmt::skip]
    let j1 = spawn(move || {
        unsafe { *x.0 = 1 }; // -----------------------------------------+
        y.store(1, Release); // ---------------------+                   |
    }); //                                           |                   |
    #[rustfmt::skip] //                              |synchronizes-with  | happens-before
    let j2 = spawn(move || { //                      |                   |
        acquires_value(&y, 1); // <------------------+                   |
        unsafe { *x.0 } // <---------------------------------------------+
    });

    j1.join().unwrap();
    let r2 = j2.join().unwrap();

    assert_eq!(r2, 1); // the release/acquire pair makes the write to `var` visible
}

// LB+acq_rel+acq_rel
fn test_load_buffering_acq_rel() {
    let x = static_atomic(0);
    let y = static_atomic(0);

    let j1 = spawn(move || {
        let r1 = x.load(Acquire);
        y.store(1, Release);
        r1
    });

    let j2 = spawn(move || {
        let r2 = y.load(Acquire);
        x.store(1, Release);
        r2
    });

    let r1 = j1.join().unwrap();
    let r2 = j2.join().unwrap();

    // 3 consistent outcomes: (0,0), (0,1), (1,0)
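    // (The outcome (1,1) would require each Acquire load to read the other
    // thread's Release store; chaining sequenced-before with the two resulting
    // synchronizes-with edges would produce a happens-before cycle, which the
    // memory model forbids.)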
    assert_ne!((r1, r2), (1, 1));
}

fn test_mixed_access() {
    // cppmem version of this test:
    //     x.store(1, mo_relaxed);   (first spawned thread)
    //     x.store(2, mo_relaxed);   (parent thread, after the join)
    //     r1 = x.load(mo_relaxed);  (second spawned thread)
    let x = static_atomic(0);
    spawn(move || x.store(1, Relaxed)).join().unwrap();
    x.store(2, Relaxed);
    let r2 = spawn(move || x.load(Relaxed)).join().unwrap();
    assert_eq!(r2, 2); // the store of 2 happens-before the load via spawn/join
}

// The following two tests are taken from "Repairing Sequential Consistency in C/C++11"
// by Lahav et al.
// https://plv.mpi-sws.org/scfix/paper.pdf
fn test_sc_store_buffering() {
    let x = static_atomic(0);
    let y = static_atomic(0);

    let j1 = spawn(move || {
        x.store(1, SeqCst);
        y.load(SeqCst)
    });
    let j2 = spawn(move || {
        y.store(1, SeqCst);
        x.load(SeqCst)
    });

    let a = j1.join().unwrap();
    let b = j2.join().unwrap();
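
    // If both loads returned 0, each load would have to precede the other
    // thread's store in the single total order of SeqCst operations; together
    // with program order that forms a cycle, so at least one load must see 1.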
    assert_ne!((a, b), (0, 0));
}

fn test_single_thread() {
    let x = AtomicI32::new(42);

    assert_eq!(x.load(Relaxed), 42);

    x.store(43, Relaxed);

    assert_eq!(x.load(Relaxed), 43);
}

fn test_sync_through_rmw_and_fences() {
    // Example from https://github.com/llvm/llvm-project/issues/56450#issuecomment-1183695905
    pub fn rdmw(storing: &AtomicI32, sync: &AtomicI32, loading: &AtomicI32) -> i32 {
        storing.store(1, Relaxed);
        fence(SeqCst);
        sync.fetch_add(0, Relaxed);
        fence(SeqCst);
        loading.load(Relaxed)
    }
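
    // The two RMWs on `sync` target the same location, so the one that comes
    // second in its modification order reads the value written by the first.
    // The fence before the first RMW acts as a release fence and the fence after
    // the second as an acquire fence, so that read synchronizes the two threads:
    // the first thread's relaxed store is visible to the second thread's relaxed
    // load, and the two calls below cannot both return 0.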

    let x = static_atomic(0);
    let y = static_atomic(0);
    let z = static_atomic(0);

    // Since each thread is so short, we need to make sure that they truly run at
    // the same time. Otherwise t1 will finish before t2 even starts.
    let go = static_atomic_bool(false);

    let t1 = spawn(move || {
        while !go.load(Relaxed) {}
        rdmw(y, x, z)
    });
    let t2 = spawn(move || {
        while !go.load(Relaxed) {}
        rdmw(z, x, y)
    });

    go.store(true, Relaxed);

    let a = t1.join().unwrap();
    let b = t2.join().unwrap();
    assert_ne!((a, b), (0, 0));
}

pub fn main() {
    for _ in 0..50 {
        test_single_thread();
        test_mixed_access();
        test_load_buffering_acq_rel();
        test_message_passing();
        test_wrc();
        test_corr();
        test_sc_store_buffering();
        test_sync_through_rmw_and_fences();
    }
}