//@ignore-target-windows: Concurrency on Windows is not supported yet.
//@compile-flags: -Zmiri-ignore-leaks
// Tests operations not performable through C++'s atomic API
// but doable in safe (at least sound) Rust.
7 #![feature(atomic_from_mut)]
9 use std::sync::atomic::Ordering::*;
10 use std::sync::atomic::{AtomicU16, AtomicU32};
11 use std::thread::spawn;
/// Leaks a heap-allocated `AtomicU32` initialized to `val`, yielding a
/// `&'static mut` reference. The leak is intentional (the test runs with
/// `-Zmiri-ignore-leaks`): a `'static` *mutable* reference is needed so the
/// atomic can later be overwritten non-atomically and still be moved into
/// a spawned thread.
fn static_atomic_mut(val: u32) -> &'static mut AtomicU32 {
    let ret = Box::leak(Box::new(AtomicU32::new(val)));
    ret
}
/// Reinterprets a `&mut u32` as two `u16` halves in native byte order.
// SAFETY: `u32` and `[u16; 2]` have identical size (4 bytes) and compatible
// alignment (align_of::<u32>() >= align_of::<[u16; 2]>()), and both types
// have no invalid bit patterns, so the transmute of the reference is sound.
// The halves' order is endianness-dependent; callers must not assume
// hi/lo placement (this test derives expected values via from_be).
fn split_u32(dword: &mut u32) -> &mut [u16; 2] {
    unsafe { std::mem::transmute::<&mut u32, &mut [u16; 2]>(dword) }
}
/// Moves a whole atomic out with `std::mem::replace` — a non-atomic
/// read-modify of the atomic's storage that C++'s atomic API cannot express,
/// but which is sound in Rust because `&mut` proves exclusive access.
fn mem_replace() {
    let mut x = AtomicU32::new(0);
    // Non-atomically swap the atomic object itself, not its value.
    let old_x = std::mem::replace(&mut x, AtomicU32::new(42));
    assert_eq!(x.load(Relaxed), 42);
    assert_eq!(old_x.load(Relaxed), 0);
}
32 let x = static_atomic_mut(0);
35 *x = AtomicU32::new(2);
37 assert_eq!(x.load(Relaxed), 2);
41 let x = static_atomic_mut(0);
44 let x_mut = x.get_mut();
48 let j1 = spawn(move || x.load(Relaxed));
50 let r1 = j1.join().unwrap();
54 // This is technically doable in C++ with atomic_ref
55 // but little literature exists atm on its involvement
56 // in mixed size/atomicity accesses
61 let x_atomic = AtomicU32::from_mut(&mut x);
62 x_atomic.store(u32::from_be(0xabbafafa), Relaxed);
65 // Split the `AtomicU32` into two `AtomicU16`.
66 // Crucially, there is no non-atomic access to `x`! All accesses are atomic, but of different size.
67 let (x_hi, x_lo) = split_u32(&mut x).split_at_mut(1);
69 let x_hi_atomic = AtomicU16::from_mut(&mut x_hi[0]);
70 let x_lo_atomic = AtomicU16::from_mut(&mut x_lo[0]);
72 assert_eq!(x_hi_atomic.load(Relaxed), u16::from_be(0xabba));
73 assert_eq!(x_lo_atomic.load(Relaxed), u16::from_be(0xfafa));