1 // assembly-output: ptx-linker
2 // compile-flags: --crate-type cdylib
6 #![feature(abi_ptx, core_intrinsics)]
9 use core::intrinsics::*;
11 // aux-build: breakpoint-panic-handler.rs
12 extern crate breakpoint_panic_handler;
// Currently, LLVM NVPTX backend can only emit atomic instructions with
// `relaxed` (PTX default) ordering. But it's also useful to make sure
// the backend won't fail with other orderings. Apparently, the backend
// doesn't support fences either. As a workaround, `llvm.nvvm.membar.*`
// could work, and perhaps in the long run, all the atomic operations
// should rather be provided by `core::arch::nvptx`.
21 // Also, PTX ISA doesn't have atomic `load`, `store` and `nand`.
23 // FIXME(denzp): add tests for `core::sync::atomic::*`.
26 pub unsafe extern "ptx-kernel" fn atomics_kernel(a: *mut u32) {
27 // CHECK: atom.global.and.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
28 // CHECK: atom.global.and.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
30 atomic_and_relaxed(a, 1);
32 // CHECK: atom.global.cas.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1, 2;
33 // CHECK: atom.global.cas.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1, 2;
34 atomic_cxchg(a, 1, 2);
35 atomic_cxchg_relaxed(a, 1, 2);
37 // CHECK: atom.global.max.s32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
38 // CHECK: atom.global.max.s32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
40 atomic_max_relaxed(a, 1);
42 // CHECK: atom.global.min.s32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
43 // CHECK: atom.global.min.s32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
45 atomic_min_relaxed(a, 1);
47 // CHECK: atom.global.or.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
48 // CHECK: atom.global.or.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
50 atomic_or_relaxed(a, 1);
52 // CHECK: atom.global.max.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
53 // CHECK: atom.global.max.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
55 atomic_umax_relaxed(a, 1);
57 // CHECK: atom.global.min.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
58 // CHECK: atom.global.min.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
60 atomic_umin_relaxed(a, 1);
62 // CHECK: atom.global.add.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
63 // CHECK: atom.global.add.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
65 atomic_xadd_relaxed(a, 1);
67 // CHECK: atom.global.exch.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
68 // CHECK: atom.global.exch.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
70 atomic_xchg_relaxed(a, 1);
72 // CHECK: atom.global.xor.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
73 // CHECK: atom.global.xor.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
75 atomic_xor_relaxed(a, 1);
77 // CHECK: mov.u32 %[[sub_0_arg:r[0-9]+]], 100;
78 // CHECK: neg.s32 temp, %[[sub_0_arg]];
79 // CHECK: atom.global.add.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], temp;
82 // CHECK: mov.u32 %[[sub_1_arg:r[0-9]+]], 200;
83 // CHECK: neg.s32 temp, %[[sub_1_arg]];
84 // CHECK: atom.global.add.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], temp;
85 atomic_xsub_relaxed(a, 200);