/// [`std::ptr::write_volatile`](../../std/ptr/fn.write_volatile.html).
pub fn volatile_store<T>(dst: *mut T, val: T);
+ /// Perform a volatile load from the `src` pointer.
+ /// The pointer is not required to be aligned.
+ #[cfg(not(stage0))]
+ pub fn unaligned_volatile_load<T>(src: *const T) -> T;
+ /// Perform a volatile store to the `dst` pointer.
+ /// The pointer is not required to be aligned.
+ #[cfg(not(stage0))]
+ pub fn unaligned_volatile_store<T>(dst: *mut T, val: T);
+
/// Returns the square root of an `f32`
pub fn sqrtf32(x: f32) -> f32;
/// Returns the square root of an `f64`
pub struct MemFlags: u8 {
const VOLATILE = 1 << 0;
const NONTEMPORAL = 1 << 1;
+ const UNALIGNED = 1 << 2;
}
}
let ptr = self.check_store(val, ptr);
unsafe {
let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
- llvm::LLVMSetAlignment(store, align.abi() as c_uint);
+ let align = if flags.contains(MemFlags::UNALIGNED) {
+ 1
+ } else {
+ align.abi() as c_uint
+ };
+ llvm::LLVMSetAlignment(store, align);
if flags.contains(MemFlags::VOLATILE) {
llvm::LLVMSetVolatile(store, llvm::True);
}
memset_intrinsic(bx, true, substs.type_at(0),
args[0].immediate(), args[1].immediate(), args[2].immediate())
}
- "volatile_load" => {
+ "volatile_load" | "unaligned_volatile_load" => {
let tp_ty = substs.type_at(0);
let mut ptr = args[0].immediate();
if let PassMode::Cast(ty) = fn_ty.ret.mode {
ptr = bx.pointercast(ptr, ty.llvm_type(cx).ptr_to());
}
let load = bx.volatile_load(ptr);
+ let align = if name == "unaligned_volatile_load" {
+ 1
+ } else {
+ cx.align_of(tp_ty).abi() as u32
+ };
unsafe {
- llvm::LLVMSetAlignment(load, cx.align_of(tp_ty).abi() as u32);
+ llvm::LLVMSetAlignment(load, align);
}
to_immediate(bx, load, cx.layout_of(tp_ty))
},
args[1].val.volatile_store(bx, dst);
return;
},
+ "unaligned_volatile_store" => {
+ let dst = args[0].deref(bx.cx);
+ args[1].val.unaligned_volatile_store(bx, dst);
+ return;
+ },
"prefetch_read_data" | "prefetch_write_data" |
"prefetch_read_instruction" | "prefetch_write_instruction" => {
let expect = cx.get_intrinsic(&("llvm.prefetch"));
self.store_with_flags(bx, dest, MemFlags::VOLATILE);
}
+ /// Store `self` into `dest` as a volatile store with no alignment
+ /// requirement: sets both `MemFlags::VOLATILE` and `MemFlags::UNALIGNED`,
+ /// so the emitted LLVM store is marked volatile with alignment 1.
+ pub fn unaligned_volatile_store(self, bx: &Builder<'a, 'tcx>, dest: PlaceRef<'tcx>) {
+ self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED);
+ }
+
/// Store `self` into `dest` with only `MemFlags::NONTEMPORAL` set
/// (neither volatile nor unaligned); alignment is the type's ABI alignment.
pub fn nontemporal_store(self, bx: &Builder<'a, 'tcx>, dest: PlaceRef<'tcx>) {
self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL);
}
"roundf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32),
"roundf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64),
- "volatile_load" =>
+ "volatile_load" | "unaligned_volatile_load" =>
(1, vec![ tcx.mk_imm_ptr(param(0)) ], param(0)),
- "volatile_store" =>
+ "volatile_store" | "unaligned_volatile_store" =>
(1, vec![ tcx.mk_mut_ptr(param(0)), param(0) ], tcx.mk_nil()),
"ctpop" | "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" |
#![feature(core_intrinsics, volatile)]
-use std::intrinsics::{volatile_load, volatile_store};
+use std::intrinsics::{
+ unaligned_volatile_load, unaligned_volatile_store, volatile_load, volatile_store,
+};
use std::ptr::{read_volatile, write_volatile};
pub fn main() {
unsafe {
- let mut i : isize = 1;
+ let mut i: isize = 1;
volatile_store(&mut i, 2);
assert_eq!(volatile_load(&i), 2);
}
unsafe {
- let mut i : isize = 1;
+ let mut i: isize = 1;
+ unaligned_volatile_store(&mut i, 2);
+ assert_eq!(unaligned_volatile_load(&i), 2);
+ }
+ unsafe {
+ let mut i: isize = 1;
write_volatile(&mut i, 2);
assert_eq!(read_volatile(&i), 2);
}