pub fn LLVMIsTailCall(CallInst: ValueRef) -> Bool;
pub fn LLVMSetTailCall(CallInst: ValueRef, IsTailCall: Bool);
+ /* Operations on load/store instructions (only) */
+ pub fn LLVMGetVolatile(MemoryAccessInst: ValueRef) -> Bool;
+ pub fn LLVMSetVolatile(MemoryAccessInst: ValueRef, volatile: Bool);
+
/* Operations on phi nodes */
pub fn LLVMAddIncoming(PhiNode: ValueRef,
IncomingValues: *ValueRef,
}
}
+/// Build a volatile load of `PointerVal`. Volatile accesses are kept by
+/// LLVM: they are not elided and not reordered across other volatile
+/// operations.
+pub fn VolatileLoad(cx: &Block, PointerVal: ValueRef) -> ValueRef {
+    unsafe {
+        // Dead block: emit nothing, just hand back an undef placeholder.
+        // NOTE(review): the undef is built with the nil type rather than
+        // the loaded type — confirm no caller inspects this value's type.
+        if cx.unreachable.get() { return llvm::LLVMGetUndef(Type::nil().to_ref()); }
+        B(cx).volatile_load(PointerVal)
+    }
+}
+
pub fn AtomicLoad(cx: &Block, PointerVal: ValueRef, order: AtomicOrdering) -> ValueRef {
unsafe {
let ccx = cx.fcx.ccx;
B(cx).store(Val, Ptr)
}
+/// Build a volatile store of `Val` through `Ptr`; the store is kept by
+/// LLVM and not reordered across other volatile operations. No-op when
+/// the current block is unreachable.
+pub fn VolatileStore(cx: &Block, Val: ValueRef, Ptr: ValueRef) {
+    if cx.unreachable.get() { return; }
+    B(cx).volatile_store(Val, Ptr)
+}
+
pub fn AtomicStore(cx: &Block, Val: ValueRef, Ptr: ValueRef, order: AtomicOrdering) {
if cx.unreachable.get() { return; }
B(cx).atomic_store(Val, Ptr, order)
}
}
+    /// Emit a `load` instruction and mark it volatile via the LLVM-C
+    /// `LLVMSetVolatile` API, so optimizer passes preserve the access.
+    pub fn volatile_load(&self, ptr: ValueRef) -> ValueRef {
+        self.count_insn("load.volatile");
+        unsafe {
+            let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
+            llvm::LLVMSetVolatile(insn, lib::llvm::True);
+            insn
+        }
+    }
+
pub fn atomic_load(&self, ptr: ValueRef, order: AtomicOrdering) -> ValueRef {
self.count_insn("load.atomic");
unsafe {
}
}
+    /// Emit a `store` instruction and mark it volatile via the LLVM-C
+    /// `LLVMSetVolatile` API, so optimizer passes preserve the access.
+    pub fn volatile_store(&self, val: ValueRef, ptr: ValueRef) {
+        debug!("Store {} -> {}",
+               self.ccx.tn.val_to_str(val),
+               self.ccx.tn.val_to_str(ptr));
+        assert!(is_not_null(self.llbuilder));
+        self.count_insn("store.volatile");
+        unsafe {
+            let insn = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
+            llvm::LLVMSetVolatile(insn, lib::llvm::True);
+        }
+    }
+
pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) {
debug!("Store {} -> {}",
self.ccx.tn.val_to_str(val),
}
}
+    /// Translate the `volatile_load` intrinsic: volatile-load through the
+    /// first real argument (a pointer) and return the loaded value.
+    fn volatile_load_intrinsic(bcx: @Block) {
+        let first_real_arg = bcx.fcx.arg_pos(0u);
+        let src = get_param(bcx.fcx.llfn, first_real_arg);
+
+        let val = VolatileLoad(bcx, src);
+        Ret(bcx, val);
+    }
+
+    /// Translate the `volatile_store` intrinsic: volatile-store the second
+    /// argument through the first (a mutable pointer) and return unit.
+    fn volatile_store_intrinsic(bcx: @Block) {
+        let first_real_arg = bcx.fcx.arg_pos(0u);
+        let dst = get_param(bcx.fcx.llfn, first_real_arg);
+        let val = get_param(bcx.fcx.llfn, first_real_arg + 1);
+
+        VolatileStore(bcx, val, dst);
+        RetVoid(bcx);
+    }
+
fn copy_intrinsic(bcx: @Block, allow_overlap: bool, tp_ty: ty::t) {
let ccx = bcx.ccx();
let lltp_ty = type_of::type_of(ccx, tp_ty);
"bswap32" => simple_llvm_intrinsic(bcx, "llvm.bswap.i32", 1),
"bswap64" => simple_llvm_intrinsic(bcx, "llvm.bswap.i64", 1),
+ "volatile_load" => volatile_load_intrinsic(bcx),
+ "volatile_store" => volatile_store_intrinsic(bcx),
+
"i8_add_with_overflow" =>
with_overflow_instrinsic(bcx, "llvm.sadd.with.overflow.i8", output_type),
"i16_add_with_overflow" =>
"bswap32" => (0, ~[ ty::mk_i32() ], ty::mk_i32()),
"bswap64" => (0, ~[ ty::mk_i64() ], ty::mk_i64()),
+ "volatile_load" =>
+ (1, ~[ ty::mk_imm_ptr(tcx, param(ccx, 0)) ], param(ccx, 0)),
+ "volatile_store" =>
+ (1, ~[ ty::mk_mut_ptr(tcx, param(ccx, 0)), param(ccx, 0) ], ty::mk_nil()),
+
"i8_add_with_overflow" | "i8_sub_with_overflow" | "i8_mul_with_overflow" =>
(0, ~[ty::mk_i8(), ty::mk_i8()],
ty::mk_tup(tcx, ~[ty::mk_i8(), ty::mk_bool()])),
The corresponding definitions are in librustc/middle/trans/foreign.rs.
+# Volatiles
+
+The volatile intrinsics provide operations intended to act on I/O
+memory, which are guaranteed not to be reordered by the compiler
+across other volatile intrinsics. See the LLVM documentation on
+[[volatile]].
+
+[volatile]: http://llvm.org/docs/LangRef.html#volatile-memory-accesses
+
# Atomics
The atomic intrinsics provide common atomic operations on machine
/// Execute a breakpoint trap, for inspection by a debugger.
pub fn breakpoint();
+ #[cfg(not(stage0))] pub fn volatile_load<T>(src: *T) -> T;
+ #[cfg(not(stage0))] pub fn volatile_store<T>(dst: *mut T, val: T);
+
/// Atomic compare and exchange, sequentially consistent.
pub fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int;
/// Atomic compare and exchange, acquire ordering.
--- /dev/null
+-include ../tools.mk
+
+# Compile and run the test program, then re-compile it emitting LLVM IR
+# and grep the IR to verify that the volatile load and store were not
+# optimized away.
+all:
+	# The tests must pass...
+	$(RUSTC) main.rs
+	$(call RUN,main)
+	# ... and the loads/stores must not be optimized out.
+	$(RUSTC) main.rs --emit-llvm -S
+	grep "load volatile" $(TMPDIR)/main.ll
+	grep "store volatile" $(TMPDIR)/main.ll
--- /dev/null
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::unstable::intrinsics::{volatile_load, volatile_store};
+
+pub fn main() {
+    unsafe {
+        // Round-trip a value through the volatile intrinsics; the run-make
+        // Makefile additionally greps the emitted IR for "load volatile" /
+        // "store volatile" to ensure the accesses survive optimization.
+        let mut i : int = 1;
+        volatile_store(&mut i, 2);
+        assert_eq!(volatile_load(&i), 2);
+    }
+}