#[doc(hidden)];
-use compare_and_swap = rustrt::rust_compare_and_swap_ptr;
use task::TaskBuilder;
use task::atomically;
fn rust_task_weaken(ch: rust_port_id);
fn rust_task_unweaken(ch: rust_port_id);
- #[rust_stack]
- fn rust_atomic_increment(p: &mut libc::intptr_t)
- -> libc::intptr_t;
-
- #[rust_stack]
- fn rust_atomic_decrement(p: &mut libc::intptr_t)
- -> libc::intptr_t;
-
#[rust_stack]
fn rust_compare_and_swap_ptr(address: &mut libc::uintptr_t,
oldval: libc::uintptr_t,
fn rust_unlock_little_lock(lock: rust_little_lock);
}
+// Compiler intrinsics for atomic integer operations; these lower
+// directly to LLVM atomic instructions via the AtomicCmpXchg /
+// AtomicRMW trans glue added elsewhere in this patch.
+#[abi = "rust-intrinsic"]
+extern mod rusti {
+
+    // atomic_cxchg only exists in stage1+ compilers; the stage0
+    // snapshot predates it, so stage0 uses the runtime-based
+    // compare_and_swap fallback defined below.
+    #[cfg(stage1)] #[cfg(stage2)] #[cfg(stage3)]
+    fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int;
+    fn atomic_xadd(dst: &mut int, src: int) -> int;
+    fn atomic_xsub(dst: &mut int, src: int) -> int;
+}
+
#[allow(non_camel_case_types)] // runtime type
type rust_port_id = uint;
type GlobalPtr = *libc::uintptr_t;
+// Stage0-snapshot fallback: the snapshot compiler predates the
+// atomic_cxchg intrinsic, so CAS is delegated to the C runtime's
+// rust_compare_and_swap_ptr. Returns true when the swap took place
+// (i.e. *address held `oldval` and was replaced with `newval`),
+// matching the stage1+ intrinsic-based twin below.
+// TODO: Remove once snapshots have atomic_cxchg
+#[cfg(stage0)]
+fn compare_and_swap(address: &mut libc::uintptr_t,
+                    oldval: libc::uintptr_t,
+                    newval: libc::uintptr_t) -> bool {
+    rustrt::rust_compare_and_swap_ptr(address, oldval, newval)
+}
+
+// Compare-and-swap built on the atomic_cxchg intrinsic (stage1+ only;
+// stage0 uses the runtime fallback above). atomic_cxchg returns the
+// value that was actually observed at `address`, so the swap succeeded
+// exactly when that observed value equals `oldval`.
+#[cfg(stage1)]
+#[cfg(stage2)]
+#[cfg(stage3)]
+fn compare_and_swap(address: &mut int, oldval: int, newval: int) -> bool {
+    let old = rusti::atomic_cxchg(address, oldval, newval);
+    old == oldval
+}
+
/**
* Atomically gets a channel from a pointer to a pointer-sized memory location
* or, if no channel exists creates and installs a new channel and sets up a
log(debug,~"BEFORE COMPARE AND SWAP");
let swapped = compare_and_swap(
cast::reinterpret_cast(&global),
- 0u, cast::reinterpret_cast(&ch));
+ 0, cast::reinterpret_cast(&ch));
log(debug,fmt!("AFTER .. swapped? %?", swapped));
if swapped {
}
do task::unkillable {
let data: ~ArcData<T> = cast::reinterpret_cast(&self.data);
- let new_count = rustrt::rust_atomic_decrement(&mut data.count);
+ let new_count = rusti::atomic_xsub(&mut data.count, 1) - 1;
assert new_count >= 0;
if new_count == 0 {
// Were we really last, or should we hand off to an unwrapper?
// Got in. Step 0: Tell destructor not to run. We are now it.
rc.data = ptr::null();
// Step 1 - drop our own reference.
- let new_count = rustrt::rust_atomic_decrement(&mut ptr.count);
- // assert new_count >= 0;
+ let new_count = rusti::atomic_xsub(&mut ptr.count, 1) - 1;
+    // assert new_count >= 0;
if new_count == 0 {
// We were the last owner. Can unwrap immediately.
// Also we have to free the server endpoints.
-> SharedMutableState<T> {
unsafe {
let ptr: ~ArcData<T> = cast::reinterpret_cast(&(*rc).data);
- let new_count = rustrt::rust_atomic_increment(&mut ptr.count);
+ let new_count = rusti::atomic_xadd(&mut ptr.count, 1) + 1;
assert new_count >= 2;
cast::forget(move ptr);
}
return sync::compare_and_swap(address, oldval, newval);
}
-extern "C" CDECL intptr_t
-rust_atomic_increment(intptr_t *address) {
- return sync::increment(address);
-}
-
-extern "C" CDECL intptr_t
-rust_atomic_decrement(intptr_t *address) {
- return sync::decrement(address);
-}
-
extern "C" CDECL void
rust_task_weaken(rust_port_id chan) {
rust_task *task = rust_get_current_task();
rust_dbg_breakpoint
rust_osmain_sched_id
rust_compare_and_swap_ptr
-rust_atomic_increment
-rust_atomic_decrement
rust_global_env_chan_ptr
rust_port_take
rust_port_drop
rust_uv_ip4_port
rust_uv_ip6_port
rust_uv_tcp_getpeername
-rust_uv_tcp_getpeername6
\ No newline at end of file
+rust_uv_tcp_getpeername6
Name: *c_char) -> ValueRef;
/* Atomic Operations */
+ fn LLVMBuildAtomicCmpXchg(B: BuilderRef, LHS: ValueRef,
+ CMP: ValueRef, RHS: ValueRef,
+ ++Order: AtomicOrdering) -> ValueRef;
fn LLVMBuildAtomicRMW(B: BuilderRef, ++Op: AtomicBinOp,
LHS: ValueRef, RHS: ValueRef,
++Order: AtomicOrdering) -> ValueRef;
}
// Atomic Operations
+// Build an LLVM `cmpxchg` instruction in the current block: atomically
+// compare *dst with `cmp` and store `src` on a match, with the given
+// memory ordering. The returned ValueRef is treated by the intrinsic
+// glue as the value previously held at `dst`.
+fn AtomicCmpXchg(cx: block, dst: ValueRef,
+                 cmp: ValueRef, src: ValueRef,
+                 order: AtomicOrdering) -> ValueRef {
+    llvm::LLVMBuildAtomicCmpXchg(B(cx), dst, cmp, src, order)
+}
fn AtomicRMW(cx: block, op: AtomicBinOp,
dst: ValueRef, src: ValueRef,
order: AtomicOrdering) -> ValueRef {
Some(substs), Some(item.span));
let mut bcx = top_scope_block(fcx, None), lltop = bcx.llbb;
match ccx.sess.str_of(item.ident) {
+ ~"atomic_cxchg" => {
+ let old = AtomicCmpXchg(bcx,
+ get_param(decl, first_real_arg),
+ get_param(decl, first_real_arg + 1u),
+ get_param(decl, first_real_arg + 2u),
+ SequentiallyConsistent);
+ Store(bcx, old, fcx.llretptr);
+ }
+ ~"atomic_cxchg_acq" => {
+ let old = AtomicCmpXchg(bcx,
+ get_param(decl, first_real_arg),
+ get_param(decl, first_real_arg + 1u),
+ get_param(decl, first_real_arg + 2u),
+ Acquire);
+ Store(bcx, old, fcx.llretptr);
+ }
+ ~"atomic_cxchg_rel" => {
+ let old = AtomicCmpXchg(bcx,
+ get_param(decl, first_real_arg),
+ get_param(decl, first_real_arg + 1u),
+ get_param(decl, first_real_arg + 2u),
+ Release);
+ Store(bcx, old, fcx.llretptr);
+ }
~"atomic_xchg" => {
let old = AtomicRMW(bcx, Xchg,
get_param(decl, first_real_arg),
~"get_tydesc" | ~"needs_drop" => use_tydesc,
- ~"atomic_xchg" | ~"atomic_xadd" |
- ~"atomic_xsub" | ~"atomic_xchg_acq" |
- ~"atomic_xadd_acq" | ~"atomic_xsub_acq" |
- ~"atomic_xchg_rel" | ~"atomic_xadd_rel" |
- ~"atomic_xsub_rel" => 0,
+      ~"atomic_cxchg" | ~"atomic_cxchg_acq" |
+      ~"atomic_cxchg_rel" | ~"atomic_xchg" |
+ ~"atomic_xadd" | ~"atomic_xsub" |
+ ~"atomic_xchg_acq" | ~"atomic_xadd_acq" |
+ ~"atomic_xsub_acq" | ~"atomic_xchg_rel" |
+ ~"atomic_xadd_rel" | ~"atomic_xsub_rel" => 0,
~"visit_tydesc" | ~"forget" | ~"addr_of" |
~"frame_address" | ~"morestack_addr" => 0,
}
~"needs_drop" => (1u, ~[], ty::mk_bool(tcx)),
- ~"atomic_xchg" | ~"atomic_xadd" | ~"atomic_xsub" |
+      ~"atomic_cxchg" | ~"atomic_cxchg_acq" | ~"atomic_cxchg_rel" => {
+ (0u, ~[arg(ast::by_copy,
+ ty::mk_mut_rptr(tcx, ty::re_bound(ty::br_anon(0)),
+ ty::mk_int(tcx))),
+ arg(ast::by_copy, ty::mk_int(tcx)),
+ arg(ast::by_copy, ty::mk_int(tcx))],
+ ty::mk_int(tcx))
+ }
+ ~"atomic_xchg" | ~"atomic_xadd" | ~"atomic_xsub" |
~"atomic_xchg_acq" | ~"atomic_xadd_acq" | ~"atomic_xsub_acq" |
~"atomic_xchg_rel" | ~"atomic_xadd_rel" | ~"atomic_xsub_rel" => {
(0u, ~[arg(ast::by_copy,
return LLVMMetadataTypeInContext(LLVMGetGlobalContext());
}
+// C-ABI shim over LLVM's IRBuilder::CreateAtomicCmpXchg so rustc's
+// Rust-side LLVM bindings can emit cmpxchg instructions: atomically
+// compare *target with `old` and, on a match, store `source`, using
+// the supplied memory ordering. Returns the wrapped instruction value.
+extern "C" LLVMValueRef LLVMBuildAtomicCmpXchg(LLVMBuilderRef B,
+                                               LLVMValueRef target,
+                                               LLVMValueRef old,
+                                               LLVMValueRef source,
+                                               AtomicOrdering order) {
+    return wrap(unwrap(B)->CreateAtomicCmpXchg(unwrap(target), unwrap(old),
+                                               unwrap(source), order));
+}
extern "C" LLVMValueRef LLVMBuildAtomicRMW(LLVMBuilderRef B,
AtomicRMWInst::BinOp op,
LLVMValueRef target,
LLVMBasicBlockAsValue
LLVMBlockAddress
LLVMBuildAShr
+LLVMBuildAtomicCmpXchg
LLVMBuildAtomicRMW
LLVMBuildAdd
LLVMBuildAggregateRet
#[abi = "rust-intrinsic"]
extern mod rusti {
#[legacy_exports];
+ fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int;
+ fn atomic_cxchg_acq(dst: &mut int, old: int, src: int) -> int;
+ fn atomic_cxchg_rel(dst: &mut int, old: int, src: int) -> int;
+
fn atomic_xchg(dst: &mut int, src: int) -> int;
fn atomic_xchg_acq(dst: &mut int, src: int) -> int;
fn atomic_xchg_rel(dst: &mut int, src: int) -> int;
#[abi = "rust-intrinsic"]
extern mod rusti {
#[legacy_exports];
+ fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int;
+ fn atomic_cxchg_acq(dst: &mut int, old: int, src: int) -> int;
+ fn atomic_cxchg_rel(dst: &mut int, old: int, src: int) -> int;
+
fn atomic_xchg(dst: &mut int, src: int) -> int;
fn atomic_xchg_acq(dst: &mut int, src: int) -> int;
fn atomic_xchg_rel(dst: &mut int, src: int) -> int;
fn main() {
let x = ~mut 1;
+ assert rusti::atomic_cxchg(x, 1, 2) == 1;
+ assert *x == 2;
+
+ assert rusti::atomic_cxchg_acq(x, 1, 3) == 2;
+ assert *x == 2;
+
+ assert rusti::atomic_cxchg_rel(x, 2, 1) == 2;
+ assert *x == 1;
+
assert rusti::atomic_xchg(x, 0) == 1;
assert *x == 0;