use rt::sched::Scheduler;
use rt::local::Local;
use rt::select::{Select, SelectPort};
-use unstable::atomics::{AtomicUint, AtomicOption, Acquire, Release, SeqCst};
+use unstable::atomics::{AtomicUint, AtomicOption, Acquire, Relaxed, SeqCst};
use unstable::sync::UnsafeAtomicRcBox;
use util::Void;
use comm::{GenericChan, GenericSmartChan, GenericPort, Peekable};
}
STATE_ONE => {
// Re-record that we are the only owner of the packet.
- // Release barrier needed in case the task gets reawoken
- // on a different core (this is analogous to writing a
- // payload; a barrier in enqueueing the task protects it).
+ // No barrier needed, even if the task gets reawoken
+ // on a different core -- this is analogous to writing a
+ // payload; a barrier in enqueueing the task protects it.
// NB(#8132). This *must* occur before the enqueue below.
// FIXME(#6842, #8130) This is usually only needed for the
// assertion in recv_ready, except in the case of select().
// This won't actually ever have cacheline contention, but
// maybe should be optimized out with a cfg(test) anyway?
- (*self.packet()).state.store(STATE_ONE, Release);
+ (*self.packet()).state.store(STATE_ONE, Relaxed);
rtdebug!("rendezvous recv");
sched.metrics.rendezvous_recvs += 1;
unsafe {
// See corresponding store() above in block_on for rationale.
// FIXME(#8130) This can happen only in test builds.
- assert!((*packet).state.load(Acquire) == STATE_ONE);
+ assert!((*packet).state.load(Relaxed) == STATE_ONE);
let payload = (*packet).payload.take();
use prelude::*;
use rt::task::Task;
use to_bytes::IterBytes;
-use unstable::atomics::{AtomicUint, Acquire, SeqCst};
+use unstable::atomics::{AtomicUint, Relaxed};
use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
use util;
// Letting a KillFlag with a task inside get dropped would leak the task.
// We could free it here, but the task should get awoken by hand somehow.
fn drop(&self) {
- match self.load(Acquire) {
+ match self.load(Relaxed) {
KILL_RUNNING | KILL_KILLED => { },
_ => rtabort!("can't drop kill flag with a blocked task inside!"),
}
Unkillable(task) => Some(task),
Killable(flag_arc) => {
let flag = unsafe { &mut **flag_arc.get() };
- match flag.swap(KILL_RUNNING, SeqCst) {
+ match flag.swap(KILL_RUNNING, Relaxed) {
KILL_RUNNING => None, // woken from select(), perhaps
KILL_KILLED => None, // a killer stole it already
task_ptr =>
let flag = &mut **flag_arc.get();
let task_ptr = cast::transmute(task);
// Expect flag to contain RUNNING. If KILLED, it should stay KILLED.
- match flag.compare_and_swap(KILL_RUNNING, task_ptr, SeqCst) {
+ match flag.compare_and_swap(KILL_RUNNING, task_ptr, Relaxed) {
KILL_RUNNING => Right(Killable(flag_arc)),
KILL_KILLED => Left(revive_task_ptr(task_ptr, Some(flag_arc))),
x => rtabort!("can't block task! kill flag = %?", x),
let inner = unsafe { &mut *self.get() };
// Expect flag to contain RUNNING. If KILLED, it should stay KILLED.
// FIXME(#7544)(bblum): is it really necessary to prohibit double kill?
- match inner.unkillable.compare_and_swap(KILL_RUNNING, KILL_UNKILLABLE, SeqCst) {
+ match inner.unkillable.compare_and_swap(KILL_RUNNING, KILL_UNKILLABLE, Relaxed) {
KILL_RUNNING => { }, // normal case
KILL_KILLED => if !already_failing { fail!(KILLED_MSG) },
_ => rtabort!("inhibit_kill: task already unkillable"),
let inner = unsafe { &mut *self.get() };
// Expect flag to contain UNKILLABLE. If KILLED, it should stay KILLED.
// FIXME(#7544)(bblum): is it really necessary to prohibit double kill?
- match inner.unkillable.compare_and_swap(KILL_UNKILLABLE, KILL_RUNNING, SeqCst) {
+ match inner.unkillable.compare_and_swap(KILL_UNKILLABLE, KILL_RUNNING, Relaxed) {
KILL_UNKILLABLE => { }, // normal case
KILL_KILLED => if !already_failing { fail!(KILLED_MSG) },
_ => rtabort!("allow_kill: task already killable"),
// if it was blocked and needs punted awake. To be called by other tasks.
pub fn kill(&mut self) -> Option<~Task> {
let inner = unsafe { &mut *self.get() };
- if inner.unkillable.swap(KILL_KILLED, SeqCst) == KILL_RUNNING {
+ if inner.unkillable.swap(KILL_KILLED, Relaxed) == KILL_RUNNING {
// Got in. Allowed to try to punt the task awake.
let flag = unsafe { &mut *inner.killed.get() };
- match flag.swap(KILL_KILLED, SeqCst) {
+ match flag.swap(KILL_KILLED, Relaxed) {
// Task either not blocked or already taken care of.
KILL_RUNNING | KILL_KILLED => None,
// Got ownership of the blocked task.
// is unkillable with a kill signal pending.
let inner = unsafe { &*self.get() };
let flag = unsafe { &*inner.killed.get() };
- // FIXME(#6598): can use relaxed ordering (i think)
- flag.load(Acquire) == KILL_KILLED
+ // A barrier-related concern here is that a task punted awake by a
+ // killer needs to observe that killer's write of KILL_KILLED to this
+ // flag. This is analogous to receiving a pipe payload; the appropriate
+ // barrier should happen when enqueueing the task.
+ flag.load(Relaxed) == KILL_KILLED
}
pub fn notify_immediate_failure(&mut self) {
use option::*;
use either::{Either, Left, Right};
use task;
-use unstable::atomics::{AtomicOption,AtomicUint,Acquire,Release,SeqCst};
+use unstable::atomics::{AtomicOption,AtomicUint,Acquire,Release,Relaxed,SeqCst};
use unstable::finally::Finally;
use ops::Drop;
use clone::Clone;
pub fn get(&self) -> *mut T {
unsafe {
let mut data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
- // FIXME(#6598) Change Acquire to Relaxed.
- assert!(data.count.load(Acquire) > 0);
+ assert!(data.count.load(Relaxed) > 0);
let r: *mut T = data.data.get_mut_ref();
cast::forget(data);
return r;
pub fn get_immut(&self) -> *T {
unsafe {
let data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
- assert!(data.count.load(Acquire) > 0); // no barrier is really needed
+ assert!(data.count.load(Relaxed) > 0);
let r: *T = data.data.get_ref();
cast::forget(data);
return r;
// Try to put our server end in the unwrapper slot.
// This needs no barrier -- it's protected by the release barrier on
// the xadd, and the acquire+release barrier in the destructor's xadd.
- // FIXME(#6598) Change Acquire to Relaxed.
- if data.unwrapper.fill(~(c1,p2), Acquire).is_none() {
+ if data.unwrapper.fill(~(c1,p2), Relaxed).is_none() {
// Got in. Tell this handle's destructor not to run (we are now it).
this.data = ptr::mut_null();
// Drop our own reference.