git.lizzy.rs Git - rust.git/commitdiff
Relax some atomic barriers. Loosen up all that tension. There, doesn't that feel...
author     Ben Blum <bblum@andrew.cmu.edu>
Wed, 31 Jul 2013 19:56:18 +0000 (15:56 -0400)
committer  Ben Blum <bblum@andrew.cmu.edu>
Thu, 1 Aug 2013 20:52:37 +0000 (16:52 -0400)
src/libstd/rt/comm.rs
src/libstd/rt/kill.rs
src/libstd/unstable/sync.rs

index a27ff559b2ba10cad8a8892c0fa1ec7e9deebc4e..11ba42f805b55b9c1ec52eaa7c69a94fba6b415c 100644
--- a/src/libstd/rt/comm.rs
+++ b/src/libstd/rt/comm.rs
@@ -18,7 +18,7 @@
 use rt::sched::Scheduler;
 use rt::local::Local;
 use rt::select::{Select, SelectPort};
-use unstable::atomics::{AtomicUint, AtomicOption, Acquire, Release, SeqCst};
+use unstable::atomics::{AtomicUint, AtomicOption, Acquire, Relaxed, SeqCst};
 use unstable::sync::UnsafeAtomicRcBox;
 use util::Void;
 use comm::{GenericChan, GenericSmartChan, GenericPort, Peekable};
@@ -216,15 +216,15 @@ fn block_on(&mut self, sched: &mut Scheduler, task: BlockedTask) -> bool {
                 }
                 STATE_ONE => {
                     // Re-record that we are the only owner of the packet.
-                    // Release barrier needed in case the task gets reawoken
-                    // on a different core (this is analogous to writing a
-                    // payload; a barrier in enqueueing the task protects it).
+                    // No barrier needed, even if the task gets reawoken
+                    // on a different core -- this is analogous to writing a
+                    // payload; a barrier in enqueueing the task protects it.
                     // NB(#8132). This *must* occur before the enqueue below.
                     // FIXME(#6842, #8130) This is usually only needed for the
                     // assertion in recv_ready, except in the case of select().
                     // This won't actually ever have cacheline contention, but
                     // maybe should be optimized out with a cfg(test) anyway?
-                    (*self.packet()).state.store(STATE_ONE, Release);
+                    (*self.packet()).state.store(STATE_ONE, Relaxed);
 
                     rtdebug!("rendezvous recv");
                     sched.metrics.rendezvous_recvs += 1;
@@ -299,7 +299,7 @@ fn recv_ready(self) -> Option<T> {
         unsafe {
             // See corresponding store() above in block_on for rationale.
             // FIXME(#8130) This can happen only in test builds.
-            assert!((*packet).state.load(Acquire) == STATE_ONE);
+            assert!((*packet).state.load(Relaxed) == STATE_ONE);
 
             let payload = (*packet).payload.take();
 
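The comm.rs reasoning above is the standard release/publish argument: a Relaxed store is carried along by whatever Release operation later hands the data to another core, so the enqueue of the blocked task is the only place a barrier is needed. A rough sketch of that argument, written against today's std::sync::atomic rather than the old unstable::atomics API (the state/enqueued names and the use of plain threads for scheduler cores are illustrative stand-ins, not the runtime's code):

    use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
    use std::sync::Arc;
    use std::thread;

    const STATE_ONE: usize = 1;

    fn main() {
        // `state` plays the packet's state word; `enqueued` plays the queue
        // that hands the reawoken task to another scheduler/core.
        let state = Arc::new(AtomicUsize::new(0));
        let enqueued = Arc::new(AtomicBool::new(false));

        let (s, e) = (state.clone(), enqueued.clone());
        let sender = thread::spawn(move || {
            // Like the store in block_on: no barrier of its own...
            s.store(STATE_ONE, Ordering::Relaxed);
            // ...because the Release "enqueue" below orders everything
            // written before it for whoever Acquires the same flag.
            e.store(true, Ordering::Release);
        });

        // The woken side Acquires the enqueue; that makes the earlier Relaxed
        // store visible, which is why recv_ready's Relaxed assert cannot fail.
        while !enqueued.load(Ordering::Acquire) {}
        assert_eq!(state.load(Ordering::Relaxed), STATE_ONE);
        sender.join().unwrap();
    }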
index c2571f171a17257a09d0539875eb4aa3ac850edf..729c682dbd114602369b63baa202bcbcb608ba85 100644
--- a/src/libstd/rt/kill.rs
+++ b/src/libstd/rt/kill.rs
@@ -17,7 +17,7 @@
 use prelude::*;
 use rt::task::Task;
 use to_bytes::IterBytes;
-use unstable::atomics::{AtomicUint, Acquire, SeqCst};
+use unstable::atomics::{AtomicUint, Relaxed};
 use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
 use util;
 
@@ -95,7 +95,7 @@ impl Drop for KillFlag {
     // Letting a KillFlag with a task inside get dropped would leak the task.
     // We could free it here, but the task should get awoken by hand somehow.
     fn drop(&self) {
-        match self.load(Acquire) {
+        match self.load(Relaxed) {
             KILL_RUNNING | KILL_KILLED => { },
             _ => rtabort!("can't drop kill flag with a blocked task inside!"),
         }
@@ -124,7 +124,7 @@ pub fn wake(self) -> Option<~Task> {
             Unkillable(task) => Some(task),
             Killable(flag_arc) => {
                 let flag = unsafe { &mut **flag_arc.get() };
-                match flag.swap(KILL_RUNNING, SeqCst) {
+                match flag.swap(KILL_RUNNING, Relaxed) {
                     KILL_RUNNING => None, // woken from select(), perhaps
                     KILL_KILLED  => None, // a killer stole it already
                     task_ptr     =>
@@ -159,7 +159,7 @@ pub fn try_block(mut task: ~Task) -> Either<~Task, BlockedTask> {
                 let flag     = &mut **flag_arc.get();
                 let task_ptr = cast::transmute(task);
                 // Expect flag to contain RUNNING. If KILLED, it should stay KILLED.
-                match flag.compare_and_swap(KILL_RUNNING, task_ptr, SeqCst) {
+                match flag.compare_and_swap(KILL_RUNNING, task_ptr, Relaxed) {
                     KILL_RUNNING => Right(Killable(flag_arc)),
                     KILL_KILLED  => Left(revive_task_ptr(task_ptr, Some(flag_arc))),
                     x            => rtabort!("can't block task! kill flag = %?", x),
@@ -257,7 +257,7 @@ pub fn inhibit_kill(&mut self, already_failing: bool) {
         let inner = unsafe { &mut *self.get() };
         // Expect flag to contain RUNNING. If KILLED, it should stay KILLED.
         // FIXME(#7544)(bblum): is it really necessary to prohibit double kill?
-        match inner.unkillable.compare_and_swap(KILL_RUNNING, KILL_UNKILLABLE, SeqCst) {
+        match inner.unkillable.compare_and_swap(KILL_RUNNING, KILL_UNKILLABLE, Relaxed) {
             KILL_RUNNING    => { }, // normal case
             KILL_KILLED     => if !already_failing { fail!(KILLED_MSG) },
             _               => rtabort!("inhibit_kill: task already unkillable"),
@@ -270,7 +270,7 @@ pub fn allow_kill(&mut self, already_failing: bool) {
         let inner = unsafe { &mut *self.get() };
         // Expect flag to contain UNKILLABLE. If KILLED, it should stay KILLED.
         // FIXME(#7544)(bblum): is it really necessary to prohibit double kill?
-        match inner.unkillable.compare_and_swap(KILL_UNKILLABLE, KILL_RUNNING, SeqCst) {
+        match inner.unkillable.compare_and_swap(KILL_UNKILLABLE, KILL_RUNNING, Relaxed) {
             KILL_UNKILLABLE => { }, // normal case
             KILL_KILLED     => if !already_failing { fail!(KILLED_MSG) },
             _               => rtabort!("allow_kill: task already killable"),
@@ -281,10 +281,10 @@ pub fn allow_kill(&mut self, already_failing: bool) {
     // if it was blocked and needs punted awake. To be called by other tasks.
     pub fn kill(&mut self) -> Option<~Task> {
         let inner = unsafe { &mut *self.get() };
-        if inner.unkillable.swap(KILL_KILLED, SeqCst) == KILL_RUNNING {
+        if inner.unkillable.swap(KILL_KILLED, Relaxed) == KILL_RUNNING {
             // Got in. Allowed to try to punt the task awake.
             let flag = unsafe { &mut *inner.killed.get() };
-            match flag.swap(KILL_KILLED, SeqCst) {
+            match flag.swap(KILL_KILLED, Relaxed) {
                 // Task either not blocked or already taken care of.
                 KILL_RUNNING | KILL_KILLED => None,
                 // Got ownership of the blocked task.
@@ -306,8 +306,11 @@ pub fn killed(&self) -> bool {
         // is unkillable with a kill signal pending.
         let inner = unsafe { &*self.get() };
         let flag  = unsafe { &*inner.killed.get() };
-        // FIXME(#6598): can use relaxed ordering (i think)
-        flag.load(Acquire) == KILL_KILLED
+        // A barrier-related concern here is that a task that gets killed
+        // awake needs to see the killer's write of KILLED to this flag. This
+        // is analogous to receiving a pipe payload; the appropriate barrier
+        // should happen when enqueueing the task.
+        flag.load(Relaxed) == KILL_KILLED
     }
 
     pub fn notify_immediate_failure(&mut self) {
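The kill.rs changes rely on the flag never publishing data through itself: each handle only races to move the word between a few sentinel values (or a task pointer), and the task it may name is handed over by a separately ordered enqueue, so every load, swap, and compare-and-swap on the flag can be Relaxed. A rough sketch of that state machine against today's std::sync::atomic, with compare_exchange standing in for the old compare_and_swap and illustrative constants rather than the runtime's real encoding:

    use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

    const KILL_RUNNING: usize = 0;
    const KILL_KILLED: usize = 1;
    const KILL_UNKILLABLE: usize = 2;

    struct KillHandle {
        unkillable: AtomicUsize,
    }

    impl KillHandle {
        fn inhibit_kill(&self) -> Result<(), &'static str> {
            // Expect RUNNING. If KILLED, it must stay KILLED.
            match self
                .unkillable
                .compare_exchange(KILL_RUNNING, KILL_UNKILLABLE, Relaxed, Relaxed)
            {
                Ok(_) => Ok(()),                   // normal case
                Err(KILL_KILLED) => Err("killed"), // a killer got there first
                Err(_) => Err("already unkillable"),
            }
        }

        fn kill(&self) -> bool {
            // Only the handle that wins this exchange may punt the task awake;
            // the blocked task itself travels through the scheduler's queue,
            // which supplies the real ordering, so the flag stays Relaxed.
            self.unkillable.swap(KILL_KILLED, Relaxed) == KILL_RUNNING
        }
    }

    fn main() {
        let h = KillHandle { unkillable: AtomicUsize::new(KILL_RUNNING) };
        assert!(h.inhibit_kill().is_ok()); // RUNNING -> UNKILLABLE
        assert!(!h.kill());                // unkillable, so nothing to punt awake
    }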
index e865d3a467dd4d4221dbe69e72c3749673387966..04ada5ec07be699f8a2db74b69040c780fac359c 100644
--- a/src/libstd/unstable/sync.rs
+++ b/src/libstd/unstable/sync.rs
@@ -16,7 +16,7 @@
 use option::*;
 use either::{Either, Left, Right};
 use task;
-use unstable::atomics::{AtomicOption,AtomicUint,Acquire,Release,SeqCst};
+use unstable::atomics::{AtomicOption,AtomicUint,Acquire,Release,Relaxed,SeqCst};
 use unstable::finally::Finally;
 use ops::Drop;
 use clone::Clone;
@@ -95,8 +95,7 @@ pub fn cloneN(self, num_handles: uint) -> ~[UnsafeAtomicRcBox<T>] {
     pub fn get(&self) -> *mut T {
         unsafe {
             let mut data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
-            // FIXME(#6598) Change Acquire to Relaxed.
-            assert!(data.count.load(Acquire) > 0);
+            assert!(data.count.load(Relaxed) > 0);
             let r: *mut T = data.data.get_mut_ref();
             cast::forget(data);
             return r;
@@ -107,7 +106,7 @@ pub fn get(&self) -> *mut T {
     pub fn get_immut(&self) -> *T {
         unsafe {
             let data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
-            assert!(data.count.load(Acquire) > 0); // no barrier is really needed
+            assert!(data.count.load(Relaxed) > 0);
             let r: *T = data.data.get_ref();
             cast::forget(data);
             return r;
@@ -130,8 +129,7 @@ pub fn unwrap(self) -> T {
                 // Try to put our server end in the unwrapper slot.
                 // This needs no barrier -- it's protected by the release barrier on
                 // the xadd, and the acquire+release barrier in the destructor's xadd.
-                // FIXME(#6598) Change Acquire to Relaxed.
-                if data.unwrapper.fill(~(c1,p2), Acquire).is_none() {
+                if data.unwrapper.fill(~(c1,p2), Relaxed).is_none() {
                     // Got in. Tell this handle's destructor not to run (we are now it).
                     this.data = ptr::mut_null();
                     // Drop our own reference.
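The sync.rs changes rest on the usual atomic-refcount discipline: holding a handle already proves the count is nonzero, so the sanity asserts in get() and get_immut() need no ordering of their own; the ordering that matters sits on the destruction path, where a Release decrement plus an Acquire fence make every handle's writes visible to whichever handle frees the box. A rough sketch of that discipline with today's std::sync::atomic and made-up types (not the real UnsafeAtomicRcBox):

    use std::sync::atomic::{fence, AtomicUsize, Ordering};

    struct RcBoxData<T> {
        count: AtomicUsize,
        data: T,
    }

    struct Handle<T> {
        ptr: *mut RcBoxData<T>,
    }

    impl<T> Handle<T> {
        fn get(&self) -> *mut T {
            unsafe {
                // Like UnsafeAtomicRcBox::get: holding `self` proves the
                // count is at least one, so a Relaxed sanity check is enough.
                assert!((*self.ptr).count.load(Ordering::Relaxed) > 0);
                &mut (*self.ptr).data as *mut T
            }
        }
    }

    impl<T> Drop for Handle<T> {
        fn drop(&mut self) {
            unsafe {
                // Release: our writes to `data` happen-before the final free.
                if (*self.ptr).count.fetch_sub(1, Ordering::Release) == 1 {
                    // Acquire: the last handle sees every other handle's
                    // writes before it tears the box down.
                    fence(Ordering::Acquire);
                    drop(Box::from_raw(self.ptr));
                }
            }
        }
    }

    fn main() {
        let h = Handle {
            ptr: Box::into_raw(Box::new(RcBoxData {
                count: AtomicUsize::new(1),
                data: 7u32,
            })),
        };
        unsafe { assert_eq!(*h.get(), 7) };
    }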