git.lizzy.rs Git - rust.git/commitdiff
Do a task-killed check at the start of task 'timeslices'.
author    Ben Blum <bblum@andrew.cmu.edu>
Mon, 8 Jul 2013 23:31:32 +0000 (19:31 -0400)
committer Ben Blum <bblum@andrew.cmu.edu>
Sat, 20 Jul 2013 09:08:56 +0000 (05:08 -0400)
src/libstd/rt/kill.rs
src/libstd/rt/sched.rs

diff --git a/src/libstd/rt/kill.rs b/src/libstd/rt/kill.rs
index 929e69d61732658c372b1ddb8fa78fa31ba442be..1ea9c073678e9ef0ccf6e5fd83ccfdb00fe8782f 100644
--- a/src/libstd/rt/kill.rs
+++ b/src/libstd/rt/kill.rs
@@ -15,7 +15,7 @@
 use option::{Option, Some, None};
 use prelude::*;
 use rt::task::Task;
-use unstable::atomics::{AtomicUint, SeqCst};
+use unstable::atomics::{AtomicUint, Acquire, SeqCst};
 use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
 use util;
 
@@ -137,6 +137,16 @@ pub fn kill(&mut self) -> Option<~Task> {
         }
     }
 
+    #[inline]
+    pub fn killed(&self) -> bool {
+        // Called every context switch, so shouldn't report true if the task
+        // is unkillable with a kill signal pending.
+        let inner = unsafe { &*self.get() };
+        let flag  = unsafe { &*inner.killed.get() };
+        // FIXME(#6598): can use relaxed ordering (i think)
+        flag.load(Acquire) == KILL_KILLED
+    }
+
     pub fn notify_immediate_failure(&mut self) {
         // A benign data race may happen here if there are failing sibling
         // tasks that were also spawned-watched. The refcount's write barriers
@@ -287,6 +297,22 @@ pub fn collect_failure(&mut self, mut success: bool) {
         self.unkillable = 0;
     }
 
+    /// Fails if a kill signal was received.
+    #[inline]
+    pub fn check_killed(&self) {
+        match self.kill_handle {
+            Some(ref kill_handle) =>
+                // The task may be both unkillable and killed if it does some
+                // synchronization during unwinding or cleanup (for example,
+                // sending on a notify port). In that case failing won't help.
+                if self.unkillable == 0 && kill_handle.killed() {
+                    fail!(KILLED_MSG);
+                },
+            // This may happen during task death (see comments in collect_failure).
+            None => rtassert!(self.unkillable > 0),
+        }
+    }
+
     /// Enter a possibly-nested unkillable section of code.
     /// All calls must be paired with a subsequent call to allow_kill.
     #[inline]
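
The kill.rs half of the change is a lock-free "am I killed?" probe (killed(), an Acquire load of the task's kill flag) plus check_killed(), which turns that probe into task failure only when the task is actually killable. Roughly the same pattern in present-day Rust is sketched below; the KillFlag and TaskDeath names, the constant values, the panic message, and the main() driver are illustrative stand-ins rather than the real libstd types, and the sketch keeps Acquire even though the FIXME above suggests Relaxed would likely do.

    use std::sync::atomic::{AtomicUsize, Ordering};

    const KILL_RUNNING: usize = 0;
    const KILL_KILLED: usize = 1;

    struct KillFlag {
        killed: AtomicUsize,
    }

    impl KillFlag {
        // Cheap enough to call on every context switch.
        fn killed(&self) -> bool {
            self.killed.load(Ordering::Acquire) == KILL_KILLED
        }
    }

    struct TaskDeath {
        kill_handle: Option<KillFlag>,
        // Nesting depth of unkillable sections; 0 means the task is killable.
        unkillable: usize,
    }

    impl TaskDeath {
        // Fail the task if a kill signal was received while it was killable.
        fn check_killed(&self) {
            match self.kill_handle {
                Some(ref handle) => {
                    if self.unkillable == 0 && handle.killed() {
                        panic!("killed");
                    }
                }
                // During task death the handle is already gone; we should only
                // reach this point while still inside an unkillable section.
                None => assert!(self.unkillable > 0),
            }
        }
    }

    fn main() {
        let death = TaskDeath {
            kill_handle: Some(KillFlag { killed: AtomicUsize::new(KILL_RUNNING) }),
            unkillable: 0,
        };
        death.check_killed(); // no kill pending, so this returns normally
    }
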
diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs
index 4e4145ddc161ff9e5301272073d3cee0a29442a3..4b51508f0a41f0293723bf7464689405c5494585 100644
--- a/src/libstd/rt/sched.rs
+++ b/src/libstd/rt/sched.rs
@@ -483,6 +483,11 @@ pub fn resume_task_immediately(~self, task: ~Task) {
 
             // Running tasks may have asked us to do some cleanup
             (*sched).run_cleanup_job();
+
+            // Must happen after running the cleanup job (of course).
+            // Might not be running in task context; if not, a later call to
+            // resume_task_immediately will take care of this.
+            (*sched).current_task.map(|t| t.death.check_killed());
         }
     }
 
@@ -524,6 +529,9 @@ pub fn deschedule_running_task_and_then(~self, f: &fn(&mut Scheduler, ~Task)) {
             // We could be executing in a different thread now
             let sched = Local::unsafe_borrow::<Scheduler>();
             (*sched).run_cleanup_job();
+
+            // As above, must happen after running the cleanup job.
+            (*sched).current_task.map(|t| t.death.check_killed());
         }
     }
 
@@ -559,6 +567,9 @@ pub fn switch_running_tasks_and_then(~self, next_task: ~Task,
             // We could be executing in a different thread now
             let sched = Local::unsafe_borrow::<Scheduler>();
             (*sched).run_cleanup_job();
+
+            // As above, must happen after running the cleanup job.
+            (*sched).current_task.map(|t| t.death.check_killed());
         }
     }
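
All three sched.rs hunks add the same call: once the cleanup job left over from the previous context switch has run and a task is about to start (or resume) on the CPU, its death data is asked whether a kill signal arrived while it was descheduled, so a blocked task fails at the start of its next timeslice rather than running on. A schematic version of that control flow is sketched below; the Scheduler, Task, and resume_task names are invented stand-ins for the real runtime structures, and only the ordering (cleanup job first, kill check second) mirrors the actual change.

    struct TaskDeath; // stand-in for the kill-tracking state sketched earlier

    impl TaskDeath {
        fn check_killed(&self) {
            // See the kill.rs sketch above: fail if killed and killable.
        }
    }

    struct Task {
        death: TaskDeath,
    }

    struct Scheduler {
        current_task: Option<Box<Task>>,
        cleanup_job: Option<Box<dyn FnOnce()>>,
    }

    impl Scheduler {
        fn run_cleanup_job(&mut self) {
            // Work the previous context switch left behind for us.
            if let Some(job) = self.cleanup_job.take() {
                job();
            }
        }

        fn resume_task(&mut self, task: Box<Task>) {
            self.current_task = Some(task);
            // Running tasks may have asked us to do some cleanup.
            self.run_cleanup_job();
            // Must happen after the cleanup job: if a kill signal arrived while
            // this task was off the CPU, fail it at the start of its timeslice
            // instead of letting it run.
            if let Some(ref t) = self.current_task {
                t.death.check_killed();
            }
        }
    }

    fn main() {
        let mut sched = Scheduler { current_task: None, cleanup_job: None };
        sched.resume_task(Box::new(Task { death: TaskDeath }));
    }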