git.lizzy.rs Git - rust.git/commitdiff
Add KillHandle and implement exit code propagation to replace join_latch
author Ben Blum <bblum@andrew.cmu.edu>
Wed, 3 Jul 2013 01:15:34 +0000 (21:15 -0400)
committer Ben Blum <bblum@andrew.cmu.edu>
Sat, 20 Jul 2013 09:08:56 +0000 (05:08 -0400)
src/libstd/rt/kill.rs [new file with mode: 0644]
src/libstd/rt/mod.rs
src/libstd/unstable/sync.rs
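
The core mechanism this commit introduces is a lazy "tombstone" list: each exiting child that still has unresolved descendants leaves behind a deferred boolean check, and a parent only pays to evaluate the chain when it finally needs its subtree's exit status. A minimal sketch of that idea in modern Rust syntax (the names Tombstone and Graveyard are illustrative, not the 2013 runtime API):

    // Sketch only: a deferred "did my subtree succeed?" check.
    type Tombstone = Box<dyn FnOnce() -> bool + Send>;

    struct Graveyard {
        // Written locklessly by failing children (cf. notify_immediate_failure).
        any_child_failed: bool,
        // Head of the lazy list; each node captures the previous head.
        child_tombstones: Option<Tombstone>,
    }

    impl Graveyard {
        // Prepend a tombstone that defers to the older ones first, matching
        // add_lazy_tombstone's "check earlier tombstones first" fairness.
        fn add_tombstone(&mut self, check: impl FnOnce() -> bool + Send + 'static) {
            let older = self.child_tombstones.take();
            self.child_tombstones = Some(Box::new(move || {
                older.map_or(true, |f| f()) && check()
            }));
        }

        // Consuming the graveyard answers whether every watched child
        // succeeded; the && chain short-circuits on the first failure.
        fn all_children_succeeded(mut self) -> bool {
            !self.any_child_failed
                && self.child_tombstones.take().map_or(true, |f| f())
        }
    }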

diff --git a/src/libstd/rt/kill.rs b/src/libstd/rt/kill.rs
new file mode 100644
index 0000000..afd2d3b
--- /dev/null
+++ b/src/libstd/rt/kill.rs
@@ -0,0 +1,128 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Task death: asynchronous killing, linked failure, exit code propagation.
+
+use cell::Cell;
+use option::{Option, Some, None};
+use prelude::*;
+use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
+use util;
+
+// FIXME(#7544)(bblum): think about the cache efficiency of this
+struct KillHandleInner {
+    // ((more fields to be added in a future commit))
+
+    // Shared state between task and children for exit code propagation. These
+    // are here so we can re-use the kill handle to implement watched children
+    // tasks. Using a separate ARClike would introduce extra atomic adds/subs
+    // into common spawn paths, so this is just for speed.
+
+    // Locklessly accessed; protected by the enclosing refcount's barriers.
+    any_child_failed: bool,
+    // A lazy list; consuming it may unwrap() many child tombstones.
+    child_tombstones: Option<~fn() -> bool>,
+    // Protects multiple children simultaneously creating tombstones.
+    graveyard_lock: LittleLock,
+}
+
+/// State shared between tasks, used for task killing during linked failure.
+#[deriving(Clone)]
+pub struct KillHandle(UnsafeAtomicRcBox<KillHandleInner>);
+
+impl KillHandle {
+    pub fn new() -> KillHandle {
+        KillHandle(UnsafeAtomicRcBox::new(KillHandleInner {
+            // Linked failure fields
+            // ((none yet))
+            // Exit code propagation fields
+            any_child_failed: false,
+            child_tombstones: None,
+            graveyard_lock:   LittleLock(),
+        }))
+    }
+
+    pub fn notify_immediate_failure(&mut self) {
+        // A benign data race may happen here if there are failing sibling
+        // tasks that were also spawned-watched. The refcount's write barriers
+        // in UnsafeAtomicRcBox make this write visible to whichever task
+        // eventually unwraps (or destroys) the box.
+        unsafe { (*self.get()).any_child_failed = true; }
+    }
+
+    // For use when a task does not need to collect its children's exit
+    // statuses, but the task has a parent which might want them.
+    pub fn reparent_children_to(self, parent: &mut KillHandle) {
+        // Optimistic path: if another of the parent's children has already
+        // failed, we don't need to worry about any of this.
+        if unsafe { (*parent.get()).any_child_failed } {
+            return;
+        }
+
+        // Try to see if all our children are gone already.
+        match unsafe { self.try_unwrap() } {
+            // Couldn't unwrap; children still alive. Reparent entire handle as
+            // our own tombstone, to be unwrapped later.
+            Left(this) => {
+                let this = Cell::new(this); // :(
+                do add_lazy_tombstone(parent) |other_tombstones| {
+                    let this = Cell::new(this.take()); // :(
+                    let others = Cell::new(other_tombstones); // :(
+                    || {
+                        // Prefer to check tombstones that were there first,
+                        // being "more fair" at the expense of tail-recursion.
+                        others.take().map_consume_default(true, |f| f()) && {
+                            let mut inner = unsafe { this.take().unwrap() };
+                            (!inner.any_child_failed) &&
+                                inner.child_tombstones.take_map_default(true, |f| f())
+                        }
+                    }
+                }
+            }
+            // Whether or not all children exited, one or more already failed.
+            Right(KillHandleInner { any_child_failed: true, _ }) => {
+                parent.notify_immediate_failure();
+            }
+            // All children exited, but some left behind tombstones that we
+            // don't want to wait on now. Give them to our parent.
+            Right(KillHandleInner { any_child_failed: false,
+                                    child_tombstones: Some(f), _ }) => {
+                let f = Cell::new(f); // :(
+                do add_lazy_tombstone(parent) |other_tombstones| {
+                    let f = Cell::new(f.take()); // :(
+                    let others = Cell::new(other_tombstones); // :(
+                    || {
+                        // Prefer fairness to tail-recursion, as in above case.
+                        others.take().map_consume_default(true, |f| f()) &&
+                            f.take()()
+                    }
+                }
+            }
+            // All children exited, none failed. Nothing to do!
+            Right(KillHandleInner { any_child_failed: false,
+                                    child_tombstones: None, _ }) => { }
+        }
+
+        // NB: Takes a pthread mutex -- 'blk' not allowed to reschedule.
+        fn add_lazy_tombstone(parent: &mut KillHandle,
+                              blk: &fn(Option<~fn() -> bool>) -> ~fn() -> bool) {
+
+            let inner: &mut KillHandleInner = unsafe { &mut *parent.get() };
+            unsafe {
+                do inner.graveyard_lock.lock {
+                    // Update the current "head node" of the lazy list.
+                    inner.child_tombstones =
+                        Some(blk(util::replace(&mut inner.child_tombstones, None)));
+                }
+            }
+        }
+    }
+}
+
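
Reduced to the sketch above, the decision reparent_children_to makes at task exit looks roughly like this (illustrative only; the Left(this) case, where live children prevent unwrapping and the whole handle itself becomes the tombstone, is elided):

    // Hypothetical distillation of reparent_children_to's match arms.
    fn reparent(child: Graveyard, parent: &mut Graveyard) {
        // Optimistic path: the parent already knows some child failed.
        if parent.any_child_failed {
            return;
        }
        if child.any_child_failed {
            // A failure is already recorded; propagate it to the parent eagerly.
            parent.any_child_failed = true;
        } else if let Some(f) = child.child_tombstones {
            // No failure yet, but deferred checks remain: hand them to the
            // parent as one more tombstone rather than evaluating them now.
            parent.add_tombstone(f);
        }
        // Otherwise all children exited cleanly and nothing needs doing.
    }

As for the repeated "let x = Cell::new(x); // :(" dance in the original: 2013-era closures could not capture owned values by move (and ~fn is not a once-fn), so Cell::new/take is used to smuggle ownership into the tombstone closures and back out at call time.
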
diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs
index 51f4737ef85fb0561cbd4472d2b1b02633e48cf7..1c3411b62478880a25e3fc2fd2cd6aa85d7f61e1 100644
--- a/src/libstd/rt/mod.rs
+++ b/src/libstd/rt/mod.rs
@@ -83,6 +83,9 @@
 /// Implementations of language-critical runtime features like @.
 pub mod task;
 
+/// Facilities related to task failure, killing, and death.
+mod kill;
+
 /// The coroutine task scheduler, built on the `io` event loop.
 mod sched;
 
diff --git a/src/libstd/unstable/sync.rs b/src/libstd/unstable/sync.rs
index 2af914cf458dfe4ae234b71f2c1c1b1f5fbaa328..b208ffd8f910860aee5f6fdd9e189345666a349b 100644
--- a/src/libstd/unstable/sync.rs
+++ b/src/libstd/unstable/sync.rs
@@ -225,9 +225,9 @@ fn drop(&self) {
 /****************************************************************************/
 
 #[allow(non_camel_case_types)] // runtime type
-pub type rust_little_lock = *libc::c_void;
+type rust_little_lock = *libc::c_void;
 
-struct LittleLock {
+pub struct LittleLock {
     l: rust_little_lock,
 }
 
@@ -239,7 +239,7 @@ fn drop(&self) {
     }
 }
 
-fn LittleLock() -> LittleLock {
+pub fn LittleLock() -> LittleLock {
     unsafe {
         LittleLock {
             l: rust_create_little_lock()
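
For context on the sync.rs changes: LittleLock wraps a raw mutex created by the C runtime (a pthread mutex, per the NB in kill.rs), and this commit makes it public so kill.rs can serialize tombstone creation. A rough modern analogue of its closure-based locking pattern, with std::sync::Mutex standing in for the raw runtime lock (illustrative only):

    use std::sync::Mutex;

    // Sketch only: the 2013 LittleLock wraps a raw pthread mutex from the
    // C runtime, not a std Mutex.
    pub struct LittleLock {
        l: Mutex<()>,
    }

    impl LittleLock {
        pub fn new() -> LittleLock {
            LittleLock { l: Mutex::new(()) }
        }

        // Runs `blk` while holding the lock. As the NB in kill.rs warns,
        // the closure must not block or reschedule while the lock is held.
        pub fn lock<T>(&self, blk: impl FnOnce() -> T) -> T {
            let _guard = self.l.lock().unwrap();
            blk()
        }
    }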