1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
13 Task death: asynchronous killing, linked failure, exit code propagation.
15 This file implements two orthogonal building-blocks for communicating failure
16 between tasks. One is 'linked failure' or 'task killing', that is, a failing
17 task causing other tasks to fail promptly (even those that are blocked on
18 pipes or I/O). The other is 'exit code propagation', which affects the result
19 observed by the parent of a task::try task that itself spawns child tasks
20 (such as any #[test] function). In both cases the data structures live in
25 The model for killing involves two atomic flags, the "kill flag" and the
26 "unkillable flag". Operations on the kill flag include:
28 - In the taskgroup code (task/spawn.rs), tasks store a clone of their
29 KillHandle in their shared taskgroup. Another task in the group that fails
30 will use that handle to call kill().
31 - When a task blocks, it turns its ~Task into a BlockedTask by storing
32 the transmuted ~Task pointer inside the KillHandle's kill flag. A task
33 trying to block and a task trying to kill it can simultaneously access the
34 kill flag, after which the task will get scheduled and fail (no matter who
35 wins the race). Likewise, a task trying to wake a blocked task normally and
36 a task trying to kill it can simultaneously access the flag; only one will
37 get the task to reschedule it.
39 Operations on the unkillable flag include:
41 - When a task becomes unkillable, it swaps on the flag to forbid any killer
42 from waking it up while it's blocked inside the unkillable section. If a
43 kill was already pending, the task fails instead of becoming unkillable.
44 - When a task is done being unkillable, it restores the flag to the normal
45 running state. If a kill was received-but-blocked during the unkillable
46 section, the task fails at this later point.
47 - When a task tries to kill another task, before swapping on the kill flag, it
48 first swaps on the unkillable flag, to see if it's "allowed" to wake up the
49 task. If it isn't, the killed task will receive the signal when it becomes
50 killable again. (Of course, a task trying to wake the task normally (e.g.
51 sending on a channel) does not access the unkillable flag at all.)
53 Why do we not need acquire/release barriers on any of the kill flag swaps?
54 This is because barriers establish orderings between accesses on different
55 memory locations, but each kill-related operation is only a swap on a single
56 location, so atomicity is all that matters. The exception is kill(), which
57 does a swap on both flags in sequence. kill() needs no barriers because it
58 does not matter if its two accesses are seen reordered on another CPU: if a
59 killer does perform both writes, it means it saw a KILL_RUNNING in the
60 unkillable flag, which means an unkillable task will see KILL_KILLED and fail
61 immediately (rendering the subsequent write to the kill flag unnecessary).
63 II. Exit code propagation.
65 FIXME(#7544): Decide on the ultimate model for this and document it.
71 use either::{Either, Left, Right};
72 use option::{Option, Some, None};
75 use task::spawn::Taskgroup;
76 use to_bytes::IterBytes;
77 use unstable::atomics::{AtomicUint, Relaxed};
78 use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
// Message passed to fail!() when a task dies by linked failure (see
// inhibit_kill/allow_kill below).
81 static KILLED_MSG: &'static str = "killed by linked failure";
83 // State values for the 'killed' and 'unkillable' atomic flags below.
// Any other value stored in the 'killed' flag is a transmuted blocked
// ~Task pointer (see KillHandleInner::killed and BlockedTask::try_block).
84 static KILL_RUNNING: uint = 0;
85 static KILL_KILLED: uint = 1;
86 static KILL_UNKILLABLE: uint = 2;
// Newtype over AtomicUint holding KILL_RUNNING, KILL_KILLED, or a
// transmuted blocked ~Task pointer.
88 struct KillFlag(AtomicUint);
// Shared, refcounted handle to a kill flag; clones are held both by the
// task's KillHandle and by blocking primitives that may wake the task.
89 type KillFlagHandle = UnsafeAtomicRcBox<KillFlag>;
91 /// A handle to a blocked task. Usually this means having the ~Task pointer by
92 /// ownership, but if the task is killable, a killer can steal it at any time.
93 pub enum BlockedTask {
// Blocked while killable: the ~Task lives transmuted inside the shared
// kill flag, where either the waker or a killer can swap it out first.
95 Killable(KillFlagHandle),
98 // FIXME(#7544)(bblum): think about the cache efficiency of this
// Shared inner state behind KillHandle, refcounted via UnsafeAtomicRcBox.
99 struct KillHandleInner {
100 // Is the task running, blocked, or killed? Possible values:
101 // * KILL_RUNNING - Not unkillable, no kill pending.
102 // * KILL_KILLED - Kill pending.
103 // * <ptr> - A transmuted blocked ~Task pointer.
104 // This flag is refcounted because it may also be referenced by a blocking
105 // concurrency primitive, used to wake the task normally, whose reference
106 // may outlive the handle's if the task is killed.
107 killed: KillFlagHandle,
108 // Has the task deferred kill signals? This flag guards the above one.
110 // * KILL_RUNNING - Not unkillable, no kill pending.
111 // * KILL_KILLED - Kill pending.
112 // * KILL_UNKILLABLE - Kill signals deferred.
// A killer swaps KILL_KILLED in here first; only if it saw KILL_RUNNING
// may it go on to touch the 'killed' flag above (see kill() below).
113 unkillable: AtomicUint,
115 // Shared state between task and children for exit code propagation. These
116 // are here so we can re-use the kill handle to implement watched children
117 // tasks. Using a separate Arc-like would introduce extra atomic adds/subs
118 // into common spawn paths, so this is just for speed.
120 // Locklessly accessed; protected by the enclosing refcount's barriers.
121 any_child_failed: bool,
122 // A lazy list, consuming which may unwrap() many child tombstones.
123 child_tombstones: Option<~fn() -> bool>,
124 // Protects multiple children simultaneously creating tombstones.
125 graveyard_lock: LittleLock,
128 /// State shared between tasks used for task killing during linked failure.
// Newtype over a refcounted pointer to KillHandleInner; clones share state.
130 pub struct KillHandle(UnsafeAtomicRcBox<KillHandleInner>);
132 /// Per-task state related to task death, killing, failure, etc.
/// The handle fields are Options so they can be moved out by value when
/// the task exits (see collect_failure).
134 // Shared among this task, its watched children, and any linked tasks who
135 // might kill it. This is optional so we can take it by-value at exit time.
136 kill_handle: Option<KillHandle>,
137 // Handle to a watching parent, if we have one, for exit code propagation.
138 watching_parent: Option<KillHandle>,
139 // Action to be done with the exit code. If set, also makes the task wait
140 // until all its watched children exit before collecting the status.
141 on_exit: Option<~fn(bool)>,
142 // nesting level counter for task::unkillable calls (0 == killable).
144 // nesting level counter for unstable::atomically calls (0 == can yield).
146 // A "spare" handle to the kill flag inside the kill handle. Used during
147 // blocking/waking as an optimization to avoid two xadds on the refcount.
148 spare_kill_flag: Option<KillFlagHandle>,
151 impl Drop for KillFlag {
152 // Letting a KillFlag with a task inside get dropped would leak the task.
153 // We could free it here, but the task should get awoken by hand somehow.
155 match self.load(Relaxed) {
156 KILL_RUNNING | KILL_KILLED => { },
// Any other value is a transmuted blocked ~Task pointer; dropping it
// silently would leak the task, so abort the runtime loudly instead.
157 _ => rtabort!("can't drop kill flag with a blocked task inside!"),
162 // Whenever a task blocks, it swaps out its spare kill flag to use as the
163 // blocked task handle. So unblocking a task must restore that spare.
// Rebuilds the ~Task from a uint previously stashed (transmuted) inside a
// kill flag, restoring the spare flag handle to the task if it lacks one.
164 unsafe fn revive_task_ptr(task_ptr: uint, spare_flag: Option<KillFlagHandle>) -> ~Task {
165 let mut task: ~Task = cast::transmute(task_ptr);
166 if task.death.spare_kill_flag.is_none() {
167 task.death.spare_kill_flag = spare_flag;
169 // A task's spare kill flag is not used for blocking in one case:
170 // when an unkillable task blocks on select. In this case, a separate
171 // one was created, which we now discard.
172 rtassert!(task.death.unkillable > 0);
178 /// Returns Some if the task was successfully woken; None if already killed.
179 pub fn wake(self) -> Option<~Task> {
// An unkillable blocked task owns its ~Task outright; no race possible.
181 Unkillable(task) => Some(task),
182 Killable(flag_arc) => {
183 let flag = unsafe { &mut **flag_arc.get() };
// Swap RUNNING back in; the old value says who reached the flag first.
184 match flag.swap(KILL_RUNNING, Relaxed) {
185 KILL_RUNNING => None, // woken from select(), perhaps
186 KILL_KILLED => None, // a killer stole it already
// Otherwise the flag held the transmuted ~Task pointer: we won the
// race, so revive it, handing the flag handle back as the spare.
188 Some(unsafe { revive_task_ptr(task_ptr, Some(flag_arc)) })
194 /// Create a blocked task, unless the task was already killed.
/// Returns Left(task) if a kill was already pending (caller must fail),
/// Right(BlockedTask) if the block succeeded.
195 pub fn try_block(mut task: ~Task) -> Either<~Task, BlockedTask> {
196 // NB: As an optimization, we could give a free pass to being unkillable
197 // to tasks whose taskgroups haven't been initialized yet, but that
198 // introduces complications with select() and with the test cases below,
199 // and it's not clear the uncommon performance boost is worth it.
200 if task.death.unkillable > 0 {
201 Right(Unkillable(task))
203 rtassert!(task.death.kill_handle.is_some());
205 // The inverse of 'revive', above, occurs here.
206 // The spare kill flag will usually be Some, unless the task was
207 // already killed, in which case the killer will have deferred
208 // creating a new one until whenever it blocks during unwinding.
209 let flag_arc = match task.death.spare_kill_flag.take() {
210 Some(spare_flag) => spare_flag,
212 // A task that kills us won't have a spare kill flag to
213 // give back to us, so we restore it ourselves here. This
214 // situation should only arise when we're already failing.
215 rtassert!(task.unwinder.unwinding);
216 (*task.death.kill_handle.get_ref().get()).killed.clone()
219 let flag = &mut **flag_arc.get();
// Transmuting consumes the ~Task; it is revived on the failure path.
220 let task_ptr = cast::transmute(task);
221 // Expect flag to contain RUNNING. If KILLED, it should stay KILLED.
222 match flag.compare_and_swap(KILL_RUNNING, task_ptr, Relaxed) {
223 KILL_RUNNING => Right(Killable(flag_arc)),
// Lost the race: a kill arrived first; hand the task back to fail.
224 KILL_KILLED => Left(revive_task_ptr(task_ptr, Some(flag_arc))),
225 x => rtabort!("can't block task! kill flag = %?", x),
231 /// Converts one blocked task handle to a list of many handles to the same.
232 pub fn make_selectable(self, num_handles: uint) -> ~[BlockedTask] {
233 let handles = match self {
// An unkillable task has no flag in play yet; make one now, holding
// its transmuted ~Task pointer, and share it num_handles ways.
234 Unkillable(task) => {
235 let flag = unsafe { KillFlag(AtomicUint::new(cast::transmute(task))) };
236 UnsafeAtomicRcBox::newN(flag, num_handles)
238 Killable(flag_arc) => flag_arc.cloneN(num_handles),
240 // Even if the task was unkillable before, we use 'Killable' because
241 // multiple pipes will have handles. It does not really mean killable.
242 handles.consume_iter().transform(|x| Killable(x)).collect()
245 // This assertion has two flavours because the wake involves an atomic op.
246 // In the faster version, destructors will fail dramatically instead.
247 #[inline] #[cfg(not(test))]
248 pub fn assert_already_awake(self) { }
249 #[inline] #[cfg(test)]
250 pub fn assert_already_awake(self) { assert!(self.wake().is_none()); }
252 /// Convert to an unsafe uint value. Useful for storing in a pipe's state flag.
254 pub unsafe fn cast_to_uint(self) -> uint {
255 // Use the low bit to distinguish the enum variants, to save a second
256 // allocation in the indestructible case.
258 Unkillable(task) => {
259 let blocked_task_ptr: uint = cast::transmute(task);
260 rtassert!(blocked_task_ptr & 0x1 == 0);
263 Killable(flag_arc) => {
// Box the handle so it fits in one word, then tag the low bit.
264 let blocked_task_ptr: uint = cast::transmute(~flag_arc);
265 rtassert!(blocked_task_ptr & 0x1 == 0);
266 blocked_task_ptr | 0x1
271 /// Convert from an unsafe uint value. Useful for retrieving a pipe's state flag.
273 pub unsafe fn cast_from_uint(blocked_task_ptr: uint) -> BlockedTask {
// Low bit clear: the uint is the transmuted ~Task itself (Unkillable).
274 if blocked_task_ptr & 0x1 == 0 {
275 Unkillable(cast::transmute(blocked_task_ptr))
// Low bit set: strip the tag and unbox the flag handle (Killable).
277 let ptr: ~KillFlagHandle = cast::transmute(blocked_task_ptr & !0x1);
279 ~flag_arc => Killable(flag_arc)
285 // So that KillHandle can be hashed in the taskgroup bookkeeping code.
286 impl IterBytes for KillHandle {
// Hashes the inner shared box (presumably by pointer identity, so clones
// of one handle hash alike -- TODO confirm what .data exposes here).
287 fn iter_bytes(&self, lsb0: bool, f: &fn(buf: &[u8]) -> bool) -> bool {
288 self.data.iter_bytes(lsb0, f)
291 impl Eq for KillHandle {
// Equality likewise delegates to the shared box's own comparison.
292 #[inline] fn eq(&self, other: &KillHandle) -> bool { self.data.eq(&other.data) }
293 #[inline] fn ne(&self, other: &KillHandle) -> bool { self.data.ne(&other.data) }
// Creates a fresh handle plus a second reference to its kill flag; the
// extra reference becomes the task's spare_kill_flag (see Death::new).
297 pub fn new() -> (KillHandle, KillFlagHandle) {
298 let (flag, flag_clone) =
299 UnsafeAtomicRcBox::new2(KillFlag(AtomicUint::new(KILL_RUNNING)));
300 let handle = KillHandle(UnsafeAtomicRcBox::new(KillHandleInner {
301 // Linked failure fields
// Starts killable, with no kill pending.
303 unkillable: AtomicUint::new(KILL_RUNNING),
304 // Exit code propagation fields
305 any_child_failed: false,
306 child_tombstones: None,
307 graveyard_lock: LittleLock::new(),
312 // Will begin unwinding if a kill signal was received, unless already_failing.
313 // This can't be used recursively, because a task which sees a KILLED
314 // signal must fail immediately, which an already-unkillable task can't do.
316 pub fn inhibit_kill(&mut self, already_failing: bool) {
317 let inner = unsafe { &mut *self.get() };
318 // Expect flag to contain RUNNING. If KILLED, it should stay KILLED.
319 // FIXME(#7544)(bblum): is it really necessary to prohibit double kill?
320 match inner.unkillable.compare_and_swap(KILL_RUNNING, KILL_UNKILLABLE, Relaxed) {
321 KILL_RUNNING => { }, // normal case
// A kill arrived before we could become unkillable: fail now (unless
// we are already unwinding, in which case failing again won't help).
322 KILL_KILLED => if !already_failing { fail!(KILLED_MSG) },
323 _ => rtabort!("inhibit_kill: task already unkillable"),
327 // Will begin unwinding if a kill signal was received, unless already_failing.
329 pub fn allow_kill(&mut self, already_failing: bool) {
330 let inner = unsafe { &mut *self.get() };
331 // Expect flag to contain UNKILLABLE. If KILLED, it should stay KILLED.
332 // FIXME(#7544)(bblum): is it really necessary to prohibit double kill?
333 match inner.unkillable.compare_and_swap(KILL_UNKILLABLE, KILL_RUNNING, Relaxed) {
334 KILL_UNKILLABLE => { }, // normal case
// A kill was deferred during the unkillable section; honour it now.
335 KILL_KILLED => if !already_failing { fail!(KILLED_MSG) },
336 _ => rtabort!("allow_kill: task already killable"),
340 // Send a kill signal to the handle's owning task. Returns the task itself
341 // if it was blocked and needs punted awake. To be called by other tasks.
// Note the ordering: the unkillable flag is swapped before the kill flag,
// so an unkillable victim defers the signal (see the module doc above for
// why no barriers are needed between the two swaps).
342 pub fn kill(&mut self) -> Option<~Task> {
343 let inner = unsafe { &mut *self.get() };
344 if inner.unkillable.swap(KILL_KILLED, Relaxed) == KILL_RUNNING {
345 // Got in. Allowed to try to punt the task awake.
346 let flag = unsafe { &mut *inner.killed.get() };
347 match flag.swap(KILL_KILLED, Relaxed) {
348 // Task either not blocked or already taken care of.
349 KILL_RUNNING | KILL_KILLED => None,
350 // Got ownership of the blocked task.
351 // While the usual 'wake' path can just pass back the flag
352 // handle, we (the slower kill path) haven't an extra one lying
353 // around. The task will wake up without a spare.
354 task_ptr => Some(unsafe { revive_task_ptr(task_ptr, None) }),
357 // Otherwise it was either unkillable or already killed. Somebody
358 // else was here first who will deal with the kill signal.
// Polls whether a kill signal is pending in the kill flag.
364 pub fn killed(&self) -> bool {
365 // Called every context switch, so shouldn't report true if the task
366 // is unkillable with a kill signal pending.
367 let inner = unsafe { &*self.get() };
368 let flag = unsafe { &*inner.killed.get() };
369 // A barrier-related concern here is that a task that gets killed
370 // awake needs to see the killer's write of KILLED to this flag. This
371 // is analogous to receiving a pipe payload; the appropriate barrier
372 // should happen when enqueueing the task.
373 flag.load(Relaxed) == KILL_KILLED
// Records that a watched child failed; read by whichever task eventually
// unwraps this handle's inner state.
376 pub fn notify_immediate_failure(&mut self) {
377 // A benign data race may happen here if there are failing sibling
378 // tasks that were also spawned-watched. The refcount's write barriers
379 // in UnsafeAtomicRcBox ensure that this write will be seen by the
380 // unwrapper/destructor, whichever task may unwrap it.
381 unsafe { (*self.get()).any_child_failed = true; }
384 // For use when a task does not need to collect its children's exit
385 // statuses, but the task has a parent which might want them.
386 pub fn reparent_children_to(self, parent: &mut KillHandle) {
387 // Optimistic path: If another child of the parent's already failed,
388 // we don't need to worry about any of this.
389 if unsafe { (*parent.get()).any_child_failed } {
// Four-way case analysis on unwrapping our own inner state (the cases
// are numbered in the test functions below).
393 // Try to see if all our children are gone already.
394 match self.try_unwrap() {
395 // Couldn't unwrap; children still alive. Reparent entire handle as
396 // our own tombstone, to be unwrapped later.
398 let this = Cell::new(this); // :(
399 do add_lazy_tombstone(parent) |other_tombstones| {
400 let this = Cell::new(this.take()); // :(
401 let others = Cell::new(other_tombstones); // :(
403 // Prefer to check tombstones that were there first,
404 // being "more fair" at the expense of tail-recursion.
405 others.take().map_move_default(true, |f| f()) && {
// Only unwrap (blocking until children drop handles) if no
// earlier tombstone already reported a failure.
406 let mut inner = this.take().unwrap();
407 (!inner.any_child_failed) &&
408 inner.child_tombstones.take().map_move_default(true, |f| f())
413 // Whether or not all children exited, one or more already failed.
414 Right(KillHandleInner { any_child_failed: true, _ }) => {
415 parent.notify_immediate_failure();
417 // All children exited, but some left behind tombstones that we
418 // don't want to wait on now. Give them to our parent.
419 Right(KillHandleInner { any_child_failed: false,
420 child_tombstones: Some(f), _ }) => {
421 let f = Cell::new(f); // :(
422 do add_lazy_tombstone(parent) |other_tombstones| {
423 let f = Cell::new(f.take()); // :(
424 let others = Cell::new(other_tombstones); // :(
426 // Prefer fairness to tail-recursion, as in above case.
427 others.take().map_move_default(true, |f| f()) &&
432 // All children exited, none failed. Nothing to do!
433 Right(KillHandleInner { any_child_failed: false,
434 child_tombstones: None, _ }) => { }
437 // NB: Takes a pthread mutex -- 'blk' not allowed to reschedule.
// Prepends a new tombstone closure onto the parent's lazy list: 'blk'
// receives the old list head so it can chain onto it.
439 fn add_lazy_tombstone(parent: &mut KillHandle,
440 blk: &fn(Option<~fn() -> bool>) -> ~fn() -> bool) {
442 let inner: &mut KillHandleInner = unsafe { &mut *parent.get() };
444 do inner.graveyard_lock.lock {
445 // Update the current "head node" of the lazy list.
446 inner.child_tombstones =
447 Some(blk(util::replace(&mut inner.child_tombstones, None)));
// Death state for a task with no watching parent.
455 pub fn new() -> Death {
456 let (handle, spare) = KillHandle::new();
458 kill_handle: Some(handle),
459 watching_parent: None,
463 spare_kill_flag: Some(spare),
// Death state for a spawned child: it watches us via a clone of our own
// kill handle, so its exit status can propagate back to us.
467 pub fn new_child(&self) -> Death {
469 let (handle, spare) = KillHandle::new();
471 kill_handle: Some(handle),
472 watching_parent: self.kill_handle.clone(),
476 spare_kill_flag: Some(spare),
480 /// Collect failure exit codes from children and propagate them to a parent.
/// 'success' is this task's own status; it is folded together with the
/// children's statuses before being reported.
481 pub fn collect_failure(&mut self, mut success: bool, group: Option<Taskgroup>) {
482 // This may run after the task has already failed, so even though the
483 // task appears to need to be killed, the scheduler should not fail us
484 // when we block to unwrap.
485 // (XXX: Another less-elegant reason for doing this is so that the use
486 // of the LittleLock in reparent_children_to doesn't need to access the
487 // unkillable flag in the kill_handle, since we'll have removed it.)
488 rtassert!(self.unkillable == 0);
491 // FIXME(#7544): See corresponding fixme at the callsite in task.rs.
492 // NB(#8192): Doesn't work with "let _ = ..."
493 { use util; util::ignore(group); }
495 // Step 1. Decide if we need to collect child failures synchronously.
496 do self.on_exit.take().map_move |on_exit| {
498 // We succeeded, but our children might not. Need to wait for them.
// This unwrap blocks until all child clones of the handle are gone.
499 let mut inner = self.kill_handle.take_unwrap().unwrap()
500 if inner.any_child_failed {
503 // Lockless access to tombstones protected by unwrap barrier.
504 success = inner.child_tombstones.take().map_move_default(true, |f| f());
510 // Step 2. Possibly alert possibly-watching parent to failure status.
511 // Note that as soon as parent_handle goes out of scope, the parent
512 // can successfully unwrap its handle and collect our reported status.
513 do self.watching_parent.take().map_move |mut parent_handle| {
515 // Our handle might be None if we had an exit callback, and
516 // already unwrapped it. But 'success' being true means no
517 // child failed, so there's nothing to do (see below case).
518 do self.kill_handle.take().map_move |own_handle| {
519 own_handle.reparent_children_to(&mut parent_handle);
522 // Can inform watching parent immediately that we failed.
523 // (Note the importance of non-failing tasks NOT writing
524 // 'false', which could obscure another task's failure.)
525 parent_handle.notify_immediate_failure();
529 // Can't use allow_kill directly; that would require the kill handle.
530 rtassert!(self.unkillable == 1);
534 /// Fails if a kill signal was received.
536 pub fn check_killed(&self, already_failing: bool) {
537 match self.kill_handle {
538 Some(ref kill_handle) =>
539 // The task may be both unkillable and killed if it does some
540 // synchronization during unwinding or cleanup (for example,
541 // sending on a notify port). In that case failing won't help.
// Only fail if killable, not already unwinding, and a kill is pending.
542 if self.unkillable == 0 && (!already_failing) && kill_handle.killed() {
545 // This may happen during task death (see comments in collect_failure).
546 None => rtassert!(self.unkillable > 0),
550 /// Enter a possibly-nested unkillable section of code.
551 /// All calls must be paired with a subsequent call to allow_kill.
553 pub fn inhibit_kill(&mut self, already_failing: bool) {
554 self.unkillable += 1;
555 // May fail, hence must happen *after* incrementing the counter
// Only the outermost nesting level touches the shared kill handle.
556 if self.unkillable == 1 {
557 rtassert!(self.kill_handle.is_some());
558 self.kill_handle.get_mut_ref().inhibit_kill(already_failing);
562 /// Exit a possibly-nested unkillable section of code.
563 /// All calls must be paired with a preceding call to inhibit_kill.
565 pub fn allow_kill(&mut self, already_failing: bool) {
566 rtassert!(self.unkillable != 0);
567 self.unkillable -= 1;
// Likewise, only leaving the outermost level makes us killable again.
568 if self.unkillable == 0 {
569 rtassert!(self.kill_handle.is_some());
570 self.kill_handle.get_mut_ref().allow_kill(already_failing);
574 /// Enter a possibly-nested "atomic" section of code. Just for assertions.
575 /// All calls must be paired with a subsequent call to allow_yield.
// wont_sleep is a plain counter on the task's own Death state, not an
// atomic; presumably only the owning task touches it -- TODO confirm.
577 pub fn inhibit_yield(&mut self) {
578 self.wont_sleep += 1;
581 /// Exit a possibly-nested "atomic" section of code. Just for assertions.
582 /// All calls must be paired with a preceding call to inhibit_yield.
584 pub fn allow_yield(&mut self) {
585 rtassert!(self.wont_sleep != 0);
586 self.wont_sleep -= 1;
589 /// Ensure that the task is allowed to become descheduled.
591 pub fn assert_may_sleep(&self) {
592 if self.wont_sleep != 0 {
593 rtabort!("illegal atomic-sleep: can't deschedule inside atomically()");
598 impl Drop for Death {
600 // Mustn't be in an atomic or unkillable section at task death.
// Sanity check that every inhibit_* call was balanced by its allow_*.
601 rtassert!(self.unkillable == 0);
602 rtassert!(self.wont_sleep == 0);
608 #[allow(unused_mut)];
614 // Test cases don't care about the spare killed flag.
// Test helper: build a KillHandle, discarding the spare flag handle.
615 fn make_kill_handle() -> KillHandle { let (h,_) = KillHandle::new(); h }
617 #[ignore(reason = "linked failure")]
619 fn no_tombstone_success() {
620 do run_in_newsched_task {
621 // Tests case 4 of the 4-way match in reparent_children.
622 let mut parent = make_kill_handle();
623 let mut child = make_kill_handle();
625 // Without another handle to child, the try unwrap should succeed.
626 child.reparent_children_to(&mut parent);
627 let mut parent_inner = parent.unwrap();
628 assert!(parent_inner.child_tombstones.is_none());
629 assert!(parent_inner.any_child_failed == false);
633 fn no_tombstone_failure() {
634 do run_in_newsched_task {
635 // Tests case 2 of the 4-way match in reparent_children.
636 let mut parent = make_kill_handle();
637 let mut child = make_kill_handle();
639 child.notify_immediate_failure();
640 // Without another handle to child, the try unwrap should succeed.
641 child.reparent_children_to(&mut parent);
642 let mut parent_inner = parent.unwrap();
643 assert!(parent_inner.child_tombstones.is_none());
644 // Immediate failure should have been propagated.
645 assert!(parent_inner.any_child_failed);
649 fn no_tombstone_because_sibling_already_failed() {
650 do run_in_newsched_task {
651 // Tests "case 0", the optimistic path in reparent_children.
652 let mut parent = make_kill_handle();
653 let mut child1 = make_kill_handle();
654 let mut child2 = make_kill_handle();
655 let mut link = child2.clone();
657 // Should set parent's child_failed flag
658 child1.notify_immediate_failure();
659 child1.reparent_children_to(&mut parent);
660 // Should bypass trying to unwrap child2 entirely.
661 // Otherwise, due to 'link', it would try to tombstone.
662 child2.reparent_children_to(&mut parent);
663 // Should successfully unwrap even though 'link' is still alive.
664 let mut parent_inner = parent.unwrap();
665 assert!(parent_inner.child_tombstones.is_none());
666 // Immediate failure should have been propagated by first child.
667 assert!(parent_inner.any_child_failed);
672 fn one_tombstone_success() {
673 do run_in_newsched_task {
674 let mut parent = make_kill_handle();
675 let mut child = make_kill_handle();
676 let mut link = child.clone();
678 // Creates 1 tombstone. Existence of 'link' makes try-unwrap fail.
679 child.reparent_children_to(&mut parent);
680 // Let parent collect tombstones.
682 // Must have created a tombstone
683 let mut parent_inner = parent.unwrap();
684 assert!(parent_inner.child_tombstones.take_unwrap()());
685 assert!(parent_inner.any_child_failed == false);
689 fn one_tombstone_failure() {
690 do run_in_newsched_task {
691 let mut parent = make_kill_handle();
692 let mut child = make_kill_handle();
693 let mut link = child.clone();
695 // Creates 1 tombstone. Existence of 'link' makes try-unwrap fail.
696 child.reparent_children_to(&mut parent);
697 // Must happen after tombstone to not be immediately propagated.
698 link.notify_immediate_failure();
699 // Let parent collect tombstones.
701 // Must have created a tombstone
702 let mut parent_inner = parent.unwrap();
703 // Failure must be seen in the tombstone.
704 assert!(parent_inner.child_tombstones.take_unwrap()() == false);
705 assert!(parent_inner.any_child_failed == false);
709 fn two_tombstones_success() {
710 do run_in_newsched_task {
711 let mut parent = make_kill_handle();
712 let mut middle = make_kill_handle();
713 let mut child = make_kill_handle();
714 let mut link = child.clone();
716 child.reparent_children_to(&mut middle); // case 1 tombstone
717 // 'middle' should try-unwrap okay, but still have to reparent.
718 middle.reparent_children_to(&mut parent); // case 3 tombstone
719 // Let parent collect tombstones.
721 // Must have created a tombstone
722 let mut parent_inner = parent.unwrap();
723 assert!(parent_inner.child_tombstones.take_unwrap()());
724 assert!(parent_inner.any_child_failed == false);
728 fn two_tombstones_failure() {
729 do run_in_newsched_task {
730 let mut parent = make_kill_handle();
731 let mut middle = make_kill_handle();
732 let mut child = make_kill_handle();
733 let mut link = child.clone();
735 child.reparent_children_to(&mut middle); // case 1 tombstone
736 // Must happen after tombstone to not be immediately propagated.
737 link.notify_immediate_failure();
738 // 'middle' should try-unwrap okay, but still have to reparent.
739 middle.reparent_children_to(&mut parent); // case 3 tombstone
740 // Let parent collect tombstones.
742 // Must have created a tombstone
743 let mut parent_inner = parent.unwrap();
744 // Failure must be seen in the tombstone.
745 assert!(parent_inner.child_tombstones.take_unwrap()() == false);
746 assert!(parent_inner.any_child_failed == false);
750 // Task killing tests
754 do run_in_newsched_task {
755 let mut handle = make_kill_handle();
756 assert!(!handle.killed());
757 assert!(handle.kill().is_none());
758 assert!(handle.killed());
764 do run_in_newsched_task {
765 let mut handle = make_kill_handle();
766 assert!(!handle.killed());
767 assert!(handle.kill().is_none());
768 assert!(handle.killed());
769 assert!(handle.kill().is_none());
770 assert!(handle.killed());
775 fn unkillable_after_kill() {
776 do run_in_newsched_task {
777 let mut handle = make_kill_handle();
778 assert!(handle.kill().is_none());
779 assert!(handle.killed());
780 let handle_cell = Cell::new(handle);
781 let result = do spawntask_try {
782 handle_cell.take().inhibit_kill(false);
784 assert!(result.is_err());
789 fn unkillable_during_kill() {
790 do run_in_newsched_task {
791 let mut handle = make_kill_handle();
792 handle.inhibit_kill(false);
793 assert!(handle.kill().is_none());
794 assert!(!handle.killed());
795 let handle_cell = Cell::new(handle);
796 let result = do spawntask_try {
797 handle_cell.take().allow_kill(false);
799 assert!(result.is_err());
804 fn unkillable_before_kill() {
805 do run_in_newsched_task {
806 let mut handle = make_kill_handle();
807 handle.inhibit_kill(false);
808 handle.allow_kill(false);
809 assert!(handle.kill().is_none());
810 assert!(handle.killed());
814 // Task blocking tests
817 fn block_and_wake() {
818 do with_test_task |mut task| {
819 BlockedTask::try_block(task).unwrap_right().wake().unwrap()
823 #[ignore(reason = "linked failure")]
825 fn block_and_get_killed() {
826 do with_test_task |mut task| {
827 let mut handle = task.death.kill_handle.get_ref().clone();
828 let result = BlockedTask::try_block(task).unwrap_right();
829 let task = handle.kill().unwrap();
830 assert!(result.wake().is_none());
835 #[ignore(reason = "linked failure")]
837 fn block_already_killed() {
838 do with_test_task |mut task| {
839 let mut handle = task.death.kill_handle.get_ref().clone();
840 assert!(handle.kill().is_none());
841 BlockedTask::try_block(task).unwrap_left()
845 #[ignore(reason = "linked failure")]
847 fn block_unkillably_and_get_killed() {
848 do with_test_task |mut task| {
849 let mut handle = task.death.kill_handle.get_ref().clone();
850 task.death.inhibit_kill(false);
851 let result = BlockedTask::try_block(task).unwrap_right();
852 assert!(handle.kill().is_none());
853 let mut task = result.wake().unwrap();
854 // This call wants to fail, but we can't have that happen since
855 // we're not running in a newsched task, so we can't even use
856 // spawntask_try. But the failing behaviour is already tested
857 // above, in unkillable_during_kill(), so we punt on it here.
858 task.death.allow_kill(true);
863 #[ignore(reason = "linked failure")]
866 // Tests the "killable" path of casting to/from uint.
867 do run_in_newsched_task {
868 do with_test_task |mut task| {
869 let result = BlockedTask::try_block(task).unwrap_right();
870 let result = unsafe { result.cast_to_uint() };
871 let result = unsafe { BlockedTask::cast_from_uint(result) };
872 result.wake().unwrap()
877 #[ignore(reason = "linked failure")]
879 fn block_unkillably_on_pipe() {
880 // Tests the "indestructible" path of casting to/from uint.
881 do run_in_newsched_task {
882 do with_test_task |mut task| {
883 task.death.inhibit_kill(false);
884 let result = BlockedTask::try_block(task).unwrap_right();
885 let result = unsafe { result.cast_to_uint() };
886 let result = unsafe { BlockedTask::cast_from_uint(result) };
887 let mut task = result.wake().unwrap();
888 task.death.allow_kill(false);