1 // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 /*!**************************************************************************
12 * Spawning & linked failure
14 * Several data structures are involved in task management to allow properly
15 * propagating failure across linked/supervised tasks.
17 * (1) The "taskgroup_arc" is an unsafe::exclusive which contains a hashset of
18 * all tasks that are part of the group. Some tasks are 'members', which
19 * means if they fail, they will kill everybody else in the taskgroup.
20 * Other tasks are 'descendants', which means they will not kill tasks
21 * from this group, but can be killed by failing members.
23 * A new one of these is created each spawn_linked or spawn_supervised.
25 * (2) The "tcb" is a per-task control structure that tracks a task's spawn
26 * configuration. It contains a reference to its taskgroup_arc, a
27 * reference to its node in the ancestor list (below), a flag for
28 * whether it's part of the 'main'/'root' taskgroup, and an optionally
29 * configured notification port. These are stored in TLS.
31 * (3) The "ancestor_list" is a cons-style list of unsafe::exclusives which
32 * tracks 'generations' of taskgroups -- a group's ancestors are groups
33 * which (directly or transitively) spawn_supervised-ed them. Each task
34 * is recorded in the 'descendants' of each of its ancestor groups.
36 * Spawning a supervised task is O(n) in the number of generations still
37 * alive, and exiting (by success or failure) that task is also O(n).
39 * This diagram depicts the references between these data structures:
41 * linked_________________________________
44 * ( A ) - - - - - - - > | {A,B} {}|< - - -( B )
45 * \___/ |_________| \___/
48 * | //| The following code causes this:
50 * / \ // || | group Y | fn taskA() {
51 * ( C )- - - ||- - - > |{C} {D,E}| spawn(taskB);
52 * \___/ / \=====> |_________| spawn_unlinked(taskC);
53 * supervise /gen \ ...
55 * | //| \__/ fn taskB() { ... }
56 * |__ // /\ _________ fn taskC() {
57 * / \/ || | group Z | spawn_supervised(taskD);
58 * ( D )- - - ||- - - > | {D} {E} | ...
59 * \___/ / \=====> |_________| }
60 * supervise /gen \ fn taskD() {
61 * | __ \ 01 / spawn_supervised(taskE);
64 * / \/ | group W | fn taskE() { ... }
65 * ( E )- - - - - - - > | {E} {} |
68 * "tcb" "taskgroup_arc"
71 ****************************************************************************/
73 #[doc(hidden)]; // FIXME #3538
78 use comm::{Chan, GenericChan};
83 use task::local_data_priv::{local_get, local_set, OldHandle};
84 use task::rt::rust_task;
86 use task::{Failure, ManualThreads, PlatformThread, SchedOpts, SingleThreaded};
87 use task::{Success, TaskOpts, TaskResult, ThreadPerCore, ThreadPerTask};
88 use task::{ExistingScheduler, SchedulerHandle};
93 #[cfg(test)] use task::default_task_opts;
// Forces a value out of a by-reference closure capture by taking an unsafe
// bitwise copy through a raw pointer (a workaround from before move-closures;
// see FIXME #2829 usage below). The caller must never touch the original
// binding again after expansion.
// NOTE(review): the macro's closing delimiter line is elided in this view.
95 macro_rules! move_it (
96 { $x:expr } => ( unsafe { let y = *ptr::to_unsafe_ptr(&($x)); y } )
// A set of raw runtime task pointers; the unit of membership bookkeeping for
// taskgroups (both 'members' and 'descendants' sets use this type).
99 type TaskSet = HashSet<*rust_task>;
// Constructs an empty TaskSet.
// NOTE(review): function body elided in this view (presumably just
// constructs the empty HashSet) — confirm against the full file.
101 fn new_taskset() -> TaskSet {
// Adds `task` to the set. Asserts the task was not already present —
// double-enlisting the same task in one group would be a logic bug.
104 fn taskset_insert(tasks: &mut TaskSet, task: *rust_task) {
105 let didnt_overwrite = tasks.insert(task);
106 assert!(didnt_overwrite);
// Removes `task` from the set. Asserts it was actually present — a task
// leaving a group it never joined would indicate corrupted bookkeeping.
108 fn taskset_remove(tasks: &mut TaskSet, task: *rust_task) {
109 let was_present = tasks.remove(&task);
110 assert!(was_present);
// Visits every task pointer in the set with `blk`, using the old
// internal-iterator protocol: `blk` returning false stops the iteration.
112 pub fn taskset_each(tasks: &TaskSet, blk: &fn(v: *rust_task) -> bool) {
113 tasks.each(|k| blk(*k))
116 // One of these per group of linked-failure tasks.
117 struct TaskGroupData {
118 // All tasks which might kill this group. When this is empty, the group
119 // can be "GC"ed (i.e., its link in the ancestor list can be removed).
120 mut members: TaskSet,
121 // All tasks unidirectionally supervised by (directly or transitively)
122 // tasks in this group.
123 mut descendants: TaskSet,
// Shared, exclusive-locked group state. `None` means the group has already
// failed, which forbids any new enlistments (see enlist_in_taskgroup below).
125 type TaskGroupArc = unstable::Exclusive<Option<TaskGroupData>>;
// Borrowed view of the group state, valid only while the exclusive's lock
// is held by one of the access_* wrappers.
127 type TaskGroupInner<'self> = &'self mut Option<TaskGroupData>;
129 // A taskgroup is 'dead' when nothing can cause it to fail; only members can.
130 fn taskgroup_is_dead(tg: &TaskGroupData) -> bool {
// Empty 'members' means nobody left can kill-all over this group, so its
// ancestor-list node is safe to coalesce away (see each_ancestor).
131 (&const tg.members).is_empty()
134 // A list-like structure by which taskgroups keep track of all ancestor groups
135 // which may kill them. Needed for tasks to be able to remove themselves from
136 // ancestor groups upon exit. The list has a node for each "generation", and
137 // ends either at the root taskgroup (which has no ancestors) or at a
138 // taskgroup which was spawned-unlinked. Tasks from intermediate generations
139 // have references to the middle of the list; when intermediate generations
140 // die, their node in the list will be collected at a descendant's spawn-time.
141 struct AncestorNode {
142 // Since the ancestor list is recursive, we end up with references to
143 // exclusives within other exclusives. This is dangerous business (if
144 // circular references arise, deadlock and memory leaks are imminent).
145 // Hence we assert that this counter monotonically decreases as we
146 // approach the tail of the list.
147 // FIXME(#3068): Make the generation counter togglable with #[cfg(debug)].
// NOTE(review): the `generation: uint` field declaration itself is elided in
// this view; it is read as `nobe.generation` in iterate() below — confirm
// against the full file.
149 // Should really be an immutable non-option. This way appeases borrowck.
150 mut parent_group: Option<TaskGroupArc>,
151 // Recursive rest of the list.
152 mut ancestors: AncestorList,
// Newtype over the optional head node; `None` is the empty list (root or
// spawned-unlinked).
155 struct AncestorList(Option<unstable::Exclusive<AncestorNode>>);
157 // Accessors for taskgroup arcs and ancestor arcs that wrap the unsafety.
// Takes the exclusive's lock and runs `blk` on the group's inner
// Option<TaskGroupData>, returning blk's result.
// NOTE(review): function body elided in this view.
159 fn access_group<U>(x: &TaskGroupArc, blk: &fn(TaskGroupInner) -> U) -> U {
// Takes the exclusive's lock on an ancestor-list node and runs `blk` on it.
// NOTE(review): function body elided in this view.
164 fn access_ancestors<U>(x: &unstable::Exclusive<AncestorNode>,
165 blk: &fn(x: &mut AncestorNode) -> U) -> U {
169 // Iterates over an ancestor list.
170 // (1) Runs forward_blk on each ancestral taskgroup in the list
171 // (2) If forward_blk "break"s, runs optional bail_blk on all ancestral
172 // taskgroups that forward_blk already ran on successfully (Note: bail_blk
173 // is NOT called on the block that forward_blk broke on!).
174 // (3) As a bonus, coalesces away all 'dead' taskgroup nodes in the list.
175 // FIXME(#2190): Change Option<@fn(...)> to Option<&fn(...)>, to save on
176 // allocations. Once that bug is fixed, changing the sigil should suffice.
// NOTE(review): numerous interior lines of this function (closing braces,
// else-arms, and some statements) are elided in this view; the comments
// below describe only what the visible lines establish.
177 fn each_ancestor(list: &mut AncestorList,
178 bail_opt: Option<@fn(TaskGroupInner)>,
179 forward_blk: &fn(TaskGroupInner) -> bool)
181 // "Kickoff" call - there was no last generation.
// Returns true on a complete, unbroken traversal (coalesce reports
// whether unwinding was needed, i.e. !success).
182 return !coalesce(list, bail_opt, forward_blk, uint::max_value);
184 // Recursively iterates, and coalesces afterwards if needed. Returns
185 // whether or not unwinding is needed (i.e., !successful iteration).
186 fn coalesce(list: &mut AncestorList,
187 bail_opt: Option<@fn(TaskGroupInner)>,
188 forward_blk: &fn(TaskGroupInner) -> bool,
189 last_generation: uint) -> bool {
190 // Need to swap the list out to use it, to appease borrowck.
191 let tmp_list = util::replace(&mut *list, AncestorList(None));
192 let (coalesce_this, early_break) =
193 iterate(&tmp_list, bail_opt, forward_blk, last_generation);
194 // What should our next ancestor end up being?
195 if coalesce_this.is_some() {
196 // Needed coalesce. Our next ancestor becomes our old
197 // ancestor's next ancestor. ("next = old_next->next;")
198 *list = coalesce_this.unwrap();
200 // No coalesce; restore from tmp. ("next = old_next;")
206 // Returns an optional list-to-coalesce and whether unwinding is needed.
207 // Option<ancestor_list>:
208 // Whether or not the ancestor taskgroup being iterated over is
209 // dead or not; i.e., it has no more tasks left in it, whether or not
210 // it has descendants. If dead, the caller shall coalesce it away.
212 // True if the supplied block did 'break', here or in any recursive
213 // calls. If so, must call the unwinder on all previous nodes.
214 fn iterate(ancestors: &AncestorList,
215 bail_opt: Option<@fn(TaskGroupInner)>,
216 forward_blk: &fn(TaskGroupInner) -> bool,
217 last_generation: uint)
218 -> (Option<AncestorList>, bool) {
219 // At each step of iteration, three booleans are at play which govern
220 // how the iteration should behave.
221 // 'nobe_is_dead' - Should the list should be coalesced at this point?
222 // Largely unrelated to the other two.
223 // 'need_unwind' - Should we run the bail_blk at this point? (i.e.,
224 // do_continue was false not here, but down the line)
225 // 'do_continue' - Did the forward_blk succeed at this point? (i.e.,
226 // should we recurse? or should our callers unwind?)
228 // The map defaults to None, because if ancestors is None, we're at
229 // the end of the list, which doesn't make sense to coalesce.
230 return do (**ancestors).map_default((None,false)) |ancestor_arc| {
231 // NB: Takes a lock! (this ancestor node)
232 do access_ancestors(ancestor_arc) |nobe| {
233 // Check monotonicity
// Generations must strictly decrease toward the tail; a violation
// would suggest a cycle in the ancestor list (deadlock risk).
234 assert!(last_generation > nobe.generation);
235 /*##########################################################*
236 * Step 1: Look at this ancestor group (call iterator block).
237 *##########################################################*/
238 let mut nobe_is_dead = false;
240 // NB: Takes a lock! (this ancestor node's parent group)
241 do with_parent_tg(&mut nobe.parent_group) |tg_opt| {
242 // Decide whether this group is dead. Note that the
243 // group being *dead* is disjoint from it *failing*.
244 nobe_is_dead = match *tg_opt {
245 Some(ref tg) => taskgroup_is_dead(tg),
248 // Call iterator block. (If the group is dead, it's
249 // safe to skip it. This will leave our *rust_task
250 // hanging around in the group even after it's freed,
251 // but that's ok because, by virtue of the group being
252 // dead, nobody will ever kill-all (foreach) over it.)
253 if nobe_is_dead { true } else { forward_blk(tg_opt) }
255 /*##########################################################*
256 * Step 2: Recurse on the rest of the list; maybe coalescing.
257 *##########################################################*/
258 // 'need_unwind' is only set if blk returned true above, *and*
259 // the recursive call early-broke.
260 let mut need_unwind = false;
262 // NB: Takes many locks! (ancestor nodes & parent groups)
263 need_unwind = coalesce(&mut nobe.ancestors, bail_opt,
264 forward_blk, nobe.generation);
266 /*##########################################################*
267 * Step 3: Maybe unwind; compute return info for our caller.
268 *##########################################################*/
269 if need_unwind && !nobe_is_dead {
// Run the caller-supplied unwinder on this (still-live) node.
270 for bail_opt.each |bail_blk| {
271 do with_parent_tg(&mut nobe.parent_group) |tg_opt| {
276 // Decide whether our caller should unwind.
277 need_unwind = need_unwind || !do_continue;
278 // Tell caller whether or not to coalesce and/or unwind
280 // Swap the list out here; the caller replaces us with it.
281 let rest = util::replace(&mut nobe.ancestors,
283 (Some(rest), need_unwind)
290 // Wrapper around exclusive::with that appeases borrowck.
// Temporarily moves the TaskGroupArc out of the node's Option field, runs
// `blk` under the group's lock, then puts the arc back. The field is assumed
// to be Some on entry (swap_unwrap fails otherwise).
291 fn with_parent_tg<U>(parent_group: &mut Option<TaskGroupArc>,
292 blk: &fn(TaskGroupInner) -> U) -> U {
293 // If this trips, more likely the problem is 'blk' failed inside.
294 let tmp_arc = parent_group.swap_unwrap();
295 let result = do access_group(&tmp_arc) |tg_opt| { blk(tg_opt) };
296 *parent_group = Some(tmp_arc);
302 // One of these per task.
// NOTE(review): the `struct TCB {` header line and several field lines
// (apparently `me`, `is_main`) are elided in this view; the destructor
// below reads self.me, self.is_main, self.tasks, self.ancestors and
// self.notifier — confirm the full field list against the complete file.
305 // List of tasks with whose fates this one's is intertwined.
306 tasks: TaskGroupArc, // 'none' means the group has failed.
307 // Lists of tasks who will kill us if they fail, but whom we won't kill.
308 mut ancestors: AncestorList,
310 notifier: Option<AutoNotify>,
314 // Runs on task exit.
// NOTE(review): the enclosing impl/drop header is elided in this view.
317 // If we are failing, the whole taskgroup needs to die.
318 if rt::rust_task_is_unwinding(self.me) {
319 for self.notifier.each |x| { x.failed = true; }
320 // Take everybody down with us.
321 do access_group(&self.tasks) |tg| {
322 kill_taskgroup(tg, self.me, self.is_main);
325 // Remove ourselves from the group(s).
326 do access_group(&self.tasks) |tg| {
// true = leave as a 'member' of our own group.
327 leave_taskgroup(tg, self.me, true);
330 // It doesn't matter whether this happens before or after dealing
331 // with our own taskgroup, so long as both happen before we die.
332 // We remove ourself from every ancestor we can, so no cleanup; no
// false = we are only a 'descendant' in ancestor groups.
334 for each_ancestor(&mut self.ancestors, None) |ancestor_group| {
335 leave_taskgroup(ancestor_group, self.me, false);
// Constructor for the per-task control block. Clears the notifier's failed
// flag (it is initialised pessimistically to true in AutoNotify below, and
// un-set here once the taskgroup is successfully made).
// NOTE(review): the struct-literal body is partially elided in this view.
341 fn TCB(me: *rust_task, tasks: TaskGroupArc, ancestors: AncestorList,
342 is_main: bool, notifier: Option<AutoNotify>) -> TCB {
343 for notifier.each |x| { x.failed = false; }
348 ancestors: ancestors,
// Sends the task's final result (Success/Failure) on its notification
// channel when dropped, so a result is delivered no matter how the task
// exits. NOTE(review): the `struct AutoNotify {` header, the `failed` field
// declaration, and the drop-fn header are elided in this view.
355 notify_chan: Chan<TaskResult>,
359 impl Drop for AutoNotify {
361 let result = if self.failed { Failure } else { Success };
362 self.notify_chan.send(result);
// Constructor: starts pessimistic (failed = true) so that failing before
// the taskgroup is fully set up still reports Failure; TCB() resets it.
366 fn AutoNotify(chan: Chan<TaskResult>) -> AutoNotify {
369 failed: true // Un-set above when taskgroup successfully made.
// Adds `me` to the group — as a 'member' (can kill the group on failure)
// or as a 'descendant' (can only be killed). Returns false if the group
// has already failed (state is None), in which case enlisting is refused.
// NOTE(review): the trailing else/return lines are elided in this view.
373 fn enlist_in_taskgroup(state: TaskGroupInner, me: *rust_task,
374 is_member: bool) -> bool {
// Swap the state out to take ownership of the group data (borrowck appeasement).
375 let newstate = util::replace(&mut *state, None);
376 // If 'None', the group was failing. Can't enlist.
377 if newstate.is_some() {
378 let group = newstate.unwrap();
379 taskset_insert(if is_member { &mut group.members }
380 else { &mut group.descendants }, me);
381 *state = Some(group);
388 // NB: Runs in destructor/post-exit context. Can't 'fail'.
// Removes `me` from the group's members or descendants set. A no-op if the
// group state is already None (the group failed and a kill signal was sent).
// NOTE(review): the `is_member: bool` parameter line appears elided in this
// view; the body branches on `is_member` — confirm against the full file.
389 fn leave_taskgroup(state: TaskGroupInner, me: *rust_task,
391 let newstate = util::replace(&mut *state, None);
392 // If 'None', already failing and we've already gotten a kill signal.
393 if newstate.is_some() {
394 let group = newstate.unwrap();
395 taskset_remove(if is_member { &mut group.members }
396 else { &mut group.descendants }, me);
397 *state = Some(group);
401 // NB: Runs in destructor/post-exit context. Can't 'fail'.
// Kills every other task in the group (members and descendants), and — for
// the main group — the whole runtime. Leaves the state as None so that the
// group is permanently marked failed and new spawns are forbidden.
402 fn kill_taskgroup(state: TaskGroupInner, me: *rust_task, is_main: bool) {
404 // NB: We could do the killing iteration outside of the group arc, by
405 // having "let mut newstate" here, swapping inside, and iterating
406 // after. But that would let other exiting tasks fall-through and exit
407 // while we were trying to kill them, causing potential
408 // use-after-free. A task's presence in the arc guarantees it's alive
409 // only while we hold the lock, so if we're failing, all concurrently
410 // exiting tasks must wait for us. To do it differently, we'd have to
411 // use the runtime's task refcounting, but that could leave task
412 // structs around long after their task exited.
413 let newstate = util::replace(state, None);
414 // Might already be None, if Somebody is failing simultaneously.
415 // That's ok; only one task needs to do the dirty work. (Might also
416 // see 'None' if Somebody already failed and we got a kill signal.)
417 if newstate.is_some() {
418 let group = newstate.unwrap();
419 for taskset_each(&group.members) |sibling| {
420 // Skip self - killing ourself won't do much good.
// NOTE(review): the `if sibling != me` guard line is elided in this view.
422 rt::rust_task_kill_other(sibling);
425 for taskset_each(&group.descendants) |child| {
// A task is never a descendant in its own group, only a member.
426 assert!(child != me);
427 rt::rust_task_kill_other(child);
429 // Only one task should ever do this.
// NOTE(review): the `if is_main` guard appears elided; kill_all tears
// down the whole runtime — confirm the guard against the full file.
431 rt::rust_task_kill_all(me);
433 // Do NOT restore state to Some(..)! It stays None to indicate
434 // that the whole taskgroup is failing, to forbid new spawns.
436 // (note: multiple tasks may reach this point)
440 // FIXME (#2912): Work around core-vs-coretest function duplication. Can't use
441 // a proper closure because the #[test]s won't understand. Have to fake it.
// Produces the TLS key under which a task's TCB is stored (local_get /
// local_set below). NOTE(review): closing delimiter elided in this view.
442 macro_rules! taskgroup_key (
443 // Use a "code pointer" value that will never be a real code pointer.
444 () => (cast::transmute((-2 as uint, 0u)))
// Computes the (group, ancestors, is_main) triple a new child task should
// be spawned with, based on the spawner's own taskgroup info and the
// linked/supervised spawn options. Lazily creates the main task's group on
// the very first spawn. NOTE(review): several interior lines (match arms,
// struct-literal fields, closing braces) are elided in this view.
447 fn gen_child_taskgroup(linked: bool, supervised: bool)
448 -> (TaskGroupArc, AncestorList, bool) {
450 let spawner = rt::rust_get_task();
451 /*##################################################################*
452 * Step 1. Get spawner's taskgroup info.
453 *##################################################################*/
454 let spawner_group = match local_get(OldHandle(spawner), taskgroup_key!()) {
456 // Main task, doing first spawn ever. Lazily initialise here.
457 let mut members = new_taskset();
458 taskset_insert(&mut members, spawner);
459 let tasks = unstable::exclusive(Some(TaskGroupData {
461 descendants: new_taskset(),
463 // Main task/group has no ancestors, no notifier, etc.
465 @TCB(spawner, tasks, AncestorList(None), true, None);
466 local_set(OldHandle(spawner), taskgroup_key!(), group);
471 /*##################################################################*
472 * Step 2. Process spawn options for child.
473 *##################################################################*/
// NOTE(review): this arm presumably runs when `linked` is true — the
// guard/branch line is elided; confirm against the full file.
475 // Child is in the same group as spawner.
476 let g = spawner_group.tasks.clone();
477 // Child's ancestors are spawner's ancestors.
478 let a = share_ancestors(&mut spawner_group.ancestors);
479 // Propagate main-ness.
480 (g, a, spawner_group.is_main)
482 // Child is in a separate group from spawner.
483 let g = unstable::exclusive(Some(TaskGroupData {
484 members: new_taskset(),
485 descendants: new_taskset(),
487 let a = if supervised {
488 // Child's ancestors start with the spawner.
490 share_ancestors(&mut spawner_group.ancestors);
491 // FIXME(#3068) - The generation counter is only used for a
492 // debug assertion, but initialising it requires locking a
493 // mutex. Hence it should be enabled only in debug builds.
495 match *old_ancestors {
// Next generation = head node's generation + 1 (monotonic check).
497 access_ancestors(arc, |a| a.generation+1)
499 None => 0 // the actual value doesn't really matter.
501 assert!(new_generation < uint::max_value);
502 // Build a new node in the ancestor list.
503 AncestorList(Some(unstable::exclusive(AncestorNode {
504 generation: new_generation,
505 parent_group: Some(spawner_group.tasks.clone()),
506 ancestors: old_ancestors,
509 // Child has no ancestors.
// Clones the head of an ancestor list (the arcs are reference-counted, so
// this shares, not copies, the chain), restoring the original afterwards.
// NOTE(review): the branch handling the empty (None) list is elided here.
516 fn share_ancestors(ancestors: &mut AncestorList) -> AncestorList {
517 // Appease the borrow-checker. Really this wants to be written as:
519 // Some(ancestor_arc) { ancestor_list(Some(ancestor_arc.clone())) }
520 // None { ancestor_list(None) }
521 let tmp = util::replace(&mut **ancestors, None);
523 let ancestor_arc = tmp.unwrap();
524 let result = ancestor_arc.clone();
525 **ancestors = Some(ancestor_arc);
526 AncestorList(Some(result))
// Public spawn entry point: dispatches to the old or new scheduler
// implementation depending on the current runtime context, and fails if
// called from a context that cannot spawn (scheduler/global).
// NOTE(review): the match header and several arm lines are elided here.
533 pub fn spawn_raw(opts: TaskOpts, f: ~fn()) {
538 spawn_raw_oldsched(opts, f)
541 spawn_raw_newsched(opts, f)
543 SchedulerContext => {
544 fail!(~"can't spawn from scheduler context")
547 fail!(~"can't spawn from global context")
// New-runtime spawn path: hands the body `f` straight to the local
// scheduler. Note that the task options are ignored here (no linked-failure
// bookkeeping on this path yet).
552 fn spawn_raw_newsched(_opts: TaskOpts, f: ~fn()) {
555 let mut sched = local_sched::take();
556 let task = ~Task::new(&mut sched.stack_pool, f);
557 sched.schedule_new_task(task);
// Old-runtime spawn path: sets up the child's taskgroup membership, creates
// the runtime task (possibly in another scheduler), wraps the body with the
// enlist/notify logic, and starts it. The comments about kill-windows mark
// spots where being killed would leak or double-free resources.
// NOTE(review): some interior lines (e.g. the unsafe/atomic guard around
// the kill window, a None arm for notify_chan) are elided in this view.
560 fn spawn_raw_oldsched(opts: TaskOpts, f: ~fn()) {
562 let (child_tg, ancestors, is_main) =
563 gen_child_taskgroup(opts.linked, opts.supervised);
566 let child_data = Cell((child_tg, ancestors, f));
567 // Being killed with the unsafe task/closure pointers would leak them.
569 // Agh. Get move-mode items into the closure. FIXME (#2829)
570 let (child_tg, ancestors, f) = child_data.take();
571 // Create child task.
572 let new_task = match opts.sched.mode {
573 DefaultScheduler => rt::new_task(),
574 _ => new_task_in_sched(opts.sched)
576 assert!(!new_task.is_null());
577 // Getting killed after here would leak the task.
578 let notify_chan = if opts.notify_chan.is_none() {
581 Some(opts.notify_chan.swap_unwrap())
584 let child_wrapper = make_child_wrapper(new_task, child_tg,
585 ancestors, is_main, notify_chan, f);
587 let closure = cast::transmute(&child_wrapper);
589 // Getting killed between these two calls would free the child's
590 // closure. (Reordering them wouldn't help - then getting killed
591 // between them would leak.)
592 rt::start_task(new_task, closure);
// Ownership of the wrapper has been handed to the runtime; forget it so
// it is not also dropped here.
593 cast::forget(child_wrapper);
597 // This function returns a closure-wrapper that we pass to the child task.
598 // (1) It sets up the notification channel.
599 // (2) It attempts to enlist in the child's group and all ancestor groups.
600 // (3a) If any of those fails, it leaves all groups, and does nothing.
601 // (3b) Otherwise it builds a task control structure and puts it in TLS,
602 // (4) ...and runs the provided body function.
// NOTE(review): the function signature's final parameters/return type and
// various interior lines are elided in this view.
603 fn make_child_wrapper(child: *rust_task, child_arc: TaskGroupArc,
604 ancestors: AncestorList, is_main: bool,
605 notify_chan: Option<Chan<TaskResult>>,
608 let child_data = Cell((child_arc, ancestors));
609 let result: ~fn() = || {
610 // Agh. Get move-mode items into the closure. FIXME (#2829)
611 let mut (child_arc, ancestors) = child_data.take();
612 // Child task runs this code.
614 // Even if the below code fails to kick the child off, we must
615 // send Something on the notify channel.
617 //let mut notifier = None;//notify_chan.map(|c| AutoNotify(c));
618 let notifier = match notify_chan {
619 Some(ref notify_chan_value) => {
// move_it! bitwise-moves the chan out of the by-ref capture.
620 let moved_ncv = move_it!(*notify_chan_value);
621 Some(AutoNotify(moved_ncv))
626 if enlist_many(child, &child_arc, &mut ancestors) {
627 let group = @TCB(child, child_arc, ancestors,
// Stash the TCB in the child's TLS; its destructor runs the
// taskgroup-exit logic when the child dies.
630 local_set(OldHandle(child), taskgroup_key!(), group);
633 // Run the child's body.
636 // TLS cleanup code will exit the taskgroup.
639 // Run the box annihilator.
640 // FIXME #4428: Crashy.
641 // unsafe { cleanup::annihilate(); }
645 // Set up membership in taskgroup and descendantship in all ancestor
646 // groups. If any enlistment fails, Some task was already failing, so
647 // don't let the child task run, and undo every successful enlistment.
648 fn enlist_many(child: *rust_task, child_arc: &TaskGroupArc,
649 ancestors: &mut AncestorList) -> bool {
650 // Join this taskgroup.
// NOTE(review): the binding of this result (and the final combined
// return) is elided in this view.
652 do access_group(child_arc) |child_tg| {
653 enlist_in_taskgroup(child_tg, child, true) // member
656 // Unwinding function in case any ancestral enlisting fails
657 let bail: @fn(TaskGroupInner) = |tg| {
658 leave_taskgroup(tg, child, false)
660 // Attempt to join every ancestor group.
662 for each_ancestor(ancestors, Some(bail)) |ancestor_tg| {
663 // Enlist as a descendant, not as an actual member.
664 // Descendants don't kill ancestor groups on failure.
665 if !enlist_in_taskgroup(ancestor_tg, child, false) {
669 // If any ancestor group fails, need to exit this group too.
671 do access_group(child_arc) |child_tg| {
672 leave_taskgroup(child_tg, child, true); // member
// Creates a runtime task on a non-default scheduler: validates the
// scheduler options, picks a thread count per mode, then resolves or
// creates the target scheduler and allocates the task in it.
// NOTE(review): a few interior lines (e.g. the DefaultScheduler arm, the
// threads>0 guard) are elided in this view.
680 fn new_task_in_sched(opts: SchedOpts) -> *rust_task {
681 if opts.foreign_stack_size != None {
682 fail!(~"foreign_stack_size scheduler option unimplemented");
685 let num_threads = match opts.mode {
688 | ExistingScheduler(*)
689 | PlatformThread => 0u, /* Won't be used */
690 SingleThreaded => 1u,
691 ThreadPerCore => unsafe { rt::rust_num_threads() },
693 fail!(~"ThreadPerTask scheduling mode unimplemented")
695 ManualThreads(threads) => {
697 fail!(~"can not create a scheduler with no threads");
704 let sched_id = match opts.mode {
705 CurrentScheduler => rt::rust_get_sched_id(),
706 ExistingScheduler(SchedulerHandle(id)) => id,
707 PlatformThread => rt::rust_osmain_sched_id(),
708 _ => rt::rust_new_sched(num_threads)
710 rt::rust_new_task_in_sched(sched_id)
// Smoke test: a default-options spawn runs and can communicate back over
// a stream channel. NOTE(review): test bodies are heavily elided in this
// view (#[test] attributes and most assertions are missing lines).
716 fn test_spawn_raw_simple() {
717 let (po, ch) = stream();
718 do spawn_raw(default_task_opts()) {
725 #[ignore(cfg(windows))]
// Checks that an unlinked/unsupervised child's failure does not propagate.
726 fn test_spawn_raw_unsupervise() {
727 let opts = task::TaskOpts {
730 .. default_task_opts()
738 #[ignore(cfg(windows))]
// Checks that a successful task reports Success on its notify channel.
739 fn test_spawn_raw_notify_success() {
740 let (notify_po, notify_ch) = comm::stream();
742 let opts = task::TaskOpts {
743 notify_chan: Some(notify_ch),
744 .. default_task_opts()
748 assert!(notify_po.recv() == Success);
752 #[ignore(cfg(windows))]
753 fn test_spawn_raw_notify_failure() {
754 // New bindings for these
755 let (notify_po, notify_ch) = comm::stream();
757 let opts = task::TaskOpts {
759 notify_chan: Some(notify_ch),
760 .. default_task_opts()
765 assert!(notify_po.recv() == Failure);