1 // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 /*!**************************************************************************
12 * Spawning & linked failure
14 * Several data structures are involved in task management to allow properly
15 * propagating failure across linked/supervised tasks.
17 * (1) The "taskgroup_arc" is an unsafe::exclusive which contains a hashset of
18 * all tasks that are part of the group. Some tasks are 'members', which
19 * means if they fail, they will kill everybody else in the taskgroup.
20 * Other tasks are 'descendants', which means they will not kill tasks
21 * from this group, but can be killed by failing members.
23 * A new one of these is created each spawn_linked or spawn_supervised.
25 * (2) The "tcb" is a per-task control structure that tracks a task's spawn
26 * configuration. It contains a reference to its taskgroup_arc, a
27 * reference to its node in the ancestor list (below), a flag for
28 * whether it's part of the 'main'/'root' taskgroup, and an optionally
29 * configured notification port. These are stored in TLS.
31 * (3) The "ancestor_list" is a cons-style list of unsafe::exclusives which
32 * tracks 'generations' of taskgroups -- a group's ancestors are groups
33 * which (directly or transitively) spawn_supervised-ed them. Each task
34 * is recorded in the 'descendants' of each of its ancestor groups.
36 * Spawning a supervised task is O(n) in the number of generations still
37 * alive, and exiting (by success or failure) that task is also O(n).
39 * This diagram depicts the references between these data structures:
41 * linked_________________________________
44 * ( A ) - - - - - - - > | {A,B} {}|< - - -( B )
45 * \___/ |_________| \___/
48 * | //| The following code causes this:
50 * / \ // || | group Y | fn taskA() {
51 * ( C )- - - ||- - - > |{C} {D,E}| spawn(taskB);
52 * \___/ / \=====> |_________| spawn_unlinked(taskC);
53 * supervise /gen \ ...
55 * | //| \__/ fn taskB() { ... }
56 * |__ // /\ _________ fn taskC() {
57 * / \/ || | group Z | spawn_supervised(taskD);
58 * ( D )- - - ||- - - > | {D} {E} | ...
59 * \___/ / \=====> |_________| }
60 * supervise /gen \ fn taskD() {
61 * | __ \ 01 / spawn_supervised(taskE);
64 * / \/ | group W | fn taskE() { ... }
65 * ( E )- - - - - - - > | {E} {} |
68 * "tcb" "taskgroup_arc"
71 ****************************************************************************/
80 use container::MutableMap;
81 use comm::{Chan, GenericChan, oneshot};
82 use hashmap::{HashSet, HashSetConsumeIterator};
84 use task::local_data_priv::{local_get, local_set, OldHandle};
85 use task::rt::rust_task;
87 use task::{Failure, SingleThreaded};
88 use task::{Success, TaskOpts, TaskResult};
90 use to_bytes::IterBytes;
93 use unstable::sync::Exclusive;
94 use rt::{OldTaskContext, TaskContext, SchedulerContext, GlobalContext, context};
96 use rt::task::{Task, Sched};
97 use rt::kill::KillHandle;
98 use rt::sched::Scheduler;
99 use rt::uv::uvio::UvEventLoop;
100 use rt::thread::Thread;
102 #[cfg(test)] use task::default_task_opts;
103 #[cfg(test)] use comm;
104 #[cfg(test)] use task;
// Cloning a TaskHandle: old-runtime handles are raw task pointers and are
// copied bitwise; new-runtime handles wrap a KillHandle and need a real clone.
// NOTE(review): the `match *self {` line and closing braces are elided in
// this excerpt; code below is the visible portion only.
impl Clone for TaskHandle {
    fn clone(&self) -> TaskHandle {
        OldTask(x) => OldTask(x),
        NewTask(ref x) => NewTask(x.clone()),
// Hashing support so TaskHandles can live in the taskgroup HashSet; both
// variants just delegate to the wrapped handle's byte iteration.
// NOTE(review): the `match *self {` line and closing braces are elided in
// this excerpt.
impl IterBytes for TaskHandle {
    fn iter_bytes(&self, lsb0: bool, f: &fn(buf: &[u8]) -> bool) -> bool {
        OldTask(ref x) => x.iter_bytes(lsb0, f),
        NewTask(ref x) => x.iter_bytes(lsb0, f),
// Newtype over HashSet so membership changes can assert the taskgroup
// invariants (no double-insert, no phantom removal).
struct TaskSet(HashSet<TaskHandle>);

// NOTE(review): the enclosing `impl TaskSet {` header and method closing
// braces are elided in this excerpt.
    // Create an empty task set.
    fn new() -> TaskSet {
        TaskSet(HashSet::new())
    // Add a task; asserts the task was not already a member.
    fn insert(&mut self, task: TaskHandle) {
        let didnt_overwrite = (**self).insert(task);
        assert!(didnt_overwrite);
    // Remove a task; asserts the task actually was a member.
    fn remove(&mut self, task: &TaskHandle) {
        let was_present = (**self).remove(task);
        assert!(was_present);
    // Consume the set, yielding each member once (used when killing a group).
    fn consume(self) -> HashSetConsumeIterator<TaskHandle> {
// One of these per group of linked-failure tasks.
struct TaskGroupData {
    // All tasks which might kill this group. When this is empty, the group
    // can be "GC"ed (i.e., its link in the ancestor list can be removed).
    // NOTE(review): the `members: TaskSet,` field line appears to be elided
    // in this excerpt -- confirm against the full file.
    // All tasks unidirectionally supervised by (directly or transitively)
    // tasks in this group.
    descendants: TaskSet,
// Lock-protected shared group state; 'None' means the group has failed.
type TaskGroupArc = Exclusive<Option<TaskGroupData>>;
// Borrowed view of the group state while the exclusive's lock is held.
type TaskGroupInner<'self> = &'self mut Option<TaskGroupData>;
// A taskgroup is 'dead' when nothing can cause it to fail; only members can.
// (Descendants alone cannot kill the group, so an empty member set means the
// group's node can be coalesced out of the ancestor list.)
fn taskgroup_is_dead(tg: &TaskGroupData) -> bool {
    tg.members.is_empty()
// A list-like structure by which taskgroups keep track of all ancestor groups
// which may kill them. Needed for tasks to be able to remove themselves from
// ancestor groups upon exit. The list has a node for each "generation", and
// ends either at the root taskgroup (which has no ancestors) or at a
// taskgroup which was spawned-unlinked. Tasks from intermediate generations
// have references to the middle of the list; when intermediate generations
// die, their node in the list will be collected at a descendant's spawn-time.
struct AncestorNode {
    // Since the ancestor list is recursive, we end up with references to
    // exclusives within other exclusives. This is dangerous business (if
    // circular references arise, deadlock and memory leaks are imminent).
    // Hence we assert that this counter monotonically decreases as we
    // approach the tail of the list.
    // NOTE(review): the `generation: uint,` field line appears to be elided
    // in this excerpt (check_generation/incr_generation read it).
    // Handle to the tasks in the group of the current generation.
    parent_group: TaskGroupArc,
    // Recursive rest of the list.
    ancestors: AncestorList,
// Head of the list; 'None' marks the end (root or spawned-unlinked group).
struct AncestorList(Option<Exclusive<AncestorNode>>);
// Accessors for taskgroup arcs and ancestor arcs that wrap the unsafety.
// NOTE(review): both function bodies are elided in this excerpt; only the
// signatures are visible.
// Run `blk` on the group state while holding the exclusive's lock.
fn access_group<U>(x: &TaskGroupArc, blk: &fn(TaskGroupInner) -> U) -> U {
// Run `blk` on an ancestor node while holding its lock.
fn access_ancestors<U>(x: &Exclusive<AncestorNode>,
                       blk: &fn(x: &mut AncestorNode) -> U) -> U {
210 #[inline] #[cfg(test)]
211 fn check_generation(younger: uint, older: uint) { assert!(younger > older); }
212 #[inline] #[cfg(not(test))]
213 fn check_generation(_younger: uint, _older: uint) { }
// Returns the generation number for a child's new ancestor node: one past the
// current head's generation, or 0 if the list is empty. Only meaningful in
// test builds, where check_generation verifies monotonicity.
// NOTE(review): the closing brace of the test variant is elided in this
// excerpt.
#[inline] #[cfg(test)]
fn incr_generation(ancestors: &AncestorList) -> uint {
    ancestors.map_default(0, |arc| access_ancestors(arc, |a| a.generation+1))
// Non-test builds never check generations, so 0 suffices.
#[inline] #[cfg(not(test))]
fn incr_generation(_ancestors: &AncestorList) -> uint { 0 }
// Iterates over an ancestor list.
// (1) Runs forward_blk on each ancestral taskgroup in the list
// (2) If forward_blk "break"s, runs optional bail_blk on all ancestral
//     taskgroups that forward_blk already ran on successfully (Note: bail_blk
//     is NOT called on the block that forward_blk broke on!).
// (3) As a bonus, coalesces away all 'dead' taskgroup nodes in the list.
// NOTE(review): several lines of this function (the return type, some match
// arms/arguments, closing braces) are elided in this excerpt; the code below
// is the visible portion only.
fn each_ancestor(list: &mut AncestorList,
                 bail_blk: &fn(TaskGroupInner),
                 forward_blk: &fn(TaskGroupInner) -> bool)
    // "Kickoff" call - there was no last generation.
    return !coalesce(list, bail_blk, forward_blk, uint::max_value);

    // Recursively iterates, and coalesces afterwards if needed. Returns
    // whether or not unwinding is needed (i.e., !successful iteration).
    fn coalesce(list: &mut AncestorList,
                bail_blk: &fn(TaskGroupInner),
                forward_blk: &fn(TaskGroupInner) -> bool,
                last_generation: uint) -> bool {
        let (coalesce_this, early_break) =
            iterate(list, bail_blk, forward_blk, last_generation);
        // What should our next ancestor end up being?
        if coalesce_this.is_some() {
            // Needed coalesce. Our next ancestor becomes our old
            // ancestor's next ancestor. ("next = old_next->next;")
            *list = coalesce_this.unwrap();

    // Returns an optional list-to-coalesce and whether unwinding is needed.
    // Option<ancestor_list>:
    //     Whether or not the ancestor taskgroup being iterated over is
    //     dead or not; i.e., it has no more tasks left in it, whether or not
    //     it has descendants. If dead, the caller shall coalesce it away.
    // bool:
    //     True if the supplied block did 'break', here or in any recursive
    //     calls. If so, must call the unwinder on all previous nodes.
    fn iterate(ancestors: &mut AncestorList,
               bail_blk: &fn(TaskGroupInner),
               forward_blk: &fn(TaskGroupInner) -> bool,
               last_generation: uint)
            -> (Option<AncestorList>, bool) {
        // At each step of iteration, three booleans are at play which govern
        // how the iteration should behave.
        // 'nobe_is_dead' - Should the list be coalesced at this point?
        //                  Largely unrelated to the other two.
        // 'need_unwind' - Should we run the bail_blk at this point? (i.e.,
        //                 do_continue was false not here, but down the line)
        // 'do_continue' - Did the forward_blk succeed at this point? (i.e.,
        //                 should we recurse? or should our callers unwind?)

        let forward_blk = Cell::new(forward_blk);

        // The map defaults to None, because if ancestors is None, we're at
        // the end of the list, which doesn't make sense to coalesce.
        do ancestors.map_default((None,false)) |ancestor_arc| {
            // NB: Takes a lock! (this ancestor node)
            do access_ancestors(ancestor_arc) |nobe| {
                // Argh, but we couldn't give it to coalesce() otherwise.
                let forward_blk = forward_blk.take();
                // Check monotonicity
                check_generation(last_generation, nobe.generation);
                /*##########################################################*
                 * Step 1: Look at this ancestor group (call iterator block).
                 *##########################################################*/
                let mut nobe_is_dead = false;
                // NB: Takes a lock! (this ancestor node's parent group)
                do access_group(&nobe.parent_group) |tg_opt| {
                    // Decide whether this group is dead. Note that the
                    // group being *dead* is disjoint from it *failing*.
                    nobe_is_dead = match *tg_opt {
                        Some(ref tg) => taskgroup_is_dead(tg),
                    // Call iterator block. (If the group is dead, it's
                    // safe to skip it. This will leave our TaskHandle
                    // hanging around in the group even after it's freed,
                    // but that's ok because, by virtue of the group being
                    // dead, nobody will ever kill-all (for) over it.)
                    if nobe_is_dead { true } else { forward_blk(tg_opt) }
                /*##########################################################*
                 * Step 2: Recurse on the rest of the list; maybe coalescing.
                 *##########################################################*/
                // 'need_unwind' is only set if blk returned true above, *and*
                // the recursive call early-broke.
                let mut need_unwind = false;
                // NB: Takes many locks! (ancestor nodes & parent groups)
                need_unwind = coalesce(&mut nobe.ancestors, |tg| bail_blk(tg),
                                       forward_blk, nobe.generation);
                /*##########################################################*
                 * Step 3: Maybe unwind; compute return info for our caller.
                 *##########################################################*/
                if need_unwind && !nobe_is_dead {
                    do access_group(&nobe.parent_group) |tg_opt| {
                // Decide whether our caller should unwind.
                need_unwind = need_unwind || !do_continue;
                // Tell caller whether or not to coalesce and/or unwind
                // Swap the list out here; the caller replaces us with it.
                let rest = util::replace(&mut nobe.ancestors,
                (Some(rest), need_unwind)
// One of these per task.
pub struct Taskgroup {
    // List of tasks with whose fates this one's is intertwined.
    tasks: TaskGroupArc, // 'none' means the group has failed.
    // Lists of tasks who will kill us if they fail, but whom we won't kill.
    ancestors: AncestorList,
    // NOTE(review): an `is_main: bool` field (read by the Drop impl) appears
    // to be elided in this excerpt.
    // Optional channel on which Success/Failure is reported at task exit.
    notifier: Option<AutoNotify>,
// Exit-time destructor: propagates failure to the group and removes this
// task from its own group and from every ancestor group.
// NOTE(review): the `fn drop` header, the failing-branch conditional, and
// closing braces are elided in this excerpt; nesting below is approximate.
impl Drop for Taskgroup {
    // Runs on task exit.
        // FIXME(#4330) Need self by value to get mutability.
        let this: &mut Taskgroup = transmute(self);
        // If we are failing, the whole taskgroup needs to die.
        do RuntimeGlue::with_task_handle_and_failing |me, failing| {
            // Mark the notifier (if any) with the failure state.
            for x in this.notifier.mut_iter() {
                // Take everybody down with us.
                do access_group(&self.tasks) |tg| {
                    kill_taskgroup(tg, &me, self.is_main);
            // Remove ourselves from the group(s).
            do access_group(&self.tasks) |tg| {
                leave_taskgroup(tg, &me, true);
            // It doesn't matter whether this happens before or after dealing
            // with our own taskgroup, so long as both happen before we die.
            // We remove ourself from every ancestor we can, so no cleanup; no
            do each_ancestor(&mut this.ancestors, |_| {}) |ancestor_group| {
                leave_taskgroup(ancestor_group, &me, false);
// Constructor for a task's Taskgroup record. Disarms the AutoNotify's
// default "failed" flag once the group is successfully built.
// NOTE(review): an `is_main: bool` parameter line and most of the body are
// elided in this excerpt.
pub fn Taskgroup(tasks: TaskGroupArc,
                 ancestors: AncestorList,
                 mut notifier: Option<AutoNotify>) -> Taskgroup {
    for x in notifier.mut_iter() {
        ancestors: ancestors,
// RAII notifier: on drop, reports the task's result on its channel.
// NOTE(review): the `struct AutoNotify {` header, its `failed` field, the
// Drop `fn drop` header, and the constructor body are elided in this excerpt.
    // Channel on which the task's result is reported at exit.
    notify_chan: Chan<TaskResult>,
impl Drop for AutoNotify {
        // `failed` defaults to true and is cleared once setup succeeds, so
        // dying before setup completes still reports Failure.
        let result = if self.failed { Failure } else { Success };
        self.notify_chan.send(result);
fn AutoNotify(chan: Chan<TaskResult>) -> AutoNotify {
        failed: true // Un-set above when taskgroup successfully made.
// Try to add `me` to a taskgroup, as a member (can kill the group) or as a
// descendant (can only be killed). Returns false if the group already failed.
// NOTE(review): the is_member branch selecting members vs. descendants is
// partially elided in this excerpt.
fn enlist_in_taskgroup(state: TaskGroupInner, me: TaskHandle,
                       is_member: bool) -> bool {
    let me = Cell::new(me); // :(
    // If 'None', the group was failing. Can't enlist.
    do state.map_mut_default(false) |group| {
            &mut group.descendants
        }).insert(me.take());
// Remove `me` from a taskgroup's member or descendant set on exit.
// NB: Runs in destructor/post-exit context. Can't 'fail'.
// NOTE(review): the `is_member: bool)` parameter line and the branch
// selecting the set are elided in this excerpt.
fn leave_taskgroup(state: TaskGroupInner, me: &TaskHandle,
    let me = Cell::new(me); // :(
    // If 'None', already failing and we've already gotten a kill signal.
    do state.map_mut |group| {
            &mut group.descendants
        }).remove(me.take());
// Kill every other task in the group: swap the state to None (marking the
// group failed, forbidding new spawns) and kill each member and descendant.
// NB: Runs in destructor/post-exit context. Can't 'fail'.
fn kill_taskgroup(state: TaskGroupInner, me: &TaskHandle, is_main: bool) {
    // NB: We could do the killing iteration outside of the group arc, by
    // having "let mut newstate" here, swapping inside, and iterating
    // after. But that would let other exiting tasks fall-through and exit
    // while we were trying to kill them, causing potential
    // use-after-free. A task's presence in the arc guarantees it's alive
    // only while we hold the lock, so if we're failing, all concurrently
    // exiting tasks must wait for us. To do it differently, we'd have to
    // use the runtime's task refcounting, but that could leave task
    // structs around long after their task exited.
    let newstate = util::replace(state, None);
    // Might already be None, if Somebody is failing simultaneously.
    // That's ok; only one task needs to do the dirty work. (Might also
    // see 'None' if Somebody already failed and we got a kill signal.)
    if newstate.is_some() {
        // NOTE(review): the `newstate.unwrap();` line and the skip-self
        // check inside the members loop appear to be elided in this excerpt.
        let TaskGroupData { members: members, descendants: descendants } =
        for sibling in members.consume() {
            // Skip self - killing ourself won't do much good.
                RuntimeGlue::kill_task(sibling);
        for child in descendants.consume() {
            assert!(&child != me);
            RuntimeGlue::kill_task(child);
        // Only one task should ever do this.
            RuntimeGlue::kill_all_tasks(me);
        // Do NOT restore state to Some(..)! It stays None to indicate
        // that the whole taskgroup is failing, to forbid new spawns.
        // (note: multiple tasks may reach this point)
// TLS key under which a task's Taskgroup is stored in the old runtime.
// FIXME (#2912): Work around core-vs-coretest function duplication. Can't use
// a proper closure because the #[test]s won't understand. Have to fake it.
fn taskgroup_key() -> local_data::Key<@@mut Taskgroup> {
    // Magic sentinel address used as the TLS key (see FIXME above).
    unsafe { cast::transmute(-2) }
// Kill a single task, dispatching on which runtime owns it: old runtime
// kills via the C runtime call; new runtime kills through the KillHandle and
// re-enqueues the woken task on the scheduler.
// NOTE(review): the enclosing `impl RuntimeGlue` header, the `match task {`
// line, the NewTask arm header, and closing braces are elided in this
// excerpt.
    unsafe fn kill_task(task: TaskHandle) {
        OldTask(ptr) => rt::rust_task_kill_other(ptr),
            let mut handle = handle;
            do handle.kill().map_move |killed_task| {
                let killed_task = Cell::new(killed_task);
                do Local::borrow::<Scheduler, ()> |sched| {
                    sched.enqueue_task(killed_task.take());
// Kill every task (main-group failure). Only the old runtime supports this;
// the new runtime aborts instead (see FIXME below).
// NOTE(review): the `match *task {` line and closing braces are elided in
// this excerpt.
    unsafe fn kill_all_tasks(task: &TaskHandle) {
        OldTask(ptr) => rt::rust_task_kill_all(ptr),
        // FIXME(#7544): Remove the kill_all feature entirely once the
        // oldsched goes away.
        NewTask(ref _handle) => rtabort!("can't kill_all in newsched"),
// Run `blk` with the current task's handle and whether it is unwinding,
// dispatching on which runtime context we are in.
// NOTE(review): the `match context() {` line and closing braces are elided
// in this excerpt.
    fn with_task_handle_and_failing(blk: &fn(TaskHandle, bool)) {
        OldTaskContext => unsafe {
            let me = rt::rust_get_task();
            blk(OldTask(me), rt::rust_task_is_unwinding(me))
        TaskContext => unsafe {
            // Can't use safe borrow, because the taskgroup destructor needs to
            // access the scheduler again to send kill signals to other tasks.
            let me = Local::unsafe_borrow::<Task>();
            // FIXME(#7544): Get rid of this clone by passing by-ref.
            // Will probably have to wait until the old rt is gone.
            blk(NewTask((*me).death.kill_handle.get_ref().clone()),
                (*me).unwinder.unwinding)
        SchedulerContext | GlobalContext => rtabort!("task dying in bad context"),
// Run `blk` with the current task's Taskgroup, lazily creating the group on
// first use (both runtimes defer group creation until the first spawn).
// NOTE(review): the `match context() {` line, a `members: members,` field
// line, None match arms, and closing braces are elided in this excerpt.
    fn with_my_taskgroup<U>(blk: &fn(&Taskgroup) -> U) -> U {
        OldTaskContext => unsafe {
            let me = rt::rust_get_task();
            do local_get(OldHandle(me), taskgroup_key()) |g| {
                    // Main task, doing first spawn ever. Lazily initialise here.
                    let mut members = TaskSet::new();
                    members.insert(OldTask(me));
                    let tasks = Exclusive::new(Some(TaskGroupData {
                        descendants: TaskSet::new(),
                    // Main task/group has no ancestors, no notifier, etc.
                    let group = @@mut Taskgroup(tasks, AncestorList(None),
                    local_set(OldHandle(me), taskgroup_key(), group);
                Some(&group) => blk(&**group)
        TaskContext => unsafe {
            // Can't use safe borrow, because creating new hashmaps for the
            // tasksets requires an rng, which needs to borrow the sched.
            let me = Local::unsafe_borrow::<Task>();
            blk(match (*me).taskgroup {
                    // First task in its (unlinked/unsupervised) taskgroup.
                    // Lazily initialize.
                    let mut members = TaskSet::new();
                    let my_handle = (*me).death.kill_handle.get_ref().clone();
                    members.insert(NewTask(my_handle));
                    let tasks = Exclusive::new(Some(TaskGroupData {
                        descendants: TaskSet::new(),
                    // FIXME(#7544): Remove the is_main flag entirely once
                    // the newsched goes away. The main taskgroup has no special
                    let group = Taskgroup(tasks, AncestorList(None), false, None);
                    (*me).taskgroup = Some(group);
                    (*me).taskgroup.get_ref()
                Some(ref group) => group,
        SchedulerContext | GlobalContext => rtabort!("spawning in bad context"),
// Compute the taskgroup configuration for a child about to be spawned:
// linked children share the spawner's group; supervised children get a new
// group whose ancestor list starts with the spawner's group.
// Returns 'None' in the case where the child's TG should be lazily initialized.
fn gen_child_taskgroup(linked: bool, supervised: bool)
    -> Option<(TaskGroupArc, AncestorList, bool)> {
    // FIXME(#7544): Not safe to lazily initialize in the old runtime. Remove
    // this context check once 'spawn_raw_oldsched' is gone.
    // NOTE(review): several branch/closing lines are elided in this excerpt.
    if context() == OldTaskContext || linked || supervised {
        // with_my_taskgroup will lazily initialize the parent's taskgroup if
        // it doesn't yet exist. We don't want to call it in the unlinked case.
        do RuntimeGlue::with_my_taskgroup |spawner_group| {
            let ancestors = AncestorList(spawner_group.ancestors.map(|x| x.clone()));
                // Child is in the same group as spawner.
                // Child's ancestors are spawner's ancestors.
                // Propagate main-ness.
                Some((spawner_group.tasks.clone(), ancestors, spawner_group.is_main))
                // Child is in a separate group from spawner.
                let g = Exclusive::new(Some(TaskGroupData {
                    members: TaskSet::new(),
                    descendants: TaskSet::new(),
                let a = if supervised {
                    let new_generation = incr_generation(&ancestors);
                    assert!(new_generation < uint::max_value);
                    // Child's ancestors start with the spawner.
                    // Build a new node in the ancestor list.
                    AncestorList(Some(Exclusive::new(AncestorNode {
                        generation: new_generation,
                        parent_group: spawner_group.tasks.clone(),
                        ancestors: ancestors,
                    // Child has no ancestors.
// Set up membership in taskgroup and descendantship in all ancestor
// groups. If any enlistment fails, Some task was already failing, so
// don't let the child task run, and undo every successful enlistment.
// NOTE(review): the `if !result` guard before the ancestor loop and closing
// braces are elided in this excerpt.
fn enlist_many(child: TaskHandle, child_arc: &TaskGroupArc,
               ancestors: &mut AncestorList) -> bool {
    // Join this taskgroup.
    let mut result = do access_group(child_arc) |child_tg| {
        enlist_in_taskgroup(child_tg, child.clone(), true) // member
    // Unwinding function in case any ancestral enlisting fails
    let bail: &fn(TaskGroupInner) = |tg| { leave_taskgroup(tg, &child, false) };
    // Attempt to join every ancestor group.
    result = do each_ancestor(ancestors, bail) |ancestor_tg| {
        // Enlist as a descendant, not as an actual member.
        // Descendants don't kill ancestor groups on failure.
        enlist_in_taskgroup(ancestor_tg, child.clone(), false)
    // If any ancestor group fails, need to exit this group too.
        do access_group(child_arc) |child_tg| {
            leave_taskgroup(child_tg, &child, true); // member
// Entry point for raw spawning: dispatch to the old- or new-runtime
// implementation based on the current context.
// NOTE(review): the `match context() {` line and closing braces are elided
// in this excerpt.
pub fn spawn_raw(opts: TaskOpts, f: ~fn()) {
    OldTaskContext => spawn_raw_oldsched(opts, f),
    TaskContext => spawn_raw_newsched(opts, f),
    SchedulerContext => fail!("can't spawn from scheduler context"),
    GlobalContext => fail!("can't spawn from global context"),
// New-runtime spawn: wrap the body so the child enlists in its taskgroups
// before running, then build the task (optionally on a dedicated 1:1
// scheduler thread for SingleThreaded mode) and hand it to the scheduler.
// NOTE(review): many structural lines (else branches, closing braces, some
// arguments) are elided in this excerpt; nesting below is approximate.
fn spawn_raw_newsched(mut opts: TaskOpts, f: ~fn()) {
    let child_data = Cell::new(gen_child_taskgroup(opts.linked, opts.supervised));
    let indestructible = opts.indestructible;

    let child_wrapper: ~fn() = || {
        // Child task runs this code.

        // If child data is 'None', the enlist is vacuously successful.
        let enlist_success = do child_data.take().map_move_default(true) |child_data| {
            let child_data = Cell::new(child_data); // :(
            do Local::borrow::<Task, bool> |me| {
                let (child_tg, ancestors, is_main) = child_data.take();
                let mut ancestors = ancestors;
                // FIXME(#7544): Optimize out the xadd in this clone, somehow.
                let handle = me.death.kill_handle.get_ref().clone();
                // Atomically try to get into all of our taskgroups.
                if enlist_many(NewTask(handle), &child_tg, &mut ancestors) {
                    // Got in. We can run the provided child body, and can also run
                    // the taskgroup's exit-time-destructor afterward.
                    me.taskgroup = Some(Taskgroup(child_tg, ancestors, is_main, None));
        // Should be run after the local-borrowed task is returned.
            do unkillable { f() }

    let mut task = unsafe {
        if opts.sched.mode != SingleThreaded {
                Task::build_child(child_wrapper)
                Task::build_root(child_wrapper)
            // Creating a 1:1 task:thread ...
            let sched = Local::unsafe_borrow::<Scheduler>();
            let sched_handle = (*sched).make_handle();

            // Create a new scheduler to hold the new task
            let new_loop = ~UvEventLoop::new();
            let mut new_sched = ~Scheduler::new_special(new_loop,
                                                        (*sched).work_queue.clone(),
                                                        (*sched).sleeper_list.clone(),
            let mut new_sched_handle = new_sched.make_handle();

            // Allow the scheduler to exit when the pinned task exits
            new_sched_handle.send(Shutdown);

            // Pin the new task to the new scheduler
            let new_task = if opts.watched {
                Task::build_homed_child(child_wrapper, Sched(new_sched_handle))
                Task::build_homed_root(child_wrapper, Sched(new_sched_handle))

            // Create a task that will later be used to join with the new scheduler
            // thread when it is ready to terminate
            let (thread_port, thread_chan) = oneshot();
            let thread_port_cell = Cell::new(thread_port);
            let join_task = do Task::build_child() {
                rtdebug!("running join task");
                let thread_port = thread_port_cell.take();
                let thread: Thread = thread_port.recv();

            // Put the scheduler into another thread
            let new_sched_cell = Cell::new(new_sched);
            let orig_sched_handle_cell = Cell::new((*sched).make_handle());
            let join_task_cell = Cell::new(join_task);

            let thread = do Thread::start {
                let mut new_sched = new_sched_cell.take();
                let mut orig_sched_handle = orig_sched_handle_cell.take();
                let join_task = join_task_cell.take();

                let bootstrap_task = ~do Task::new_root(&mut new_sched.stack_pool) || {
                    rtdebug!("bootstrapping a 1:1 scheduler");
                new_sched.bootstrap(bootstrap_task);

                rtdebug!("enqueing join_task");
                // Now tell the original scheduler to join with this thread
                // by scheduling a thread-joining task on the original scheduler
                orig_sched_handle.send(TaskFromFriend(join_task));

                // NB: We can't simply send a message from here to another task
                // because this code isn't running in a task and message passing doesn't
                // work outside of tasks. Hence we're sending a scheduler message
                // to execute a new task directly to a scheduler.

            // Give the thread handle to the join task
            thread_chan.send(thread);

            // When this task is enqueued on the current scheduler it will then get
            // forwarded to the scheduler to which it is pinned

    if opts.notify_chan.is_some() {
        let notify_chan = opts.notify_chan.take_unwrap();
        let notify_chan = Cell::new(notify_chan);
        let on_exit: ~fn(bool) = |success| {
            notify_chan.take().send(
                if success { Success } else { Failure }
        task.death.on_exit = Some(on_exit);

    task.name = opts.name.take();
    rtdebug!("spawn calling run_task");
    Scheduler::run_task(task);
// Old-runtime spawn: build the taskgroup config eagerly (lazy init is unsafe
// here -- see gen_child_taskgroup), create the C-runtime task, and start it
// with a wrapper closure whose ownership is handed to the runtime.
// NOTE(review): some structural lines (unsafe/do-block headers, closing
// braces) are elided in this excerpt.
fn spawn_raw_oldsched(mut opts: TaskOpts, f: ~fn()) {
    let (child_tg, ancestors, is_main) =
        gen_child_taskgroup(opts.linked, opts.supervised).expect("old runtime needs TG");

    let child_data = Cell::new((child_tg, ancestors, f));
    // Being killed with the unsafe task/closure pointers would leak them.
        let (child_tg, ancestors, f) = child_data.take(); // :(
        // Create child task.
        let new_task = match opts.sched.mode {
            DefaultScheduler => rt::new_task(),
            _ => new_task_in_sched()
        assert!(!new_task.is_null());
        // Getting killed after here would leak the task.
        let child_wrapper = make_child_wrapper(new_task, child_tg,
                                               ancestors, is_main, opts.notify_chan.take(), f);

        let closure = cast::transmute(&child_wrapper);

        // Getting killed between these two calls would free the child's
        // closure. (Reordering them wouldn't help - then getting killed
        // between them would leak.)
        rt::start_task(new_task, closure);
        cast::forget(child_wrapper);
// This function returns a closure-wrapper that we pass to the child task.
// (1) It sets up the notification channel.
// (2) It attempts to enlist in the child's group and all ancestor groups.
// (3a) If any of those fails, it leaves all groups, and does nothing.
// (3b) Otherwise it builds a task control structure and puts it in TLS,
// (4) ...and runs the provided body function.
// NOTE(review): the `f: ~fn()) -> ~fn() {` signature tail, the `f()` call
// line, and closing braces are elided in this excerpt.
fn make_child_wrapper(child: *rust_task, child_arc: TaskGroupArc,
                      ancestors: AncestorList, is_main: bool,
                      notify_chan: Option<Chan<TaskResult>>,
    let child_data = Cell::new((notify_chan, child_arc, ancestors));
    let result: ~fn() = || {
        let (notify_chan, child_arc, ancestors) = child_data.take(); // :(
        let mut ancestors = ancestors;
        // Child task runs this code.

        // Even if the below code fails to kick the child off, we must
        // send Something on the notify channel.
        let notifier = notify_chan.map_move(|c| AutoNotify(c));

        if enlist_many(OldTask(child), &child_arc, &mut ancestors) {
            let group = @@mut Taskgroup(child_arc, ancestors, is_main, notifier);
            local_set(OldHandle(child), taskgroup_key(), group);

            // Run the child's body.

            // TLS cleanup code will exit the taskgroup.

        // Run the box annihilator.
        // FIXME #4428: Crashy.
        // unsafe { cleanup::annihilate(); }
// Create a task on a freshly-created single-threaded C-runtime scheduler
// (used for non-default scheduler modes in the old runtime).
fn new_task_in_sched() -> *rust_task {
    let sched_id = rt::rust_new_sched(1);
    rt::rust_new_task_in_sched(sched_id)
// Smoke test: a raw-spawned task runs and can signal back over a channel.
// NOTE(review): the `#[test]` attribute and most of the body are elided in
// this excerpt.
fn test_spawn_raw_simple() {
    let (po, ch) = stream();
    do spawn_raw(default_task_opts()) {
// An unlinked child may fail without taking the spawner down.
// NOTE(review): the `#[test]` attribute, opts fields, and body are elided in
// this excerpt.
#[ignore(cfg(windows))]
fn test_spawn_raw_unsupervise() {
    let opts = task::TaskOpts {
        .. default_task_opts()
// The notify channel reports Success when the child exits cleanly.
// NOTE(review): the `#[test]` attribute and parts of the body are elided in
// this excerpt.
#[ignore(cfg(windows))]
fn test_spawn_raw_notify_success() {
    let (notify_po, notify_ch) = comm::stream();

    let opts = task::TaskOpts {
        notify_chan: Some(notify_ch),
        .. default_task_opts()
    assert_eq!(notify_po.recv(), Success);
// The notify channel reports Failure when the (unlinked) child fails.
// NOTE(review): the `#[test]` attribute and parts of the body are elided in
// this excerpt.
#[ignore(cfg(windows))]
fn test_spawn_raw_notify_failure() {
    // New bindings for these
    let (notify_po, notify_ch) = comm::stream();

    let opts = task::TaskOpts {
        notify_chan: Some(notify_ch),
        .. default_task_opts()
    assert_eq!(notify_po.recv(), Failure);