1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Language-level runtime services that should reasonably expected
12 //! to be available 'everywhere'. Local heaps, GC, unwinding,
13 //! local storage, and logging. Even a 'freestanding' Rust would likely want
14 //! to implement this.
16 use super::local_heap::LocalHeap;
24 use libc::{c_void, uintptr_t, c_char, size_t};
26 use option::{Option, Some, None};
27 use rt::borrowck::BorrowRecord;
29 use rt::context::Context;
35 use rt::logging::StdErrLogger;
36 use rt::sched::{Scheduler, SchedHandle};
37 use rt::stack::{StackSegment, StackPool};
38 use send_str::SendStr;
39 use task::LinkedFailure;
40 use task::spawn::Taskgroup;
41 use unstable::finally::Finally;
43 // The Task struct represents all state associated with a rust
44 // task. There are at this point two primary "subtypes" of task,
45 // however instead of using a subtype we just have a "task_type" field
46 // in the struct. This contains a pointer to another struct that holds
47 // the type-specific state.
// NOTE(review): this chunk is an elided extraction — the `pub struct Task {`
// header and several fields (heap, death, unwinder, destroyed, ...) are
// missing between the comment above and the fields below. The stray leading
// numbers are original file line numbers baked into the extraction, not code.
// Garbage-collector state for this task's local heap.
51 priv gc: GarbageCollector,
// Task-local storage map (TLS); starts empty, lazily populated.
52 storage: LocalStorage,
// Per-task logger; None until logging is first used — TODO confirm.
53 logger: Option<StdErrLogger>,
// Taskgroup membership; dropped in `run` via death.collect_failure so that
// KillHandle references are released while unkillable (see comment there).
55 taskgroup: Option<Taskgroup>,
// Optional human-readable task name, used in failure/overflow messages.
58 name: Option<SendStr>,
// (register context, stack) pair; None while the coroutine is checked out.
59 coroutine: Option<Coroutine>,
// The scheduler this task is currently running under, if any.
60 sched: Option<~Scheduler>,
62 // Dynamic borrowck debugging info
63 borrow_list: Option<~[BorrowRecord]>,
// Owned stdout writer, flushed and destroyed at the end of `run`.
64 stdout_handle: Option<~Writer>,
// NOTE(review): the `enum TaskType` header is elided; `GreenTask` below is
// one of its variants. A `SchedTask` variant also exists (see the matches
// in give_home / is_home_no_tls and `new_sched_task`).
68 GreenTask(Option<SchedHome>),
72 /// A coroutine is nothing more than a (register context, stack) pair.
73 pub struct Coroutine {
74 /// The segment of stack on which the task is currently running or
75 /// if the task is blocked, on which the task will resume
78 /// Servo needs this to be public in order to tell SpiderMonkey
79 /// about the stack bounds.
80 current_stack_segment: StackSegment,
81 /// Always valid if the task is alive and not running.
82 saved_context: Context
// NOTE(review): the closing brace of `Coroutine` and the `enum SchedHome`
// definition (variants `AnySched` and `Sched(SchedHandle)`, inferred from
// later matches) are elided from this extraction.
85 /// Some tasks have a dedicated home scheduler that they must run on.
// Zero-sized marker type; GC bookkeeping lives elsewhere — TODO confirm.
91 pub struct GarbageCollector;
// Newtype over the optional task-local-data map (None until first use).
92 pub struct LocalStorage(Option<local_data::Map>);
94 /// Represents the reason for the current unwinding process
95 pub enum UnwindResult {
96 /// The task is ending successfully
// NOTE(review): the `Success` variant line itself is elided here.
99 /// The Task is failing with reason `~Any`
// NOTE(review): the `Failure(~Any)` variant, enum close, and the impl
// header are elided; the method bodies below are also truncated.
104 /// Returns `true` if this `UnwindResult` is a failure
106 pub fn is_failure(&self) -> bool {
113 /// Returns `true` if this `UnwindResult` is a success
115 pub fn is_success(&self) -> bool {
// Holds `unwinding: bool` and `cause: Option<~Any>` (fields elided; see the
// initializers `Unwinder { unwinding: false, cause: None }` later in file).
123 pub struct Unwinder {
// Converts unwinder state into an UnwindResult, consuming the stored cause.
129 fn to_unwind_result(&mut self) -> UnwindResult {
// Failure path: takes ownership of the cause; unwrap is expected to hold
// because a cause is set in begin_unwind before unwinding — TODO confirm.
131 Failure(self.cause.take().unwrap())
140 // A helper to build a new task using the dynamically found
141 // scheduler and task. Only works in GreenTask context.
// Builds a child task homed on `home`, borrowing the running task's
// scheduler (and its stack pool) from TLS for the duration.
142 pub fn build_homed_child(stack_size: Option<uint>,
// Cells move `f`/`home` into the `do` closure (pre-1.0 capture workaround).
146 let f = Cell::new(f);
147 let home = Cell::new(home);
148 do Local::borrow |running_task: &mut Task| {
// Temporarily take the scheduler out of the running task...
149 let mut sched = running_task.sched.take_unwrap();
150 let new_task = ~running_task.new_child_homed(&mut sched.stack_pool,
// ...and put it back once the child has been constructed.
154 running_task.sched = Some(sched);
// Convenience wrapper: child task with no fixed home.
159 pub fn build_child(stack_size: Option<uint>, f: proc()) -> ~Task {
160 Task::build_homed_child(stack_size, f, AnySched)
// Like build_homed_child, but creates a *root* task (no parent death-watch).
163 pub fn build_homed_root(stack_size: Option<uint>,
167 let f = Cell::new(f);
168 let home = Cell::new(home);
169 do Local::borrow |running_task: &mut Task| {
170 let mut sched = running_task.sched.take_unwrap();
171 let new_task = ~Task::new_root_homed(&mut sched.stack_pool,
175 running_task.sched = Some(sched);
// Convenience wrapper: root task with no fixed home.
180 pub fn build_root(stack_size: Option<uint>, f: proc()) -> ~Task {
181 Task::build_homed_root(stack_size, f, AnySched)
// Constructs the scheduler's own task: SchedTask type, empty coroutine
// (it runs on the scheduler thread's existing stack).
184 pub fn new_sched_task() -> Task {
186 heap: LocalHeap::new(),
187 gc: GarbageCollector,
188 storage: LocalStorage(None),
190 unwinder: Unwinder { unwinding: false, cause: None },
194 coroutine: Some(Coroutine::empty()),
197 task_type: SchedTask,
// Root task on any scheduler; delegates to new_root_homed with AnySched.
203 pub fn new_root(stack_pool: &mut StackPool,
204 stack_size: Option<uint>,
205 start: proc()) -> Task {
206 Task::new_root_homed(stack_pool, stack_size, AnySched, start)
// Child task on any scheduler; delegates to new_child_homed with AnySched.
209 pub fn new_child(&mut self,
210 stack_pool: &mut StackPool,
211 stack_size: Option<uint>,
212 start: proc()) -> Task {
213 self.new_child_homed(stack_pool, stack_size, AnySched, start)
// Root green task with an explicit home; allocates a fresh coroutine
// stack from `stack_pool` and wraps `start` as its entry point.
216 pub fn new_root_homed(stack_pool: &mut StackPool,
217 stack_size: Option<uint>,
219 start: proc()) -> Task {
221 heap: LocalHeap::new(),
222 gc: GarbageCollector,
223 storage: LocalStorage(None),
225 unwinder: Unwinder { unwinding: false, cause: None },
230 coroutine: Some(Coroutine::new(stack_pool, stack_size, start)),
232 task_type: GreenTask(Some(home)),
// Child green task: like new_root_homed, but additionally registers with
// the parent's death machinery (`self.death.new_child()`).
238 pub fn new_child_homed(&mut self,
239 stack_pool: &mut StackPool,
240 stack_size: Option<uint>,
242 start: proc()) -> Task {
244 heap: LocalHeap::new(),
245 gc: GarbageCollector,
246 storage: LocalStorage(None),
248 unwinder: Unwinder { unwinding: false, cause: None },
250 // FIXME(#7544) make watching optional
251 death: self.death.new_child(),
254 coroutine: Some(Coroutine::new(stack_pool, stack_size, start)),
256 task_type: GreenTask(Some(home)),
// Replaces this green task's home; aborts if called on a SchedTask.
262 pub fn give_home(&mut self, new_home: SchedHome) {
263 match self.task_type {
264 GreenTask(ref mut home) => {
265 *home = Some(new_home);
268 rtabort!("type error: used SchedTask as GreenTask");
// Moves the home out of a green task (leaving None); aborts on SchedTask.
273 pub fn take_unwrap_home(&mut self) -> SchedHome {
274 match self.task_type {
275 GreenTask(ref mut home) => {
276 let out = home.take_unwrap();
280 rtabort!("type error: used SchedTask as GreenTask");
// Runs the task's main closure under the unwinder, then performs the full
// teardown sequence: TLS destruction, box annihilation, stdout flush,
// borrowck-info cleanup, failure collection, and marking `destroyed`.
285 pub fn run(&mut self, f: ||) {
286 rtdebug!("run called on task: {}", borrow::to_uint(self));
288 // The only try/catch block in the world. Attempt to run the task's
289 // client-specified code and catch any failures.
290 do self.unwinder.try {
292 // Run the task main function, then do some cleanup.
295 // First, destroy task-local storage. This may run user dtors.
297 // FIXME #8302: Dear diary. I'm so tired and confused.
298 // There's some interaction in rustc between the box
299 // annihilator and the TLS dtor by which TLS is
300 // accessed from annihilated box dtors *after* TLS is
301 // destroyed. Somehow setting TLS back to null, as the
302 // old runtime did, makes this work, but I don't currently
303 // understand how. I would expect that, if the annihilator
304 // reinvokes TLS while TLS is uninitialized, that
305 // TLS would be reinitialized but never destroyed,
306 // but somehow this works. I have no idea what's going
307 // on but this seems to make things magically work. FML.
309 // (added after initial comment) A possible interaction here is
310 // that the destructors for the objects in TLS themselves invoke
311 // TLS, or possibly some destructors for those objects being
312 // annihilated invoke TLS. Sadly these two operations seemed to
313 // be intertwined, and miraculously work for now...
316 // Destroy remaining boxes. Also may run user dtors.
317 unsafe { cleanup::annihilate(); }
319 // Finally flush and destroy any output handles which the task
320 // owns. There are no boxes here, and no user destructors should
321 // run after this any more.
322 match self.stdout_handle.take() {
// Rebind as mutable so the handle can be flushed/dropped (rest elided).
324 let mut handle = handle;
333 // Cleanup the dynamic borrowck debugging info
334 borrowck::clear_task_borrow_list();
336 // NB. We pass the taskgroup into death so that it can be dropped while
337 // the unkillable counter is set. This is necessary for when the
338 // taskgroup destruction code drops references on KillHandles, which
339 // might require using unkillable (to synchronize with an unwrapper).
340 self.death.collect_failure(self.unwinder.to_unwind_result(), self.taskgroup.take());
// Marks teardown complete; the Drop impl asserts this flag was set.
341 self.destroyed = true;
344 // New utility functions for homes.
// True iff this green task's home is the given scheduler (false for
// AnySched); aborts for homeless green tasks and for SchedTasks.
346 pub fn is_home_no_tls(&self, sched: &~Scheduler) -> bool {
347 match self.task_type {
348 GreenTask(Some(AnySched)) => { false }
// Compare scheduler ids to decide whether `sched` is this task's home.
349 GreenTask(Some(Sched(SchedHandle { sched_id: ref id, _}))) => {
350 *id == sched.sched_id()
353 rtabort!("task without home");
357 rtabort!("type error: expected: GreenTask, found: SchedTask");
// True iff this green task is pinned to a specific scheduler.
362 pub fn homed(&self) -> bool {
363 match self.task_type {
364 GreenTask(Some(AnySched)) => { false }
365 GreenTask(Some(Sched(SchedHandle { _ }))) => { true }
367 rtabort!("task without home");
370 rtabort!("type error: expected: GreenTask, found: SchedTask");
375 // Grab both the scheduler and the task from TLS and check if the
376 // task is executing on an appropriate scheduler.
377 pub fn on_appropriate_sched() -> bool {
378 do Local::borrow |task: &mut Task| {
379 let sched_id = task.sched.get_ref().sched_id();
380 let sched_run_anything = task.sched.get_ref().run_anything;
381 match task.task_type {
382 GreenTask(Some(AnySched)) => {
// AnySched tasks are appropriate on any scheduler that runs anything
// (result expression elided from this extraction).
383 rtdebug!("anysched task in sched check ****");
386 GreenTask(Some(Sched(SchedHandle { sched_id: ref id, _ }))) => {
387 rtdebug!("homed task in sched check ****");
391 rtabort!("task without home");
394 rtabort!("type error: expected: GreenTask, found: SchedTask");
// NOTE(review): this is the body of Task's Drop impl (header elided);
// it enforces that `run` completed its teardown before the Task is freed.
403 rtdebug!("called drop for a task: {}", borrow::to_uint(self));
404 rtassert!(self.destroyed);
408 // Coroutines represent nothing more than a context and a stack
// Allocates a stack segment from the pool (default size from env when
// unspecified) and builds the initial register context for `start`.
413 pub fn new(stack_pool: &mut StackPool,
414 stack_size: Option<uint>,
417 let stack_size = match stack_size {
419 None => env::min_stack()
421 let start = Coroutine::build_start_wrapper(start);
422 let mut stack = stack_pool.take_segment(stack_size);
423 let initial_context = Context::new(start, &mut stack);
425 current_stack_segment: stack,
426 saved_context: initial_context
// Placeholder coroutine with a zero-length stack and empty context,
// used for scheduler tasks that run on the host thread's stack.
430 pub fn empty() -> Coroutine {
432 current_stack_segment: StackSegment::new(0),
433 saved_context: Context::empty()
// Wraps the user `start` proc with runtime entry/exit glue: it finishes
// the scheduler's cleanup job, runs the task, then terminates it.
437 fn build_start_wrapper(start: proc()) -> proc() {
438 let start_cell = Cell::new(start);
439 let wrapper: proc() = || {
440 // First code after swap to this new context. Run our
444 // Again - might work while safe, or it might not.
445 do Local::borrow |sched: &mut Scheduler| {
446 sched.run_cleanup_job();
449 // To call the run method on a task we need a direct
450 // reference to it. The task is in TLS, so we can
451 // simply unsafe_borrow it to get this reference. We
452 // need to still have the task in TLS though, so we
453 // need to unsafe_borrow.
454 let task: *mut Task = Local::unsafe_borrow();
457 // N.B. Removing `start` from the start wrapper
458 // closure by emptying a cell is critical for
459 // correctness. The ~Task pointer, and in turn the
460 // closure used to initialize the first call
461 // frame, is destroyed in the scheduler context,
462 // not task context. So any captured closures must
463 // not contain user-definable dtors that expect to
464 // be in task context. By moving `start` out of
465 // the closure, all the user code goes our of
466 // scope while the task is still running.
467 let start = start_cell.take();
472 // We remove the sched from the Task in TLS right now.
473 let sched: ~Scheduler = Local::take();
474 // ... allowing us to give it away when performing a
475 // scheduling operation.
476 sched.terminate_current_task()
481 /// Destroy coroutine and try to reuse stack segment.
482 pub fn recycle(self, stack_pool: &mut StackPool) {
// Destructure to move the stack segment out and return it to the pool;
// the saved context is dropped here.
484 Coroutine { current_stack_segment, _ } => {
485 stack_pool.give_segment(current_stack_segment);
493 // Just a sanity check to make sure we are catching a Rust-thrown exception
494 static UNWIND_TOKEN: uintptr_t = 839147;
// Runs `f`, catching a Rust unwind via the C `rust_try` shim. The closure
// is split into raw (code, env) pointers so it can cross the FFI boundary.
497 pub fn try(&mut self, f: ||) {
498 use unstable::raw::Closure;
501 let closure: Closure = transmute(f);
502 let code = transmute(closure.code);
503 let env = transmute(closure.env);
// rust_try returns 0 on normal completion or the token thrown by
// rust_begin_unwind; anything else would be a foreign exception.
505 let token = rust_try(try_fn, code, env);
506 assert!(token == 0 || token == UNWIND_TOKEN);
// Trampoline with C ABI: reassembles the closure from raw parts and calls it.
509 extern fn try_fn(code: *c_void, env: *c_void) {
511 let closure: Closure = Closure {
512 code: transmute(code),
515 let closure: || = transmute(closure);
// C entry point that installs the landing pad and invokes `f(code, data)`.
521 fn rust_try(f: extern "C" fn(*c_void, *c_void),
523 data: *c_void) -> uintptr_t;
// Records the failure cause and starts unwinding; never returns normally
// (rust_begin_unwind throws, and is caught by the matching rust_try).
527 pub fn begin_unwind(&mut self, cause: ~Any) -> ! {
528 self.unwinding = true;
529 self.cause = Some(cause);
531 rust_begin_unwind(UNWIND_TOKEN);
// Unreachable: satisfies the `!` return type after the no-return FFI call.
532 return transmute(());
535 fn rust_begin_unwind(token: uintptr_t);
540 /// This function is invoked from rust's current __morestack function. Segmented
541 /// stacks are currently not enabled as segmented stacks, but rather one giant
542 /// stack segment. This means that whenever we run out of stack, we want to
543 /// truly consider it to be stack overflow rather than allocating a new stack.
544 #[no_mangle] // - this is called from C code
545 #[no_split_stack] // - it would be sad for this function to trigger __morestack
546 #[doc(hidden)] // - Function must be `pub` to get exported, but it's
547 // irrelevant for documentation purposes.
548 pub extern "C" fn rust_stack_exhausted() {
549 use rt::in_green_task_context;
551 use rt::local::Local;
552 use unstable::intrinsics;
555 // We're calling this function because the stack just ran out. We need
556 // to call some other rust functions, but if we invoke the functions
557 // right now it'll just trigger this handler being called again. In
558 // order to alleviate this, we move the stack limit to be inside of the
559 // red zone that was allocated for exactly this reason.
560 let limit = context::get_sp_limit();
561 context::record_sp_limit(limit - context::RED_ZONE / 2);
563 // This probably isn't the best course of action. Ideally one would want
564 // to unwind the stack here instead of just aborting the entire process.
565 // This is a tricky problem, however. There's a few things which need to
568 // 1. We're here because of a stack overflow, yet unwinding will run
569 // destructors and hence arbitrary code. What if that code overflows
570 // the stack? One possibility is to use the above allocation of an
571 // extra 10k to hope that we don't hit the limit, and if we do then
572 // abort the whole program. Not the best, but kind of hard to deal
573 // with unless we want to switch stacks.
575 // 2. LLVM will optimize functions based on whether they can unwind or
576 // not. It will flag functions with 'nounwind' if it believes that
577 // the function cannot trigger unwinding, but if we do unwind on
578 // stack overflow then it means that we could unwind in any function
579 // anywhere. We would have to make sure that LLVM only places the
580 // nounwind flag on functions which don't call any other functions.
582 // 3. The function that overflowed may have owned arguments. These
583 // arguments need to have their destructors run, but we haven't even
584 // begun executing the function yet, so unwinding will not run the
585 // any landing pads for these functions. If this is ignored, then
586 // the arguments will just be leaked.
588 // Exactly what to do here is a very delicate topic, and is possibly
589 // still up in the air for what exactly to do. Some relevant issues:
591 // #3555 - out-of-stack failure leaks arguments
592 // #3695 - should there be a stack limit?
593 // #9855 - possible strategies which could be taken
594 // #9854 - unwinding on windows through __morestack has never worked
595 // #2361 - possible implementation of not using landing pads
597 if in_green_task_context() {
// Report the overflow with the task's name when running as a green task.
598 do Local::borrow |task: &mut Task| {
599 let n = task.name.as_ref().map(|n| n.as_slice()).unwrap_or("<unnamed>");
601 // See the message below for why this is not emitted to the
602 // task's logger. This has the additional conundrum of the
603 // logger may not be initialized just yet, meaning that an FFI
604 // call would happen to initialized it (calling out to libuv),
605 // and the FFI call needs 2MB of stack when we just ran out.
606 rterrln!("task '{}' has overflowed its stack", n);
// Non-task context (e.g. scheduler or foreign thread): generic message.
609 rterrln!("stack overflow in non-task context");
616 /// This is the entry point of unwinding for things like lang items and such.
617 /// The arguments are normally generated by the compiler, and need to
618 /// have static lifetimes.
619 pub fn begin_unwind_raw(msg: *c_char, file: *c_char, line: size_t) -> ! {
// Converts a compiler-supplied C string (assumed 'static and utf8) into a
// &'static str without copying; aborts the runtime if not valid utf8.
624 fn static_char_ptr(p: *c_char) -> &'static str {
625 let s = unsafe { CString::new(p, false) };
627 Some(s) => unsafe { transmute::<&str, &'static str>(s) },
628 None => rtabort!("message wasn't utf8?")
632 let msg = static_char_ptr(msg);
633 let file = static_char_ptr(file);
// Delegate to the typed unwinding entry point.
635 begin_unwind(msg, file, line as uint)
638 /// This is the entry point of unwinding for fail!() and assert!().
639 pub fn begin_unwind<M: Any + Send>(msg: M, file: &'static str, line: uint) -> ! {
641 use rt::in_green_task_context;
642 use rt::local::Local;
645 use unstable::intrinsics;
649 // Note that this should be the only allocation performed in this block.
650 // Currently this means that fail!() on OOM will invoke this code path,
651 // but then again we're not really ready for failing on OOM anyway. If
652 // we do start doing this, then we should propagate this allocation to
653 // be performed in the parent of this task instead of the task that's
// Box the message as ~Any so the unwinder can carry an arbitrary cause.
655 let msg = ~msg as ~Any;
658 //let msg: &Any = msg;
// Downcast the cause to a printable form: &'static str, ~str, or the
// special LinkedFailure marker; the fallback arm is elided here.
659 let msg_s = match msg.as_ref::<&'static str>() {
661 None => match msg.as_ref::<~str>() {
662 Some(s) => s.as_slice(),
663 None => match msg.as_ref::<LinkedFailure>() {
664 Some(*) => "linked failure",
// Outside a green task there is no unwinder to run, so print and (per the
// elided code) presumably abort — TODO confirm against full source.
670 if !in_green_task_context() {
671 rterrln!("failed in non-task context at '{}', {}:{}",
// Borrow the task raw from TLS to reach its name and unwinder.
676 task = Local::unsafe_borrow();
677 let n = (*task).name.as_ref().map(|n| n.as_slice()).unwrap_or("<unnamed>");
679 // XXX: this should no get forcibly printed to the console, this should
680 // either be sent to the parent task (ideally), or get printed to
681 // the task's logger. Right now the logger is actually a uvio
682 // instance, which uses unkillable blocks internally for various
683 // reasons. This will cause serious trouble if the task is failing
684 // due to mismanagment of its own kill flag, so calling our own
685 // logger in its current state is a bit of a problem.
687 rterrln!("task '{}' failed at '{}', {}:{}", n, msg_s, file, line);
// Double failure while already unwinding is unrecoverable: abort.
689 if (*task).unwinder.unwinding {
690 rtabort!("unwinding again");
694 (*task).unwinder.begin_unwind(msg);
// NOTE(review): the fragments below are the bodies of #[test] functions from
// the elided `mod test`; each test's `fn` header is missing from this
// extraction unless shown.
705 do run_in_newsched_task() {
// Tests task-local data set/get round-trips for two distinct keys.
716 do run_in_newsched_task() {
717 local_data_key!(key: @~str)
718 local_data::set(key, @~"data");
719 assert!(*local_data::get(key, |k| k.map(|k| *k)).unwrap() == ~"data");
720 local_data_key!(key2: @~str)
721 local_data::set(key2, @~"data");
722 assert!(*local_data::get(key2, |k| k.map(|k| *k)).unwrap() == ~"data");
// Tests that spawntask_try reports Ok for a clean task and Err for fail!().
728 do run_in_newsched_task() {
729 let result = spawntask_try(||());
730 rtdebug!("trying first assert");
731 assert!(result.is_ok());
732 let result = spawntask_try(|| fail!());
733 rtdebug!("trying second assert");
734 assert!(result.is_err());
// Smoke test: the task-local RNG works inside a uv task.
740 do run_in_uv_task() {
741 use rand::{rng, Rng};
743 let _ = r.next_u32();
// Smoke test: logging works inside a uv task.
749 do run_in_uv_task() {
750 info!("here i am. logging in a newsched task");
// Tests oneshot channel send/recv between tasks (send side elided).
758 do run_in_newsched_task {
759 let (port, chan) = oneshot();
761 assert!(port.recv() == 10);
// Tests stream channel send/recv (send side elided).
769 do run_in_newsched_task() {
770 let (port, chan) = stream();
772 assert!(port.recv() == 10);
777 fn comm_shared_chan() {
780 do run_in_newsched_task() {
781 let (port, chan) = stream();
782 let chan = SharedChan::new(chan);
784 assert!(port.recv() == 10);
// Tests that a child's fail!() propagates as linked failure to the parent.
789 fn linked_failure() {
790 do run_in_newsched_task() {
791 let res = do spawntask_try {
792 spawntask_random(|| fail!());
794 assert!(res.is_err());
// Builds a small @mut cycle to exercise box annihilation at task exit.
800 use option::{Option, Some, None};
802 do run_in_newsched_task {
804 next: Option<@mut List>,
807 let a = @mut List { next: None };
808 let b = @mut List { next: Some(a) };
// Sanity check that begin_unwind is callable (marked #[should_fail]
// in the original, presumably — TODO confirm).
816 fn test_begin_unwind() { begin_unwind("cause", file!(), line!()) }