1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Language-level runtime services that should reasonably be expected
12 //! to be available 'everywhere'. Local heaps, GC, unwinding,
13 //! local storage, and logging. Even a 'freestanding' Rust would likely want
14 //! to implement this.
16 use super::local_heap::LocalHeap;
23 use libc::{c_char, size_t};
25 use option::{Option, Some, None};
26 use rt::borrowck::BorrowRecord;
28 use rt::context::Context;
32 use rt::logging::StdErrLogger;
33 use rt::sched::{Scheduler, SchedHandle};
34 use rt::stack::{StackSegment, StackPool};
35 use rt::unwind::Unwinder;
36 use send_str::SendStr;
37 use unstable::finally::Finally;
38 use unstable::mutex::Mutex;
40 // The Task struct represents all state associated with a rust
41 // task. There are at this point two primary "subtypes" of task,
42 // however instead of using a subtype we just have a "task_type" field
43 // in the struct. This contains a pointer to another struct that holds
44 // the type-specific state.
// NOTE(review): the `struct Task {` header is in an elided span; the
// fields below are its body.
// Zero-sized GC marker (see `pub struct GarbageCollector;` below).
48 priv gc: GarbageCollector,
// Task-local storage map; constructors initialize it to LocalStorage(None).
49 storage: LocalStorage,
// Lazily-created stderr logger, if any.
50 logger: Option<StdErrLogger>,
// Optional human-readable task name, used in failure/overflow messages.
54 name: Option<SendStr>,
// The (register context, stack) pair this task runs on; None once taken.
55 coroutine: Option<Coroutine>,
// Owned scheduler, present while the task holds it (temporarily taken by
// the build_* helpers below).
56 sched: Option<~Scheduler>,
58 // Dynamic borrowck debugging info
59 borrow_list: Option<~[BorrowRecord]>,
// Stdout writer flushed and destroyed at the end of `run`.
60 stdout_handle: Option<~Writer>,
62 // See the comments in the scheduler about why this is necessary
63 nasty_deschedule_lock: Mutex,
// TaskType variant: a green task optionally carries a home scheduler
// (None once the home has been taken via take_unwrap_home).
67 GreenTask(Option<SchedHome>),
71 /// A coroutine is nothing more than a (register context, stack) pair.
72 pub struct Coroutine {
73 /// The segment of stack on which the task is currently running or
74 /// if the task is blocked, on which the task will resume
77 /// Servo needs this to be public in order to tell SpiderMonkey
78 /// about the stack bounds.
79 current_stack_segment: StackSegment,
80 /// Always valid if the task is alive and not running.
81 saved_context: Context
84 /// Some tasks have a dedicated home scheduler that they must run on.
// Zero-sized marker type; no GC state is stored here.
90 pub struct GarbageCollector;
// Newtype over an optional local_data map; constructors in this file
// always start it as LocalStorage(None).
91 pub struct LocalStorage(Option<local_data::Map>);
95 // A helper to build a new task using the dynamically found
96 // scheduler and task. Only works in GreenTask context.
97 pub fn build_homed_child(stack_size: Option<uint>,
// Borrow the running task from TLS and temporarily take its scheduler so
// that the new task can draw a stack from the scheduler's stack pool;
// the scheduler is restored afterwards.
101 let mut running_task = Local::borrow(None::<Task>);
102 let mut sched = running_task.get().sched.take_unwrap();
103 let new_task = ~running_task.get()
104 .new_child_homed(&mut sched.stack_pool,
108 running_task.get().sched = Some(sched);
// Convenience wrapper: a child task with no particular home (AnySched).
112 pub fn build_child(stack_size: Option<uint>, f: proc()) -> ~Task {
113 Task::build_homed_child(stack_size, f, AnySched)
// Like build_homed_child, but builds a root task (new_root_homed) rather
// than a child of the running task.
116 pub fn build_homed_root(stack_size: Option<uint>,
120 let mut running_task = Local::borrow(None::<Task>);
121 let mut sched = running_task.get().sched.take_unwrap();
122 let new_task = ~Task::new_root_homed(&mut sched.stack_pool,
126 running_task.get().sched = Some(sched);
// Convenience wrapper: a root task with no particular home (AnySched).
130 pub fn build_root(stack_size: Option<uint>, f: proc()) -> ~Task {
131 Task::build_homed_root(stack_size, f, AnySched)
// Construct the task that represents a scheduler itself: task_type is
// SchedTask and the coroutine is the empty placeholder (no real stack).
134 pub fn new_sched_task() -> Task {
136 heap: LocalHeap::new(),
137 gc: GarbageCollector,
138 storage: LocalStorage(None),
140 unwinder: Unwinder { unwinding: false, cause: None },
143 coroutine: Some(Coroutine::empty()),
146 task_type: SchedTask,
// Raw OS mutex; paired with the explicit destroy() in Drop below.
149 nasty_deschedule_lock: unsafe { Mutex::new() },
// Root task with no fixed home: delegates to new_root_homed with AnySched.
153 pub fn new_root(stack_pool: &mut StackPool,
154 stack_size: Option<uint>,
155 start: proc()) -> Task {
156 Task::new_root_homed(stack_pool, stack_size, AnySched, start)
// Child task with no fixed home: delegates to new_child_homed with AnySched.
159 pub fn new_child(&mut self,
160 stack_pool: &mut StackPool,
161 stack_size: Option<uint>,
162 start: proc()) -> Task {
163 self.new_child_homed(stack_pool, stack_size, AnySched, start)
// Root green task pinned to `home`; the coroutine takes a fresh stack
// from `stack_pool` and will run `start`.
166 pub fn new_root_homed(stack_pool: &mut StackPool,
167 stack_size: Option<uint>,
169 start: proc()) -> Task {
171 heap: LocalHeap::new(),
172 gc: GarbageCollector,
173 storage: LocalStorage(None),
175 unwinder: Unwinder { unwinding: false, cause: None },
179 coroutine: Some(Coroutine::new(stack_pool, stack_size, start)),
181 task_type: GreenTask(Some(home)),
184 nasty_deschedule_lock: unsafe { Mutex::new() },
// Child green task pinned to `home`; mirrors new_root_homed but is built
// relative to `self` (the parent task).
188 pub fn new_child_homed(&mut self,
189 stack_pool: &mut StackPool,
190 stack_size: Option<uint>,
192 start: proc()) -> Task {
194 heap: LocalHeap::new(),
195 gc: GarbageCollector,
196 storage: LocalStorage(None),
198 unwinder: Unwinder { unwinding: false, cause: None },
202 coroutine: Some(Coroutine::new(stack_pool, stack_size, start)),
204 task_type: GreenTask(Some(home)),
207 nasty_deschedule_lock: unsafe { Mutex::new() },
// Replace this green task's home scheduler. Aborts at runtime if called
// on a SchedTask, which has no home to set.
211 pub fn give_home(&mut self, new_home: SchedHome) {
212 match self.task_type {
213 GreenTask(ref mut home) => {
214 *home = Some(new_home);
217 rtabort!("type error: used SchedTask as GreenTask");
// Move the home out of this green task, leaving None behind; aborts if
// this is a SchedTask (and, via take_unwrap, if the home is already gone).
222 pub fn take_unwrap_home(&mut self) -> SchedHome {
223 match self.task_type {
224 GreenTask(ref mut home) => {
225 let out = home.take_unwrap();
229 rtabort!("type error: used SchedTask as GreenTask");
// Execute the task body `f` under the unwinder, then tear the task down:
// TLS destructors, box annihilation, stdout flush, borrowck cleanup, and
// finally record the failure result and mark the task destroyed.
234 pub fn run(&mut self, f: ||) {
235 rtdebug!("run called on task: {}", borrow::to_uint(self));
237 // The only try/catch block in the world. Attempt to run the task's
238 // client-specified code and catch any failures.
239 self.unwinder.try(|| {
241 // Run the task main function, then do some cleanup.
244 // First, destroy task-local storage. This may run user dtors.
246 // FIXME #8302: Dear diary. I'm so tired and confused.
247 // There's some interaction in rustc between the box
248 // annihilator and the TLS dtor by which TLS is
249 // accessed from annihilated box dtors *after* TLS is
250 // destroyed. Somehow setting TLS back to null, as the
251 // old runtime did, makes this work, but I don't currently
252 // understand how. I would expect that, if the annihilator
253 // reinvokes TLS while TLS is uninitialized, that
254 // TLS would be reinitialized but never destroyed,
255 // but somehow this works. I have no idea what's going
256 // on but this seems to make things magically work. FML.
258 // (added after initial comment) A possible interaction here is
259 // that the destructors for the objects in TLS themselves invoke
260 // TLS, or possibly some destructors for those objects being
261 // annihilated invoke TLS. Sadly these two operations seemed to
262 // be intertwined, and miraculously work for now...
265 // Destroy remaining boxes. Also may run user dtors.
266 unsafe { cleanup::annihilate(); }
268 // Finally flush and destroy any output handles which the task
269 // owns. There are no boxes here, and no user destructors should
270 // run after this any more.
271 match self.stdout_handle.take() {
273 let mut handle = handle;
282 // Cleanup the dynamic borrowck debugging info
283 borrowck::clear_task_borrow_list();
// Record success/failure for the death watcher; `destroyed` is asserted
// later in Drop.
285 self.death.collect_failure(self.unwinder.result());
286 self.destroyed = true;
289 // New utility functions for homes.
// True iff this green task's home is the given scheduler; AnySched tasks
// return false. Aborts for a GreenTask whose home was already taken
// ("task without home") or for a SchedTask.
291 pub fn is_home_no_tls(&self, sched: &~Scheduler) -> bool {
292 match self.task_type {
293 GreenTask(Some(AnySched)) => { false }
294 GreenTask(Some(Sched(SchedHandle { sched_id: ref id, .. }))) => {
295 *id == sched.sched_id()
298 rtabort!("task without home");
302 rtabort!("type error: expected: GreenTask, found: SchedTask");
// True iff this green task is pinned to a specific scheduler (Sched);
// AnySched tasks return false. Same abort conditions as above.
307 pub fn homed(&self) -> bool {
308 match self.task_type {
309 GreenTask(Some(AnySched)) => { false }
310 GreenTask(Some(Sched(SchedHandle { .. }))) => { true }
312 rtabort!("task without home");
315 rtabort!("type error: expected: GreenTask, found: SchedTask");
320 // Grab both the scheduler and the task from TLS and check if the
321 // task is executing on an appropriate scheduler.
322 pub fn on_appropriate_sched() -> bool {
323 let mut task = Local::borrow(None::<Task>);
// Capture the current scheduler's id and whether it accepts any task,
// before matching on the task's home below.
324 let sched_id = task.get().sched.get_ref().sched_id();
325 let sched_run_anything = task.get().sched.get_ref().run_anything;
326 match task.get().task_type {
327 GreenTask(Some(AnySched)) => {
328 rtdebug!("anysched task in sched check ****");
331 GreenTask(Some(Sched(SchedHandle { sched_id: ref id, ..}))) => {
332 rtdebug!("homed task in sched check ****");
336 rtabort!("task without home");
339 rtabort!("type error: expected: GreenTask, found: SchedTask");
// Drop for Task: the task must have finished `run` (destroyed == true)
// before being dropped, and the raw mutex needs an explicit destroy.
347 rtdebug!("called drop for a task: {}", borrow::to_uint(self));
348 rtassert!(self.destroyed);
350 unsafe { self.nasty_deschedule_lock.destroy(); }
354 // Coroutines represent nothing more than a context and a stack
// Allocate a coroutine: take a stack segment of the requested (or
// minimum) size from the pool and build the initial register context
// pointing at the wrapped start proc.
359 pub fn new(stack_pool: &mut StackPool,
360 stack_size: Option<uint>,
363 let stack_size = match stack_size {
365 None => env::min_stack()
367 let start = Coroutine::build_start_wrapper(start);
368 let mut stack = stack_pool.take_segment(stack_size);
369 let initial_context = Context::new(start, &mut stack);
371 current_stack_segment: stack,
372 saved_context: initial_context
// Placeholder coroutine with a zero-length stack and empty context;
// used by new_sched_task, which never runs user code on it.
376 pub fn empty() -> Coroutine {
378 current_stack_segment: StackSegment::new(0),
379 saved_context: Context::empty()
// Wrap the user-provided `start` proc in the bootstrap code that runs
// as the first code on the new context.
383 fn build_start_wrapper(start: proc()) -> proc() {
384 let wrapper: proc() = proc() {
385 // First code after swap to this new context. Run our
389 // Again - might work while safe, or it might not.
391 let mut sched = Local::borrow(None::<Scheduler>);
392 sched.get().run_cleanup_job();
395 // To call the run method on a task we need a direct
396 // reference to it. The task is in TLS, so we can
397 // simply unsafe_borrow it to get this reference. We
398 // need to still have the task in TLS though, so we
399 // need to unsafe_borrow.
400 let task: *mut Task = Local::unsafe_borrow();
402 let mut start_cell = Some(start);
404 // N.B. Removing `start` from the start wrapper
405 // closure by emptying a cell is critical for
406 // correctness. The ~Task pointer, and in turn the
407 // closure used to initialize the first call
408 // frame, is destroyed in the scheduler context,
409 // not task context. So any captured closures must
410 // not contain user-definable dtors that expect to
411 // be in task context. By moving `start` out of
412 // the closure, all the user code goes out of
413 // scope while the task is still running.
414 let start = start_cell.take_unwrap();
419 // We remove the sched from the Task in TLS right now.
420 let sched: ~Scheduler = Local::take();
421 // ... allowing us to give it away when performing a
422 // scheduling operation.
423 sched.terminate_current_task()
428 /// Destroy coroutine and try to reuse stack segment.
429 pub fn recycle(self, stack_pool: &mut StackPool) {
431 Coroutine { current_stack_segment, .. } => {
432 stack_pool.give_segment(current_stack_segment);
439 /// This function is invoked from rust's current __morestack function. Segmented
440 /// stacks are currently not enabled as segmented stacks, but rather one giant
441 /// stack segment. This means that whenever we run out of stack, we want to
442 /// truly consider it to be stack overflow rather than allocating a new stack.
443 #[no_mangle] // - this is called from C code
444 #[no_split_stack] // - it would be sad for this function to trigger __morestack
445 #[doc(hidden)] // - Function must be `pub` to get exported, but it's
446 // irrelevant for documentation purposes.
447 #[cfg(not(test))] // in testing, use the original libstd's version
448 pub extern "C" fn rust_stack_exhausted() {
450 use rt::in_green_task_context;
452 use rt::local::Local;
453 use unstable::intrinsics;
456 // We're calling this function because the stack just ran out. We need
457 // to call some other rust functions, but if we invoke the functions
458 // right now it'll just trigger this handler being called again. In
459 // order to alleviate this, we move the stack limit to be inside of the
460 // red zone that was allocated for exactly this reason.
461 let limit = context::get_sp_limit();
462 context::record_sp_limit(limit - context::RED_ZONE / 2);
464 // This probably isn't the best course of action. Ideally one would want
465 // to unwind the stack here instead of just aborting the entire process.
466 // This is a tricky problem, however. There's a few things which need to
469 // 1. We're here because of a stack overflow, yet unwinding will run
470 // destructors and hence arbitrary code. What if that code overflows
471 // the stack? One possibility is to use the above allocation of an
472 // extra 10k to hope that we don't hit the limit, and if we do then
473 // abort the whole program. Not the best, but kind of hard to deal
474 // with unless we want to switch stacks.
476 // 2. LLVM will optimize functions based on whether they can unwind or
477 // not. It will flag functions with 'nounwind' if it believes that
478 // the function cannot trigger unwinding, but if we do unwind on
479 // stack overflow then it means that we could unwind in any function
480 // anywhere. We would have to make sure that LLVM only places the
481 // nounwind flag on functions which don't call any other functions.
483 // 3. The function that overflowed may have owned arguments. These
484 // arguments need to have their destructors run, but we haven't even
485 // begun executing the function yet, so unwinding will not run the
486 // any landing pads for these functions. If this is ignored, then
487 // the arguments will just be leaked.
489 // Exactly what to do here is a very delicate topic, and is possibly
490 // still up in the air for what exactly to do. Some relevant issues:
492 // #3555 - out-of-stack failure leaks arguments
493 // #3695 - should there be a stack limit?
494 // #9855 - possible strategies which could be taken
495 // #9854 - unwinding on windows through __morestack has never worked
496 // #2361 - possible implementation of not using landing pads
498 if in_green_task_context() {
499 let mut task = Local::borrow(None::<Task>);
// Report using the task's name, or "<unnamed>" if it has none.
503 .map(|n| n.as_slice())
504 .unwrap_or("<unnamed>");
506 // See the message below for why this is not emitted to the
507 // task's logger. This has the additional conundrum of the
508 // logger may not be initialized just yet, meaning that an FFI
509 // call would happen to initialize it (calling out to libuv),
510 // and the FFI call needs 2MB of stack when we just ran out.
511 rterrln!("task '{}' has overflowed its stack", n);
513 rterrln!("stack overflow in non-task context");
520 /// This is the entry point of unwinding for things like lang items and such.
521 /// The arguments are normally generated by the compiler, and need to
522 /// have static lifetimes.
523 pub fn begin_unwind_raw(msg: *c_char, file: *c_char, line: size_t) -> ! {
// View a compiler-supplied C string (assumed to live forever, hence the
// transmute to 'static) as a &'static str; aborts if it is not UTF-8.
// CString::new(p, false) does not take ownership of the pointer.
528 fn static_char_ptr(p: *c_char) -> &'static str {
529 let s = unsafe { CString::new(p, false) };
531 Some(s) => unsafe { transmute::<&str, &'static str>(s) },
532 None => rtabort!("message wasn't utf8?")
536 let msg = static_char_ptr(msg);
537 let file = static_char_ptr(file);
// Delegate to the generic unwinding entry point.
539 begin_unwind(msg, file, line as uint)
542 /// This is the entry point of unwinding for fail!() and assert!().
543 pub fn begin_unwind<M: Any + Send>(msg: M, file: &'static str, line: uint) -> ! {
545 use rt::in_green_task_context;
546 use rt::local::Local;
549 use unstable::intrinsics;
553 // Note that this should be the only allocation performed in this block.
554 // Currently this means that fail!() on OOM will invoke this code path,
555 // but then again we're not really ready for failing on OOM anyway. If
556 // we do start doing this, then we should propagate this allocation to
557 // be performed in the parent of this task instead of the task that's
559 let msg = ~msg as ~Any;
// Try to extract a printable &str from the boxed Any: first a &'static
// str payload, then an owned ~str.
562 //let msg: &Any = msg;
563 let msg_s = match msg.as_ref::<&'static str>() {
565 None => match msg.as_ref::<~str>() {
566 Some(s) => s.as_slice(),
571 if !in_green_task_context() {
572 rterrln!("failed in non-task context at '{}', {}:{}",
// In task context: borrow the task from TLS to get its name for the
// failure message, then hand the payload to the task's unwinder.
577 task = Local::unsafe_borrow();
578 let n = (*task).name.as_ref().map(|n| n.as_slice()).unwrap_or("<unnamed>");
580 // XXX: this should not get forcibly printed to the console, this should
581 // either be sent to the parent task (ideally), or get printed to
582 // the task's logger. Right now the logger is actually a uvio
583 // instance, which uses unkillable blocks internally for various
584 // reasons. This will cause serious trouble if the task is failing
585 // due to mismanagement of its own kill flag, so calling our own
586 // logger in its current state is a bit of a problem.
588 rterrln!("task '{}' failed at '{}', {}:{}", n, msg_s, file, line);
// Failing while already unwinding is a double-fault: abort.
590 if (*task).unwinder.unwinding {
591 rtabort!("unwinding again");
595 (*task).unwinder.begin_unwind(msg);
// ---- tests (each body runs inside a fresh newsched/uv task) ----
607 do run_in_newsched_task() {
// Task-local storage: set and read back two independent keys.
618 do run_in_newsched_task() {
619 local_data_key!(key: @~str)
620 local_data::set(key, @~"data");
621 assert!(*local_data::get(key, |k| k.map(|k| *k)).unwrap() == ~"data");
622 local_data_key!(key2: @~str)
623 local_data::set(key2, @~"data");
624 assert!(*local_data::get(key2, |k| k.map(|k| *k)).unwrap() == ~"data");
// spawntask_try: a clean task yields Ok, a failing task yields Err.
630 do run_in_newsched_task() {
631 let result = spawntask_try(proc()());
632 rtdebug!("trying first assert");
633 assert!(result.is_ok());
634 let result = spawntask_try(proc() fail!());
635 rtdebug!("trying second assert");
636 assert!(result.is_err());
// RNG is usable inside a uv task.
642 do run_in_uv_task() {
643 use rand::{rng, Rng};
645 let _ = r.next_u32();
// Logging macros are usable inside a uv task.
651 do run_in_uv_task() {
652 info!("here i am. logging in a newsched task");
// Simple oneshot channel round-trip.
658 do run_in_newsched_task() {
659 let (port, chan) = Chan::new();
661 assert!(port.recv() == 10);
666 fn comm_shared_chan() {
667 do run_in_newsched_task() {
668 let (port, chan) = SharedChan::new();
670 assert!(port.recv() == 10);
676 use option::{Option, Some, None};
678 do run_in_newsched_task {
// Build a small @mut cycle to exercise the box annihilator on task exit.
680 next: Option<@mut List>,
683 let a = @mut List { next: None };
684 let b = @mut List { next: Some(a) };
692 fn test_begin_unwind() { begin_unwind("cause", file!(), line!()) }