use prelude::*;
use borrow;
-use cast::transmute;
-use cell::Cell;
use cleanup;
-use libc::{c_void, uintptr_t, c_char, size_t};
+use io::Writer;
+use libc::{c_char, size_t};
use local_data;
use option::{Option, Some, None};
use rt::borrowck::BorrowRecord;
use rt::borrowck;
use rt::context::Context;
-use rt::context;
use rt::env;
-use io::Writer;
use rt::kill::Death;
use rt::local::Local;
use rt::logging::StdErrLogger;
use rt::sched::{Scheduler, SchedHandle};
use rt::stack::{StackSegment, StackPool};
+use rt::unwind::Unwinder;
use send_str::SendStr;
-use task::LinkedFailure;
-use task::spawn::Taskgroup;
use unstable::finally::Finally;
+use unstable::mutex::Mutex;
// The Task struct represents all state associated with a rust
// task. There are at this point two primary "subtypes" of task,
storage: LocalStorage,
logger: Option<StdErrLogger>,
unwinder: Unwinder,
- taskgroup: Option<Taskgroup>,
death: Death,
destroyed: bool,
name: Option<SendStr>,
// Dynamic borrowck debugging info
borrow_list: Option<~[BorrowRecord]>,
stdout_handle: Option<~Writer>,
+
+ // See the comments in the scheduler about why this is necessary
+ nasty_deschedule_lock: Mutex,
}
pub enum TaskType {
pub struct GarbageCollector;
pub struct LocalStorage(Option<local_data::Map>);
-/// Represents the reason for the current unwinding process
-pub enum UnwindResult {
- /// The task is ending successfully
- Success,
-
- /// The Task is failing with reason `~Any`
- Failure(~Any),
-}
-
-impl UnwindResult {
- /// Returns `true` if this `UnwindResult` is a failure
- #[inline]
- pub fn is_failure(&self) -> bool {
- match *self {
- Success => false,
- Failure(_) => true
- }
- }
-
- /// Returns `true` if this `UnwindResult` is a success
- #[inline]
- pub fn is_success(&self) -> bool {
- match *self {
- Success => true,
- Failure(_) => false
- }
- }
-}
-
-pub struct Unwinder {
- unwinding: bool,
- cause: Option<~Any>
-}
-
-impl Unwinder {
- fn to_unwind_result(&mut self) -> UnwindResult {
- if self.unwinding {
- Failure(self.cause.take().unwrap())
- } else {
- Success
- }
- }
-}
-
impl Task {
// A helper to build a new task using the dynamically found
f: proc(),
home: SchedHome)
-> ~Task {
- let f = Cell::new(f);
- let home = Cell::new(home);
- do Local::borrow |running_task: &mut Task| {
- let mut sched = running_task.sched.take_unwrap();
- let new_task = ~running_task.new_child_homed(&mut sched.stack_pool,
- stack_size,
- home.take(),
- f.take());
- running_task.sched = Some(sched);
- new_task
- }
+ let mut running_task = Local::borrow(None::<Task>);
+ let mut sched = running_task.get().sched.take_unwrap();
+ let new_task = ~running_task.get()
+ .new_child_homed(&mut sched.stack_pool,
+ stack_size,
+ home,
+ f);
+ running_task.get().sched = Some(sched);
+ new_task
}
pub fn build_child(stack_size: Option<uint>, f: proc()) -> ~Task {
f: proc(),
home: SchedHome)
-> ~Task {
- let f = Cell::new(f);
- let home = Cell::new(home);
- do Local::borrow |running_task: &mut Task| {
- let mut sched = running_task.sched.take_unwrap();
- let new_task = ~Task::new_root_homed(&mut sched.stack_pool,
- stack_size,
- home.take(),
- f.take());
- running_task.sched = Some(sched);
- new_task
- }
+ let mut running_task = Local::borrow(None::<Task>);
+ let mut sched = running_task.get().sched.take_unwrap();
+ let new_task = ~Task::new_root_homed(&mut sched.stack_pool,
+ stack_size,
+ home,
+ f);
+ running_task.get().sched = Some(sched);
+ new_task
}
pub fn build_root(stack_size: Option<uint>, f: proc()) -> ~Task {
storage: LocalStorage(None),
logger: None,
unwinder: Unwinder { unwinding: false, cause: None },
- taskgroup: None,
death: Death::new(),
destroyed: false,
coroutine: Some(Coroutine::empty()),
task_type: SchedTask,
borrow_list: None,
stdout_handle: None,
+ nasty_deschedule_lock: unsafe { Mutex::new() },
}
}
storage: LocalStorage(None),
logger: None,
unwinder: Unwinder { unwinding: false, cause: None },
- taskgroup: None,
death: Death::new(),
destroyed: false,
name: None,
task_type: GreenTask(Some(home)),
borrow_list: None,
stdout_handle: None,
+ nasty_deschedule_lock: unsafe { Mutex::new() },
}
}
storage: LocalStorage(None),
logger: None,
unwinder: Unwinder { unwinding: false, cause: None },
- taskgroup: None,
- // FIXME(#7544) make watching optional
- death: self.death.new_child(),
+ death: Death::new(),
destroyed: false,
name: None,
coroutine: Some(Coroutine::new(stack_pool, stack_size, start)),
task_type: GreenTask(Some(home)),
borrow_list: None,
stdout_handle: None,
+ nasty_deschedule_lock: unsafe { Mutex::new() },
}
}
}
}
- pub fn run(&mut self, f: &fn()) {
+ pub fn run(&mut self, f: ||) {
rtdebug!("run called on task: {}", borrow::to_uint(self));
// The only try/catch block in the world. Attempt to run the task's
// client-specified code and catch any failures.
- do self.unwinder.try {
+ self.unwinder.try(|| {
// Run the task main function, then do some cleanup.
- do f.finally {
+ f.finally(|| {
// First, destroy task-local storage. This may run user dtors.
//
None => {}
}
self.logger.take();
- }
- }
+ })
+ });
// Cleanup the dynamic borrowck debugging info
borrowck::clear_task_borrow_list();
- // NB. We pass the taskgroup into death so that it can be dropped while
- // the unkillable counter is set. This is necessary for when the
- // taskgroup destruction code drops references on KillHandles, which
- // might require using unkillable (to synchronize with an unwrapper).
- self.death.collect_failure(self.unwinder.to_unwind_result(), self.taskgroup.take());
+ self.death.collect_failure(self.unwinder.result());
self.destroyed = true;
}
pub fn is_home_no_tls(&self, sched: &~Scheduler) -> bool {
match self.task_type {
GreenTask(Some(AnySched)) => { false }
- GreenTask(Some(Sched(SchedHandle { sched_id: ref id, _}))) => {
+ GreenTask(Some(Sched(SchedHandle { sched_id: ref id, .. }))) => {
*id == sched.sched_id()
}
GreenTask(None) => {
pub fn homed(&self) -> bool {
match self.task_type {
GreenTask(Some(AnySched)) => { false }
- GreenTask(Some(Sched(SchedHandle { _ }))) => { true }
+ GreenTask(Some(Sched(SchedHandle { .. }))) => { true }
GreenTask(None) => {
rtabort!("task without home");
}
// Grab both the scheduler and the task from TLS and check if the
// task is executing on an appropriate scheduler.
pub fn on_appropriate_sched() -> bool {
- do Local::borrow |task: &mut Task| {
- let sched_id = task.sched.get_ref().sched_id();
- let sched_run_anything = task.sched.get_ref().run_anything;
- match task.task_type {
- GreenTask(Some(AnySched)) => {
- rtdebug!("anysched task in sched check ****");
- sched_run_anything
- }
- GreenTask(Some(Sched(SchedHandle { sched_id: ref id, _ }))) => {
- rtdebug!("homed task in sched check ****");
- *id == sched_id
- }
- GreenTask(None) => {
- rtabort!("task without home");
- }
- SchedTask => {
- rtabort!("type error: expected: GreenTask, found: SchedTask");
- }
+ let mut task = Local::borrow(None::<Task>);
+ let sched_id = task.get().sched.get_ref().sched_id();
+ let sched_run_anything = task.get().sched.get_ref().run_anything;
+ match task.get().task_type {
+ GreenTask(Some(AnySched)) => {
+ rtdebug!("anysched task in sched check ****");
+ sched_run_anything
+ }
+ GreenTask(Some(Sched(SchedHandle { sched_id: ref id, ..}))) => {
+ rtdebug!("homed task in sched check ****");
+ *id == sched_id
+ }
+ GreenTask(None) => {
+ rtabort!("task without home");
+ }
+ SchedTask => {
+ rtabort!("type error: expected: GreenTask, found: SchedTask");
}
}
}
fn drop(&mut self) {
rtdebug!("called drop for a task: {}", borrow::to_uint(self));
rtassert!(self.destroyed);
+
+ unsafe { self.nasty_deschedule_lock.destroy(); }
}
}
}
fn build_start_wrapper(start: proc()) -> proc() {
- let start_cell = Cell::new(start);
- let wrapper: proc() = || {
+ let wrapper: proc() = proc() {
// First code after swap to this new context. Run our
// cleanup job.
unsafe {
// Again - might work while safe, or it might not.
- do Local::borrow |sched: &mut Scheduler| {
- sched.run_cleanup_job();
+ {
+ let mut sched = Local::borrow(None::<Scheduler>);
+ sched.get().run_cleanup_job();
}
// To call the run method on a task we need a direct
// need to unsafe_borrow.
let task: *mut Task = Local::unsafe_borrow();
- do (*task).run {
+ let mut start_cell = Some(start);
+ (*task).run(|| {
// N.B. Removing `start` from the start wrapper
// closure by emptying a cell is critical for
// correctness. The ~Task pointer, and in turn the
// be in task context. By moving `start` out of
// the closure, all the user code goes out of
// scope while the task is still running.
- let start = start_cell.take();
+ let start = start_cell.take_unwrap();
start();
- };
+ });
}
// We remove the sched from the Task in TLS right now.
/// Destroy coroutine and try to reuse stack segment.
pub fn recycle(self, stack_pool: &mut StackPool) {
match self {
- Coroutine { current_stack_segment, _ } => {
+ Coroutine { current_stack_segment, .. } => {
stack_pool.give_segment(current_stack_segment);
}
}
}
-
-// Just a sanity check to make sure we are catching a Rust-thrown exception
-static UNWIND_TOKEN: uintptr_t = 839147;
-
-impl Unwinder {
- pub fn try(&mut self, f: &fn()) {
- use unstable::raw::Closure;
-
- unsafe {
- let closure: Closure = transmute(f);
- let code = transmute(closure.code);
- let env = transmute(closure.env);
-
- let token = rust_try(try_fn, code, env);
- assert!(token == 0 || token == UNWIND_TOKEN);
- }
-
- extern fn try_fn(code: *c_void, env: *c_void) {
- unsafe {
- let closure: Closure = Closure {
- code: transmute(code),
- env: transmute(env),
- };
- let closure: &fn() = transmute(closure);
- closure();
- }
- }
-
- extern {
- fn rust_try(f: extern "C" fn(*c_void, *c_void),
- code: *c_void,
- data: *c_void) -> uintptr_t;
- }
- }
-
- pub fn begin_unwind(&mut self, cause: ~Any) -> ! {
- self.unwinding = true;
- self.cause = Some(cause);
- unsafe {
- rust_begin_unwind(UNWIND_TOKEN);
- return transmute(());
- }
- extern {
- fn rust_begin_unwind(token: uintptr_t);
- }
- }
-}
-
/// This function is invoked from rust's current __morestack function. Segmented
/// stacks are currently not enabled as segmented stacks, but rather one giant
/// stack segment. This means that whenever we run out of stack, we want to
#[no_split_stack] // - it would be sad for this function to trigger __morestack
#[doc(hidden)] // - Function must be `pub` to get exported, but it's
// irrelevant for documentation purposes.
+#[cfg(not(test))] // in testing, use the original libstd's version
pub extern "C" fn rust_stack_exhausted() {
+ use rt::context;
use rt::in_green_task_context;
use rt::task::Task;
use rt::local::Local;
// #2361 - possible implementation of not using landing pads
if in_green_task_context() {
- do Local::borrow |task: &mut Task| {
- let n = task.name.as_ref().map(|n| n.as_slice()).unwrap_or("<unnamed>");
-
- // See the message below for why this is not emitted to the
- // task's logger. This has the additional conundrum of the
- // logger may not be initialized just yet, meaning that an FFI
- // call would happen to initialized it (calling out to libuv),
- // and the FFI call needs 2MB of stack when we just ran out.
- rterrln!("task '{}' has overflowed its stack", n);
- }
+ let mut task = Local::borrow(None::<Task>);
+ let n = task.get()
+ .name
+ .as_ref()
+ .map(|n| n.as_slice())
+ .unwrap_or("<unnamed>");
+
+ // See the message below for why this is not emitted to the
+    // task's logger. This has the additional conundrum that the
+    // logger may not be initialized just yet, meaning that an FFI
+    // call would happen to initialize it (calling out to libuv),
+ // and the FFI call needs 2MB of stack when we just ran out.
+ rterrln!("task '{}' has overflowed its stack", n);
} else {
rterrln!("stack overflow in non-task context");
}
Some(s) => *s,
None => match msg.as_ref::<~str>() {
Some(s) => s.as_slice(),
- None => match msg.as_ref::<LinkedFailure>() {
- Some(*) => "linked failure",
- None => "~Any",
- }
+ None => "~Any",
}
};
mod test {
use super::*;
use rt::test::*;
+ use prelude::*;
#[test]
fn local_heap() {
#[test]
fn unwind() {
do run_in_newsched_task() {
- let result = spawntask_try(||());
+ let result = spawntask_try(proc()());
rtdebug!("trying first assert");
assert!(result.is_ok());
- let result = spawntask_try(|| fail!());
+ let result = spawntask_try(proc() fail!());
rtdebug!("trying second assert");
assert!(result.is_err());
}
}
}
- #[test]
- fn comm_oneshot() {
- use comm::*;
-
- do run_in_newsched_task {
- let (port, chan) = oneshot();
- chan.send(10);
- assert!(port.recv() == 10);
- }
- }
-
#[test]
fn comm_stream() {
- use comm::*;
-
do run_in_newsched_task() {
- let (port, chan) = stream();
+ let (port, chan) = Chan::new();
chan.send(10);
assert!(port.recv() == 10);
}
#[test]
fn comm_shared_chan() {
- use comm::*;
-
do run_in_newsched_task() {
- let (port, chan) = stream();
- let chan = SharedChan::new(chan);
+ let (port, chan) = SharedChan::new();
chan.send(10);
assert!(port.recv() == 10);
}
}
- #[test]
- fn linked_failure() {
- do run_in_newsched_task() {
- let res = do spawntask_try {
- spawntask_random(|| fail!());
- };
- assert!(res.is_err());
- }
- }
-
#[test]
fn heap_cycles() {
use option::{Option, Some, None};