-/// This function is invoked from rust's current __morestack function. Segmented
-/// stacks are currently not enabled as segmented stacks, but rather one giant
-/// stack segment. This means that whenever we run out of stack, we want to
-/// truly consider it to be stack overflow rather than allocating a new stack.
-#[no_mangle] // - this is called from C code
-#[no_split_stack] // - it would be sad for this function to trigger __morestack
-#[doc(hidden)] // - Function must be `pub` to get exported, but it's
- // irrelevant for documentation purposes.
-#[cfg(not(test))] // in testing, use the original libstd's version
-pub extern "C" fn rust_stack_exhausted() {
- use rt::in_green_task_context;
- use rt::task::Task;
- use rt::local::Local;
- use unstable::intrinsics;
-
- unsafe {
- // We're calling this function because the stack just ran out. We need
- // to call some other rust functions, but if we invoke the functions
- // right now it'll just trigger this handler being called again. In
- // order to alleviate this, we move the stack limit to be inside of the
- // red zone that was allocated for exactly this reason.
- let limit = context::get_sp_limit();
- context::record_sp_limit(limit - context::RED_ZONE / 2);
-
- // This probably isn't the best course of action. Ideally one would want
- // to unwind the stack here instead of just aborting the entire process.
- // This is a tricky problem, however. There's a few things which need to
- // be considered:
- //
- // 1. We're here because of a stack overflow, yet unwinding will run
- // destructors and hence arbitrary code. What if that code overflows
- // the stack? One possibility is to use the above allocation of an
- // extra 10k to hope that we don't hit the limit, and if we do then
- // abort the whole program. Not the best, but kind of hard to deal
- // with unless we want to switch stacks.
- //
- // 2. LLVM will optimize functions based on whether they can unwind or
- // not. It will flag functions with 'nounwind' if it believes that
- // the function cannot trigger unwinding, but if we do unwind on
- // stack overflow then it means that we could unwind in any function
- // anywhere. We would have to make sure that LLVM only places the
- // nounwind flag on functions which don't call any other functions.
- //
- // 3. The function that overflowed may have owned arguments. These
- // arguments need to have their destructors run, but we haven't even
- begun executing the function yet, so unwinding will not run
- any landing pads for these functions. If this is ignored, then
- // the arguments will just be leaked.
- //
- // Exactly what to do here is a very delicate topic, and is possibly
- // still up in the air for what exactly to do. Some relevant issues:
- //
- // #3555 - out-of-stack failure leaks arguments
- // #3695 - should there be a stack limit?
- // #9855 - possible strategies which could be taken
- // #9854 - unwinding on windows through __morestack has never worked
- // #2361 - possible implementation of not using landing pads
-
- if in_green_task_context() {
- let mut task = Local::borrow(None::<Task>);
- let n = task.get()
- .name
- .as_ref()
- .map(|n| n.as_slice())
- .unwrap_or("<unnamed>");
-
- // See the message below for why this is not emitted to the
- // task's logger. This has the additional conundrum of the
- // logger may not be initialized just yet, meaning that an FFI
- call would happen to initialize it (calling out to libuv),
- // and the FFI call needs 2MB of stack when we just ran out.
- rterrln!("task '{}' has overflowed its stack", n);