1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Implementation of Rust stack unwinding
13 //! For background on exception handling and stack unwinding please see
14 //! "Exception Handling in LLVM" (llvm.org/docs/ExceptionHandling.html) and
15 //! documents linked from it.
16 //! These are also good reads:
17 //! http://theofilos.cs.columbia.edu/blog/2013/09/22/base_abi/
18 //! http://monoinfinito.wordpress.com/series/exception-handling-in-c/
19 //! http://www.airs.com/blog/index.php?s=exception+frames
21 //! ## A brief summary
23 //! Exception handling happens in two phases: a search phase and a cleanup phase.
25 //! In both phases the unwinder walks stack frames from top to bottom using
26 //! information from the stack frame unwind sections of the current process's
27 //! modules ("module" here refers to an OS module, i.e. an executable or a
30 //! For each stack frame, it invokes the associated "personality routine", whose
31 //! address is also stored in the unwind info section.
33 //! In the search phase, the job of a personality routine is to examine the exception
34 //! object being thrown and to decide whether it should be caught at that stack
35 //! frame. Once the handler frame has been identified, the cleanup phase begins.
37 //! In the cleanup phase, personality routines invoke cleanup code associated
38 //! with their stack frames (i.e. destructors). Once the stack has been unwound down
39 //! to the handler frame level, unwinding stops and the last personality routine
40 //! transfers control to its catch block.
42 //! ## Frame unwind info registration
44 //! Each module has its own frame unwind info section (usually ".eh_frame"), and
45 //! the unwinder needs to know about all of them in order for unwinding to be able to
46 //! cross module boundaries.
48 //! On some platforms, like Linux, this is achieved by dynamically enumerating
49 //! currently loaded modules via the dl_iterate_phdr() API and finding all
50 //! .eh_frame sections.
52 //! Others, like Windows, require modules to actively register their unwind info
53 //! sections by calling the __register_frame_info() API at startup. In the latter
54 //! case it is essential that there is only one copy of the unwinder runtime in
55 //! the process. This is usually achieved by linking to the dynamic version of
56 //! the unwind runtime.
58 //! Currently Rust uses the unwind runtime provided by libgcc.
62 use alloc::owned::Box;
63 use collections::string::String;
64 use collections::vec::Vec;
71 use core::raw::Closure;
75 use task::{Task, Result};
81 cause: Option<Box<Any + Send>>
85 uwe: uw::_Unwind_Exception,
86 cause: Option<Box<Any + Send>>,
89 pub type Callback = fn(msg: &Any + Send, file: &'static str, line: uint);
91 // Variables used for invoking callbacks when a task starts to unwind.
93 // For more information, see below.
// Upper bound on the number of failure callbacks that may be registered
// via register(); the CALLBACKS array has exactly this many slots.
94 static MAX_CALLBACKS: uint = 16;
// Slots for registered callbacks. Each slot stores a Callback function
// pointer transmuted to a uint; a value of 0 means the slot is empty
// (register() asserts the previous slot value was 0 before storing).
95 static mut CALLBACKS: [atomics::AtomicUint, ..MAX_CALLBACKS] =
96 [atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
97 atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
98 atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
99 atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
100 atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
101 atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
102 atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
103 atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT];
// Count of callbacks registered so far. May transiently exceed
// MAX_CALLBACKS (register() increments before checking the bound), so
// readers clamp it, e.g. with cmp::min(amt, MAX_CALLBACKS).
104 static mut CALLBACK_CNT: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
107 pub fn new() -> Unwinder {
114 pub fn unwinding(&self) -> bool {
118 pub fn try(&mut self, f: ||) {
119 self.cause = unsafe { try(f) }.err();
122 pub fn result(&mut self) -> Result {
124 Err(self.cause.take().unwrap())
131 /// Invoke a closure, capturing the cause of failure if one occurs.
133 /// This function will return `None` if the closure did not fail, and will
134 /// return `Some(cause)` if the closure fails. The `cause` returned is the
135 /// object with which failure was originally invoked.
137 /// This function also is unsafe for a variety of reasons:
139 /// * This is not safe to call in a nested fashion. The unwinding
140 /// interface for Rust is designed to have at most one try/catch block per
141 /// task, not multiple. No runtime checking is currently performed to uphold
142 /// this invariant, so this function is not safe. A nested try/catch block
143 /// may result in corruption of the outer try/catch block's state, especially
144 /// if this is used within a task itself.
146 /// * It is not sound to trigger unwinding while already unwinding. Rust tasks
147 /// have runtime checks in place to ensure this invariant, but it is not
148 /// guaranteed that a rust task is in place when invoking this function.
149 /// Unwinding twice can lead to resource leaks where some destructors are not
151 pub unsafe fn try(f: ||) -> ::core::result::Result<(), Box<Any + Send>> {
152 let closure: Closure = mem::transmute(f);
153 let ep = rust_try(try_fn, closure.code as *c_void,
154 closure.env as *c_void);
155 return if ep.is_null() {
158 let my_ep = ep as *mut Exception;
159 rtdebug!("caught {}", (*my_ep).uwe.exception_class);
160 let cause = (*my_ep).cause.take();
161 uw::_Unwind_DeleteException(ep);
165 extern fn try_fn(code: *c_void, env: *c_void) {
167 let closure: || = mem::transmute(Closure {
175 #[link(name = "rustrt_native", kind = "static")]
178 // When f(...) returns normally, the return value is null.
179 // When f(...) throws, the return value is a pointer to the caught
181 fn rust_try(f: extern "C" fn(*c_void, *c_void),
183 data: *c_void) -> *uw::_Unwind_Exception;
187 // An uninlined, unmangled function upon which to slap yer breakpoints
190 fn rust_fail(cause: Box<Any + Send>) -> ! {
191 rtdebug!("begin_unwind()");
194 let exception = box Exception {
195 uwe: uw::_Unwind_Exception {
196 exception_class: rust_exception_class(),
197 exception_cleanup: exception_cleanup,
198 private: [0, ..uw::unwinder_private_data_size],
202 let error = uw::_Unwind_RaiseException(mem::transmute(exception));
203 rtabort!("Could not unwind stack, error = {}", error as int)
206 extern fn exception_cleanup(_unwind_code: uw::_Unwind_Reason_Code,
207 exception: *uw::_Unwind_Exception) {
208 rtdebug!("exception_cleanup()");
210 let _: Box<Exception> = mem::transmute(exception);
215 // Rust's exception class identifier. This is used by personality routines to
216 // determine whether the exception was thrown by their own runtime.
217 fn rust_exception_class() -> uw::_Unwind_Exception_Class {
218 // M O Z \0 R U S T -- vendor, language
222 // We could implement our personality routine in pure Rust, however exception
223 // info decoding is tedious. More importantly, personality routines have to
224 // handle various platform quirks, which are not fun to maintain. For this
225 // reason, we attempt to reuse personality routine of the C language:
226 // __gcc_personality_v0.
228 // Since C does not support exception catching, __gcc_personality_v0 simply
229 // always returns _URC_CONTINUE_UNWIND in search phase, and always returns
230 // _URC_INSTALL_CONTEXT (i.e. "invoke cleanup code") in cleanup phase.
232 // This is pretty close to Rust's exception handling approach, except that Rust
233 // does have a single "catch-all" handler at the bottom of each task's stack.
234 // So we have two versions:
235 // - rust_eh_personality, used by all cleanup landing pads, which never catches,
236 // so the behavior of __gcc_personality_v0 is perfectly adequate there, and
237 // - rust_eh_personality_catch, used only by rust_try(), which always catches.
238 // This is achieved by overriding the return value in search phase to always
241 #[cfg(not(target_arch = "arm"), not(test))]
243 #[allow(visible_private_types)]
249 fn __gcc_personality_v0(version: c_int,
250 actions: uw::_Unwind_Action,
251 exception_class: uw::_Unwind_Exception_Class,
252 ue_header: *uw::_Unwind_Exception,
253 context: *uw::_Unwind_Context)
254 -> uw::_Unwind_Reason_Code;
257 #[lang="eh_personality"]
258 extern fn eh_personality(
260 actions: uw::_Unwind_Action,
261 exception_class: uw::_Unwind_Exception_Class,
262 ue_header: *uw::_Unwind_Exception,
263 context: *uw::_Unwind_Context
264 ) -> uw::_Unwind_Reason_Code
267 __gcc_personality_v0(version, actions, exception_class, ue_header,
272 #[no_mangle] // referenced from rust_try.ll
273 pub extern "C" fn rust_eh_personality_catch(
275 actions: uw::_Unwind_Action,
276 exception_class: uw::_Unwind_Exception_Class,
277 ue_header: *uw::_Unwind_Exception,
278 context: *uw::_Unwind_Context
279 ) -> uw::_Unwind_Reason_Code
281 if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
282 uw::_URC_HANDLER_FOUND // catch!
284 else { // cleanup phase
286 __gcc_personality_v0(version, actions, exception_class, ue_header,
293 // iOS on armv7 is using SjLj exceptions and therefore requires to use
294 // a specialized personality routine: __gcc_personality_sj0
296 #[cfg(target_os = "ios", target_arch = "arm", not(test))]
298 #[allow(visible_private_types)]
304 fn __gcc_personality_sj0(version: c_int,
305 actions: uw::_Unwind_Action,
306 exception_class: uw::_Unwind_Exception_Class,
307 ue_header: *uw::_Unwind_Exception,
308 context: *uw::_Unwind_Context)
309 -> uw::_Unwind_Reason_Code;
312 #[lang="eh_personality"]
313 #[no_mangle] // so we can reference it by name from middle/trans/base.rs
314 pub extern "C" fn rust_eh_personality(
316 actions: uw::_Unwind_Action,
317 exception_class: uw::_Unwind_Exception_Class,
318 ue_header: *uw::_Unwind_Exception,
319 context: *uw::_Unwind_Context
320 ) -> uw::_Unwind_Reason_Code
323 __gcc_personality_sj0(version, actions, exception_class, ue_header,
328 #[no_mangle] // referenced from rust_try.ll
329 pub extern "C" fn rust_eh_personality_catch(
331 actions: uw::_Unwind_Action,
332 exception_class: uw::_Unwind_Exception_Class,
333 ue_header: *uw::_Unwind_Exception,
334 context: *uw::_Unwind_Context
335 ) -> uw::_Unwind_Reason_Code
337 if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
338 uw::_URC_HANDLER_FOUND // catch!
340 else { // cleanup phase
342 __gcc_personality_sj0(version, actions, exception_class, ue_header,
350 // ARM EHABI uses a slightly different personality routine signature,
351 // but otherwise works the same.
352 #[cfg(target_arch = "arm", not(test), not(target_os = "ios"))]
353 #[allow(visible_private_types)]
359 fn __gcc_personality_v0(state: uw::_Unwind_State,
360 ue_header: *uw::_Unwind_Exception,
361 context: *uw::_Unwind_Context)
362 -> uw::_Unwind_Reason_Code;
365 #[lang="eh_personality"]
366 extern "C" fn eh_personality(
367 state: uw::_Unwind_State,
368 ue_header: *uw::_Unwind_Exception,
369 context: *uw::_Unwind_Context
370 ) -> uw::_Unwind_Reason_Code
373 __gcc_personality_v0(state, ue_header, context)
377 #[no_mangle] // referenced from rust_try.ll
378 pub extern "C" fn rust_eh_personality_catch(
379 state: uw::_Unwind_State,
380 ue_header: *uw::_Unwind_Exception,
381 context: *uw::_Unwind_Context
382 ) -> uw::_Unwind_Reason_Code
384 if (state as c_int & uw::_US_ACTION_MASK as c_int)
385 == uw::_US_VIRTUAL_UNWIND_FRAME as c_int { // search phase
386 uw::_URC_HANDLER_FOUND // catch!
388 else { // cleanup phase
390 __gcc_personality_v0(state, ue_header, context)
396 // Entry point of failure from the libcore crate
398 #[lang = "begin_unwind"]
399 pub extern fn rust_begin_unwind(msg: &fmt::Arguments,
400 file: &'static str, line: uint) -> ! {
401 begin_unwind_fmt(msg, file, line)
404 /// The entry point for unwinding with a formatted message.
406 /// This is designed to reduce the amount of code required at the call
407 /// site as much as possible (so that `fail!()` has as low an impact
408 /// on (e.g.) the inlining of other functions as possible), by moving
409 /// the actual formatting into this shared place.
410 #[inline(never)] #[cold]
411 pub fn begin_unwind_fmt(msg: &fmt::Arguments, file: &'static str,
413 use core::fmt::FormatWriter;
415 // We do two allocations here, unfortunately. But (a) they're
416 // required with the current scheme, and (b) we don't handle
417 // failure + OOM properly anyway (see comment in begin_unwind
420 struct VecWriter<'a> { v: &'a mut Vec<u8> }
422 impl<'a> fmt::FormatWriter for VecWriter<'a> {
423 fn write(&mut self, buf: &[u8]) -> fmt::Result {
424 self.v.push_all(buf);
429 let mut v = Vec::new();
430 let _ = write!(&mut VecWriter { v: &mut v }, "{}", msg);
432 begin_unwind_inner(box String::from_utf8(v).unwrap(), file, line)
435 /// This is the entry point of unwinding for fail!() and assert!().
436 #[inline(never)] #[cold] // avoid code bloat at the call sites as much as possible
437 pub fn begin_unwind<M: Any + Send>(msg: M, file: &'static str, line: uint) -> ! {
438 // Note that this should be the only allocation performed in this code path.
439 // Currently this means that fail!() on OOM will invoke this code path,
440 // but then again we're not really ready for failing on OOM anyway. If
441 // we do start doing this, then we should propagate this allocation to
442 // be performed in the parent of this task instead of the task that's
445 // see below for why we do the `Any` coercion here.
446 begin_unwind_inner(box msg, file, line)
450 /// The core of the unwinding.
452 /// This is non-generic to avoid instantiation bloat in other crates
453 /// (which makes compilation of small crates noticeably slower). (Note:
454 /// we need the `Any` object anyway, we're not just creating it to
455 /// avoid being generic.)
457 /// Do this split took the LLVM IR line counts of `fn main() { fail!()
458 /// }` from ~1900/3700 (-O/no opts) to 180/590.
459 #[inline(never)] #[cold] // this is the slow path, please never inline this
460 fn begin_unwind_inner(msg: Box<Any + Send>,
463 // First, invoke call the user-defined callbacks triggered on task failure.
465 // By the time that we see a callback has been registered (by reading
466 // MAX_CALLBACKS), the actual callback itself may have not been stored yet,
467 // so we just chalk it up to a race condition and move on to the next
468 // callback. Additionally, CALLBACK_CNT may briefly be higher than
469 // MAX_CALLBACKS, so we're sure to clamp it as necessary.
470 let callbacks = unsafe {
471 let amt = CALLBACK_CNT.load(atomics::SeqCst);
472 CALLBACKS.slice_to(cmp::min(amt, MAX_CALLBACKS))
474 for cb in callbacks.iter() {
475 match cb.load(atomics::SeqCst) {
478 let f: Callback = unsafe { mem::transmute(n) };
484 // Now that we've run all the necessary unwind callbacks, we actually
485 // perform the unwinding. If we don't have a task, then it's time to die
486 // (hopefully someone printed something about this).
487 let mut task: Box<Task> = match Local::try_take() {
489 None => rust_fail(msg),
492 if task.unwinder.unwinding {
493 // If a task fails while it's already unwinding then we
494 // have limited options. Currently our preference is to
495 // just abort. In the future we may consider resuming
496 // unwinding or otherwise exiting the task cleanly.
497 rterrln!("task failed during unwinding. aborting.");
498 unsafe { intrinsics::abort() }
500 task.unwinder.unwinding = true;
502 // Put the task back in TLS because the unwinding process may run code which
503 // requires the task. We need a handle to its unwinder, however, so after
504 // this we unsafely extract it and continue along.
509 /// Register a callback to be invoked when a task unwinds.
511 /// This is an unsafe and experimental API which allows for an arbitrary
512 /// callback to be invoked when a task fails. This callback is invoked on both
513 /// the initial unwinding and a double unwinding if one occurs. Additionally,
514 /// the local `Task` will be in place for the duration of the callback, and
515 /// the callback must ensure that it remains in place once the callback returns.
517 /// Only a limited number of callbacks can be registered, and this function
518 /// returns whether the callback was successfully registered or not. It is not
519 /// currently possible to unregister a callback once it has been registered.
521 pub unsafe fn register(f: Callback) -> bool {
522 match CALLBACK_CNT.fetch_add(1, atomics::SeqCst) {
523 // The invocation code has knowledge of this window where the count has
524 // been incremented, but the callback has not been stored. We're
525 // guaranteed that the slot we're storing into is 0.
526 n if n < MAX_CALLBACKS => {
527 let prev = CALLBACKS[n].swap(mem::transmute(f), atomics::SeqCst);
528 rtassert!(prev == 0);
531 // If we accidentally bumped the count too high, pull it back.
533 CALLBACK_CNT.store(MAX_CALLBACKS, atomics::SeqCst);