1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Implementation of Rust stack unwinding
13 //! For background on exception handling and stack unwinding please see
14 //! "Exception Handling in LLVM" (llvm.org/docs/ExceptionHandling.html) and
15 //! documents linked from it.
16 //! These are also good reads:
17 //! http://theofilos.cs.columbia.edu/blog/2013/09/22/base_abi/
18 //! http://monoinfinito.wordpress.com/series/exception-handling-in-c/
19 //! http://www.airs.com/blog/index.php?s=exception+frames
21 //! ## A brief summary
23 //! Exception handling happens in two phases: a search phase and a cleanup phase.
25 //! In both phases the unwinder walks stack frames from top to bottom using
26 //! information from the stack frame unwind sections of the current process's
27 //! modules ("module" here refers to an OS module, i.e. an executable or a
30 //! For each stack frame, it invokes the associated "personality routine", whose
31 //! address is also stored in the unwind info section.
33 //! In the search phase, the job of a personality routine is to examine the exception
34 //! object being thrown, and to decide whether it should be caught at that stack
35 //! frame. Once the handler frame has been identified, the cleanup phase begins.
37 //! In the cleanup phase, personality routines invoke cleanup code associated
38 //! with their stack frames (i.e. destructors). Once the stack has been unwound down
39 //! to the handler frame level, unwinding stops and the last personality routine
40 //! transfers control to its catch block.
42 //! ## Frame unwind info registration
44 //! Each module has its own frame unwind info section (usually ".eh_frame"), and
45 //! the unwinder needs to know about all of them in order for unwinding to be able to
46 //! cross module boundaries.
48 //! On some platforms, like Linux, this is achieved by dynamically enumerating
49 //! currently loaded modules via the dl_iterate_phdr() API and finding all
50 //! .eh_frame sections.
52 //! Others, like Windows, require modules to actively register their unwind info
53 //! sections by calling __register_frame_info() API at startup. In the latter
54 //! case it is essential that there is only one copy of the unwinder runtime in
55 //! the process. This is usually achieved by linking to the dynamic version of
56 //! the unwind runtime.
58 //! Currently Rust uses the unwind runtime provided by libgcc.
70 use sync::atomic::{self, Ordering};
71 use sync::{Once, ONCE_INIT};
73 use rt::libunwind as uw;
76 uwe: uw::_Unwind_Exception,
77 cause: Option<Box<Any + Send>>,
80 pub type Callback = fn(msg: &(Any + Send), file: &'static str, line: uint);
82 // Variables used for invoking callbacks when a thread starts to unwind.
84 // For more information, see below.
85 const MAX_CALLBACKS: uint = 16;
86 static CALLBACKS: [atomic::AtomicUsize; MAX_CALLBACKS] =
87 [atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
88 atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
89 atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
90 atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
91 atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
92 atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
93 atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
94 atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT];
95 static CALLBACK_CNT: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
97 thread_local! { static PANICKING: Cell<bool> = Cell::new(false) }
99 /// Invoke a closure, capturing the cause of panic if one occurs.
101 /// This function will return `Ok(())` if the closure did not panic, and will
102 /// return `Err(cause)` if the closure panics. The `cause` returned is the
103 /// object with which panic was originally invoked.
105 /// This function also is unsafe for a variety of reasons:
107 /// * This is not safe to call in a nested fashion. The unwinding
108 /// interface for Rust is designed to have at most one try/catch block per
109 /// thread, not multiple. No runtime checking is currently performed to uphold
110 /// this invariant, so this function is not safe. A nested try/catch block
111 /// may result in corruption of the outer try/catch block's state, especially
112 /// if this is used within a thread itself.
114 /// * It is not sound to trigger unwinding while already unwinding. Rust threads
115 /// have runtime checks in place to ensure this invariant, but it is not
116 /// guaranteed that a rust thread is in place when invoking this function.
117 /// Unwinding twice can lead to resource leaks where some destructors are not
119 pub unsafe fn try<F: FnOnce()>(f: F) -> Result<(), Box<Any + Send>> {
122 let prev = PANICKING.with(|s| s.get());
123 PANICKING.with(|s| s.set(false));
124 let ep = rust_try(try_fn::<F>, &mut f as *mut _ as *mut c_void);
125 PANICKING.with(|s| s.set(prev));
126 return if ep.is_null() {
129 let my_ep = ep as *mut Exception;
130 rtdebug!("caught {}", (*my_ep).uwe.exception_class);
131 let cause = (*my_ep).cause.take();
132 uw::_Unwind_DeleteException(ep);
136 extern fn try_fn<F: FnOnce()>(opt_closure: *mut c_void) {
137 let opt_closure = opt_closure as *mut Option<F>;
138 unsafe { (*opt_closure).take().unwrap()(); }
141 #[link(name = "rustrt_native", kind = "static")]
147 // When f(...) returns normally, the return value is null.
148 // When f(...) throws, the return value is a pointer to the caught
150 fn rust_try(f: extern fn(*mut c_void),
151 data: *mut c_void) -> *mut uw::_Unwind_Exception;
155 /// Determines whether the current thread is unwinding because of panic.
156 pub fn panicking() -> bool {
157 PANICKING.with(|s| s.get())
160 // An uninlined, unmangled function upon which to slap yer breakpoints
163 #[allow(private_no_mangle_fns)]
164 fn rust_panic(cause: Box<Any + Send>) -> ! {
165 rtdebug!("begin_unwind()");
168 let exception = box Exception {
169 uwe: uw::_Unwind_Exception {
170 exception_class: rust_exception_class(),
171 exception_cleanup: exception_cleanup,
172 private: [0; uw::unwinder_private_data_size],
176 let error = uw::_Unwind_RaiseException(mem::transmute(exception));
177 rtabort!("Could not unwind stack, error = {}", error as int)
180 extern fn exception_cleanup(_unwind_code: uw::_Unwind_Reason_Code,
181 exception: *mut uw::_Unwind_Exception) {
182 rtdebug!("exception_cleanup()");
184 let _: Box<Exception> = mem::transmute(exception);
189 // Rust's exception class identifier. This is used by personality routines to
190 // determine whether the exception was thrown by their own runtime.
191 fn rust_exception_class() -> uw::_Unwind_Exception_Class {
192 // M O Z \0 R U S T -- vendor, language
196 // We could implement our personality routine in pure Rust, however exception
197 // info decoding is tedious. More importantly, personality routines have to
198 // handle various platform quirks, which are not fun to maintain. For this
199 // reason, we attempt to reuse personality routine of the C language:
200 // __gcc_personality_v0.
202 // Since C does not support exception catching, __gcc_personality_v0 simply
203 // always returns _URC_CONTINUE_UNWIND in search phase, and always returns
204 // _URC_INSTALL_CONTEXT (i.e. "invoke cleanup code") in cleanup phase.
206 // This is pretty close to Rust's exception handling approach, except that Rust
207 // does have a single "catch-all" handler at the bottom of each thread's stack.
208 // So we have two versions of the personality routine:
209 // - rust_eh_personality, used by all cleanup landing pads, which never catches,
210 // so the behavior of __gcc_personality_v0 is perfectly adequate there, and
211 // - rust_eh_personality_catch, used only by rust_try(), which always catches.
213 // Note, however, that for implementation simplicity, rust_eh_personality_catch
214 // lacks code to install a landing pad, so in order to obtain exception object
215 // pointer (which it needs to return upstream), rust_try() employs another trick:
216 // it calls into the nested rust_try_inner(), whose landing pad does not resume
217 // unwinds. Instead, it extracts the exception pointer and performs a "normal"
220 // See also: rt/rust_try.ll
222 #[cfg(all(not(target_arch = "arm"),
223 not(all(windows, target_arch = "x86_64")),
227 use rt::libunwind as uw;
231 fn __gcc_personality_v0(version: c_int,
232 actions: uw::_Unwind_Action,
233 exception_class: uw::_Unwind_Exception_Class,
234 ue_header: *mut uw::_Unwind_Exception,
235 context: *mut uw::_Unwind_Context)
236 -> uw::_Unwind_Reason_Code;
239 #[lang="eh_personality"]
240 #[no_mangle] // referenced from rust_try.ll
241 #[allow(private_no_mangle_fns)]
242 extern fn rust_eh_personality(
244 actions: uw::_Unwind_Action,
245 exception_class: uw::_Unwind_Exception_Class,
246 ue_header: *mut uw::_Unwind_Exception,
247 context: *mut uw::_Unwind_Context
248 ) -> uw::_Unwind_Reason_Code
251 __gcc_personality_v0(version, actions, exception_class, ue_header,
256 #[no_mangle] // referenced from rust_try.ll
257 pub extern "C" fn rust_eh_personality_catch(
259 actions: uw::_Unwind_Action,
260 _exception_class: uw::_Unwind_Exception_Class,
261 _ue_header: *mut uw::_Unwind_Exception,
262 _context: *mut uw::_Unwind_Context
263 ) -> uw::_Unwind_Reason_Code
266 if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
267 uw::_URC_HANDLER_FOUND // catch!
269 else { // cleanup phase
270 uw::_URC_INSTALL_CONTEXT
275 // iOS on armv7 is using SjLj exceptions and therefore requires to use
276 // a specialized personality routine: __gcc_personality_sj0
278 #[cfg(all(target_os = "ios", target_arch = "arm", not(test)))]
281 use rt::libunwind as uw;
285 fn __gcc_personality_sj0(version: c_int,
286 actions: uw::_Unwind_Action,
287 exception_class: uw::_Unwind_Exception_Class,
288 ue_header: *mut uw::_Unwind_Exception,
289 context: *mut uw::_Unwind_Context)
290 -> uw::_Unwind_Reason_Code;
293 #[lang="eh_personality"]
294 #[no_mangle] // referenced from rust_try.ll
295 #[allow(private_no_mangle_fns)]
296 pub extern "C" fn rust_eh_personality(
298 actions: uw::_Unwind_Action,
299 exception_class: uw::_Unwind_Exception_Class,
300 ue_header: *mut uw::_Unwind_Exception,
301 context: *mut uw::_Unwind_Context
302 ) -> uw::_Unwind_Reason_Code
305 __gcc_personality_sj0(version, actions, exception_class, ue_header,
310 #[no_mangle] // referenced from rust_try.ll
311 pub extern "C" fn rust_eh_personality_catch(
313 actions: uw::_Unwind_Action,
314 _exception_class: uw::_Unwind_Exception_Class,
315 _ue_header: *mut uw::_Unwind_Exception,
316 _context: *mut uw::_Unwind_Context
317 ) -> uw::_Unwind_Reason_Code
319 if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
320 uw::_URC_HANDLER_FOUND // catch!
322 else { // cleanup phase
324 __gcc_personality_sj0(_version, actions, _exception_class, _ue_header,
332 // ARM EHABI uses a slightly different personality routine signature,
333 // but otherwise works the same.
334 #[cfg(all(target_arch = "arm", not(target_os = "ios"), not(test)))]
337 use rt::libunwind as uw;
341 fn __gcc_personality_v0(state: uw::_Unwind_State,
342 ue_header: *mut uw::_Unwind_Exception,
343 context: *mut uw::_Unwind_Context)
344 -> uw::_Unwind_Reason_Code;
347 #[lang="eh_personality"]
348 #[no_mangle] // referenced from rust_try.ll
349 #[allow(private_no_mangle_fns)]
350 extern "C" fn rust_eh_personality(
351 state: uw::_Unwind_State,
352 ue_header: *mut uw::_Unwind_Exception,
353 context: *mut uw::_Unwind_Context
354 ) -> uw::_Unwind_Reason_Code
357 __gcc_personality_v0(state, ue_header, context)
361 #[no_mangle] // referenced from rust_try.ll
362 pub extern "C" fn rust_eh_personality_catch(
363 state: uw::_Unwind_State,
364 _ue_header: *mut uw::_Unwind_Exception,
365 _context: *mut uw::_Unwind_Context
366 ) -> uw::_Unwind_Reason_Code
368 if (state as c_int & uw::_US_ACTION_MASK as c_int)
369 == uw::_US_VIRTUAL_UNWIND_FRAME as c_int { // search phase
370 uw::_URC_HANDLER_FOUND // catch!
372 else { // cleanup phase
373 uw::_URC_INSTALL_CONTEXT
378 // Win64 SEH (see http://msdn.microsoft.com/en-us/library/1eyas8tf.aspx)
380 // This looks a bit convoluted because rather than implementing a native SEH handler,
381 // GCC reuses the same personality routine as for the other architectures by wrapping it
382 // with an "API translator" layer (_GCC_specific_handler).
384 #[cfg(all(windows, target_arch = "x86_64", not(test)))]
386 #[allow(non_camel_case_types, non_snake_case)]
388 pub use self::EXCEPTION_DISPOSITION::*;
389 use rt::libunwind as uw;
390 use libc::{c_void, c_int};
393 pub struct EXCEPTION_RECORD;
397 pub struct DISPATCHER_CONTEXT;
401 pub enum EXCEPTION_DISPOSITION {
402 ExceptionContinueExecution,
403 ExceptionContinueSearch,
404 ExceptionNestedException,
405 ExceptionCollidedUnwind
408 type _Unwind_Personality_Fn =
411 actions: uw::_Unwind_Action,
412 exception_class: uw::_Unwind_Exception_Class,
413 ue_header: *mut uw::_Unwind_Exception,
414 context: *mut uw::_Unwind_Context
415 ) -> uw::_Unwind_Reason_Code;
418 fn __gcc_personality_seh0(
419 exceptionRecord: *mut EXCEPTION_RECORD,
420 establisherFrame: *mut c_void,
421 contextRecord: *mut CONTEXT,
422 dispatcherContext: *mut DISPATCHER_CONTEXT
423 ) -> EXCEPTION_DISPOSITION;
425 fn _GCC_specific_handler(
426 exceptionRecord: *mut EXCEPTION_RECORD,
427 establisherFrame: *mut c_void,
428 contextRecord: *mut CONTEXT,
429 dispatcherContext: *mut DISPATCHER_CONTEXT,
430 personality: _Unwind_Personality_Fn
431 ) -> EXCEPTION_DISPOSITION;
434 #[lang="eh_personality"]
435 #[no_mangle] // referenced from rust_try.ll
436 #[allow(private_no_mangle_fns)]
437 extern "C" fn rust_eh_personality(
438 exceptionRecord: *mut EXCEPTION_RECORD,
439 establisherFrame: *mut c_void,
440 contextRecord: *mut CONTEXT,
441 dispatcherContext: *mut DISPATCHER_CONTEXT
442 ) -> EXCEPTION_DISPOSITION
445 __gcc_personality_seh0(exceptionRecord, establisherFrame,
446 contextRecord, dispatcherContext)
450 #[no_mangle] // referenced from rust_try.ll
451 pub extern "C" fn rust_eh_personality_catch(
452 exceptionRecord: *mut EXCEPTION_RECORD,
453 establisherFrame: *mut c_void,
454 contextRecord: *mut CONTEXT,
455 dispatcherContext: *mut DISPATCHER_CONTEXT
456 ) -> EXCEPTION_DISPOSITION
460 actions: uw::_Unwind_Action,
461 _exception_class: uw::_Unwind_Exception_Class,
462 _ue_header: *mut uw::_Unwind_Exception,
463 _context: *mut uw::_Unwind_Context
464 ) -> uw::_Unwind_Reason_Code
466 if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
467 uw::_URC_HANDLER_FOUND // catch!
469 else { // cleanup phase
470 uw::_URC_INSTALL_CONTEXT
475 _GCC_specific_handler(exceptionRecord, establisherFrame,
476 contextRecord, dispatcherContext,
483 /// Entry point of panic from the libcore crate.
484 #[lang = "panic_fmt"]
485 pub extern fn rust_begin_unwind(msg: fmt::Arguments,
486 file: &'static str, line: uint) -> ! {
487 begin_unwind_fmt(msg, &(file, line))
490 /// The entry point for unwinding with a formatted message.
492 /// This is designed to reduce the amount of code required at the call
493 /// site as much as possible (so that `panic!()` has as low an impact
494 /// on (e.g.) the inlining of other functions as possible), by moving
495 /// the actual formatting into this shared place.
496 #[inline(never)] #[cold]
497 pub fn begin_unwind_fmt(msg: fmt::Arguments, file_line: &(&'static str, uint)) -> ! {
500 // We do two allocations here, unfortunately. But (a) they're
501 // required with the current scheme, and (b) we don't handle
502 // panic + OOM properly anyway (see comment in begin_unwind
505 let mut s = String::new();
506 let _ = write!(&mut s, "{}", msg);
507 begin_unwind_inner(box s, file_line)
510 /// This is the entry point of unwinding for panic!() and assert!().
511 #[inline(never)] #[cold] // avoid code bloat at the call sites as much as possible
512 pub fn begin_unwind<M: Any + Send>(msg: M, file_line: &(&'static str, uint)) -> ! {
513 // Note that this should be the only allocation performed in this code path.
514 // Currently this means that panic!() on OOM will invoke this code path,
515 // but then again we're not really ready for panic on OOM anyway. If
516 // we do start doing this, then we should propagate this allocation to
517 // be performed in the parent of this thread instead of the thread that's
520 // see below for why we do the `Any` coercion here.
521 begin_unwind_inner(box msg, file_line)
524 /// The core of the unwinding.
526 /// This is non-generic to avoid instantiation bloat in other crates
527 /// (which makes compilation of small crates noticeably slower). (Note:
528 /// we need the `Any` object anyway, we're not just creating it to
529 /// avoid being generic.)
531 /// Doing this split took the LLVM IR line counts of `fn main() { panic!()
532 /// }` from ~1900/3700 (-O/no opts) to 180/590.
533 #[inline(never)] #[cold] // this is the slow path, please never inline this
534 fn begin_unwind_inner(msg: Box<Any + Send>, file_line: &(&'static str, uint)) -> ! {
535 // Make sure the default failure handler is registered before we look at the
537 static INIT: Once = ONCE_INIT;
538 INIT.call_once(|| unsafe { register(failure::on_fail); });
540 // First, invoke call the user-defined callbacks triggered on thread panic.
542 // By the time that we see a callback has been registered (by reading
543 // MAX_CALLBACKS), the actual callback itself may have not been stored yet,
544 // so we just chalk it up to a race condition and move on to the next
545 // callback. Additionally, CALLBACK_CNT may briefly be higher than
546 // MAX_CALLBACKS, so we're sure to clamp it as necessary.
548 let amt = CALLBACK_CNT.load(Ordering::SeqCst);
549 &CALLBACKS[..cmp::min(amt, MAX_CALLBACKS)]
551 for cb in callbacks {
552 match cb.load(Ordering::SeqCst) {
555 let f: Callback = unsafe { mem::transmute(n) };
556 let (file, line) = *file_line;
557 f(&*msg, file, line);
562 // Now that we've run all the necessary unwind callbacks, we actually
563 // perform the unwinding.
565 // If a thread panics while it's already unwinding then we
566 // have limited options. Currently our preference is to
567 // just abort. In the future we may consider resuming
568 // unwinding or otherwise exiting the thread cleanly.
569 rterrln!("thread panicked while panicking. aborting.");
570 unsafe { intrinsics::abort() }
572 PANICKING.with(|s| s.set(true));
576 /// Register a callback to be invoked when a thread unwinds.
578 /// This is an unsafe and experimental API which allows for an arbitrary
579 /// callback to be invoked when a thread panics. This callback is invoked on both
580 /// the initial unwinding and a double unwinding if one occurs. Additionally,
581 /// the local `Task` will be in place for the duration of the callback, and
582 /// the callback must ensure that it remains in place once the callback returns.
584 /// Only a limited number of callbacks can be registered, and this function
585 /// returns whether the callback was successfully registered or not. It is not
586 /// currently possible to unregister a callback once it has been registered.
587 #[unstable(feature = "std_misc")]
588 pub unsafe fn register(f: Callback) -> bool {
589 match CALLBACK_CNT.fetch_add(1, Ordering::SeqCst) {
590 // The invocation code has knowledge of this window where the count has
591 // been incremented, but the callback has not been stored. We're
592 // guaranteed that the slot we're storing into is 0.
593 n if n < MAX_CALLBACKS => {
594 let prev = CALLBACKS[n].swap(mem::transmute(f), Ordering::SeqCst);
595 rtassert!(prev == 0);
598 // If we accidentally bumped the count too high, pull it back.
600 CALLBACK_CNT.store(MAX_CALLBACKS, Ordering::SeqCst);