1 // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
12 * Code pertaining to cleanup of temporaries as well as execution of
13 * drop glue. See discussion in `doc.rs` for a high-level summary.
16 pub use self::ScopeId::*;
17 pub use self::CleanupScopeKind::*;
18 pub use self::EarlyExitLabel::*;
19 pub use self::Heap::*;
21 use llvm::{BasicBlockRef, ValueRef};
26 use trans::common::{Block, FunctionContext, ExprId, NodeInfo};
30 use trans::type_::Type;
31 use middle::ty::{mod, Ty};
34 use util::ppaux::Repr;
// One entry in the function context's stack of cleanup scopes
// (`FunctionContext::scopes`, see impls below).
// NOTE(review): several interior lines of this struct are missing from this
// excerpt (inner numbering gaps); code kept byte-identical.
36 pub struct CleanupScope<'blk, 'tcx: 'blk> {
37 // The id of this cleanup scope. If the id is None,
38 // this is a *temporary scope* that is pushed during trans to
39 // cleanup miscellaneous garbage that trans may generate whose
40 // lifetime is a subset of some expression. See module doc for
42 kind: CleanupScopeKind<'blk, 'tcx>,
44 // Cleanups to run upon scope exit.
45 cleanups: Vec<CleanupObj<'tcx>>,
47 // The debug location any drop calls generated for this scope will be
49 debug_loc: Option<NodeInfo>,
// Early exits whose cleanup chains were already generated for this
// scope; looked up by `cached_early_exit`, invalidated by
// `clear_cached_exits`.
51 cached_early_exits: Vec<CachedEarlyExit>,
// Landing pad created lazily by `get_or_create_landing_pad`; cleared
// whenever a new cleanup is scheduled (see `clear_cached_exits`).
52 cached_landing_pad: Option<BasicBlockRef>,
// Opaque handle naming a custom (temporary) scope by its position in the
// scope stack; returned by `push_custom_cleanup_scope`. Its `index` field
// is used throughout the impls below, though the field declaration itself
// is missing from this excerpt.
56 pub struct CustomScopeIndex {
// Indices into a loop scope's exit-block array (`[Block, ..EXIT_MAX]`):
// EXIT_BREAK is the break target and EXIT_LOOP presumably the continue
// target; EXIT_MAX is the array length.
60 pub const EXIT_BREAK: uint = 0;
61 pub const EXIT_LOOP: uint = 1;
62 pub const EXIT_MAX: uint = 2;
// Discriminates the kinds of cleanup scope. The `CustomScopeKind` variant
// (used throughout this file) is on a line missing from this excerpt.
64 pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
// An AST-level scope, named by its node id.
66 AstScopeKind(ast::NodeId),
// A loop scope carries its early-exit target blocks, indexed by
// EXIT_BREAK / EXIT_LOOP (see `early_exit_block`).
67 LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>, ..EXIT_MAX])
// Debug formatting for scope kinds; loop scopes print the raw pointers of
// their exit blocks ("{:p}").
70 impl<'blk, 'tcx: 'blk> fmt::Show for CleanupScopeKind<'blk, 'tcx> {
71 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
73 CustomScopeKind => write!(f, "CustomScopeKind"),
74 AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid),
75 LoopScopeKind(nid, ref blks) => {
76 try!(write!(f, "LoopScopeKind({}, [", nid));
77 for blk in blks.iter() {
78 try!(write!(f, "{:p}, ", blk));
// Labels for the early exits a cleanup chain can target. The UnwindExit
// and ReturnExit variants (used below, e.g. in
// `trans_cleanups_to_exit_scope`) are on lines missing from this excerpt.
86 #[deriving(PartialEq, Show)]
87 pub enum EarlyExitLabel {
// Break/continue out of the loop with the given node id; the uint is
// an exit index (EXIT_BREAK / EXIT_LOOP).
90 LoopExit(ast::NodeId, uint)
// Associates an early-exit label with the head block of its
// already-generated cleanup chain (see `cached_early_exit` /
// `add_cached_early_exit` below).
93 pub struct CachedEarlyExit {
94 label: EarlyExitLabel,
95 cleanup_block: BasicBlockRef,
// A single scheduled cleanup action. Implemented below by `DropValue`,
// `FreeValue`, `FreeSlice` and `LifetimeEnd`.
98 pub trait Cleanup<'tcx> {
// Whether this cleanup must be run during unwinding; drives
// `CleanupScope::needs_invoke`.
99 fn must_unwind(&self) -> bool;
// Whether this cleanup should also run on the unwind path; consulted by
// `cleanup_is_suitable_for`.
100 fn clean_on_unwind(&self) -> bool;
// True only for lifetime-end markers; used by
// `CleanupScope::drop_non_lifetime_clean` to filter cleanups.
101 fn is_lifetime_end(&self) -> bool;
// Emits the cleanup code into `bcx` and returns the block to continue
// in (cleanup emission may change the current block).
102 fn trans<'blk>(&self,
103 bcx: Block<'blk, 'tcx>,
104 debug_loc: Option<NodeInfo>)
105 -> Block<'blk, 'tcx>;
// Owned, type-erased cleanup action as stored in `CleanupScope::cleanups`.
108 pub type CleanupObj<'tcx> = Box<Cleanup<'tcx>+'tcx>;
// Variants of `ScopeId` (the enum header line is missing from this
// excerpt): a scope is addressed either by its AST node id or by a
// custom-scope stack index. See `schedule_clean`, which dispatches on it.
112 AstScope(ast::NodeId),
113 CustomScope(CustomScopeIndex)
// Public cleanup-scope API on the function context: pushing/popping
// scopes, scheduling cleanups, and obtaining exit/landing-pad blocks.
// NOTE(review): this excerpt is missing interior lines throughout (inner
// numbering gaps), so the code is kept byte-identical and only comments
// are added.
116 impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
117 fn push_ast_cleanup_scope(&self, debug_loc: NodeInfo) {
119 * Invoked when we start to trans the code contained
120 * within a new cleanup scope.
123 debug!("push_ast_cleanup_scope({})",
124 self.ccx.tcx().map.node_to_string(debug_loc.id));
126 // FIXME(#2202) -- currently closure bodies have a parent
127 // region, which messes up the assertion below, since there
128 // are no cleanup scopes on the stack at the start of
129 // trans'ing a closure body. I think though that this should
130 // eventually be fixed by closure bodies not having a parent
131 // region, though that's a touch unclear, and it might also be
132 // better just to narrow this assertion more (i.e., by
133 // excluding id's that correspond to closure bodies only). For
134 // now we just say that if there is already an AST scope on the stack,
135 // this new AST scope had better be its immediate child.
136 let top_scope = self.top_ast_scope();
137 if top_scope.is_some() {
141 .opt_encl_scope(region::CodeExtent::from_node_id(debug_loc.id))
142 .map(|s|s.node_id()),
146 self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
// Pushes a loop scope; the enclosing AST scope must already be on top
// (asserted below) and supplies the break/continue exit blocks.
150 fn push_loop_cleanup_scope(&self,
152 exits: [Block<'blk, 'tcx>, ..EXIT_MAX]) {
153 debug!("push_loop_cleanup_scope({})",
154 self.ccx.tcx().map.node_to_string(id));
155 assert_eq!(Some(id), self.top_ast_scope());
157 // Just copy the debuginfo source location from the enclosing scope
158 let debug_loc = self.scopes
164 self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc));
// Pushes an anonymous temporary scope and returns its stack index.
167 fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
168 let index = self.scopes_len();
169 debug!("push_custom_cleanup_scope(): {}", index);
171 // Just copy the debuginfo source location from the enclosing scope
172 let debug_loc = self.scopes
175 .map(|opt_scope| opt_scope.debug_loc)
178 self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc));
179 CustomScopeIndex { index: index }
// Like `push_custom_cleanup_scope`, but with an explicit debug location
// instead of inheriting the enclosing scope's.
182 fn push_custom_cleanup_scope_with_debug_loc(&self,
184 -> CustomScopeIndex {
185 let index = self.scopes_len();
// NOTE(review): this debug! string says "push_custom_cleanup_scope" but
// we are in the _with_debug_loc variant — misleading in logs; a fix
// would need the surrounding (missing) lines verified.
186 debug!("push_custom_cleanup_scope(): {}", index);
188 self.push_scope(CleanupScope::new(CustomScopeKind, Some(debug_loc)));
189 CustomScopeIndex { index: index }
192 fn pop_and_trans_ast_cleanup_scope(&self,
193 bcx: Block<'blk, 'tcx>,
194 cleanup_scope: ast::NodeId)
195 -> Block<'blk, 'tcx> {
197 * Removes the cleanup scope for id `cleanup_scope`, which
198 * must be at the top of the cleanup stack, and generates the
199 * code to do its cleanups for normal exit.
202 debug!("pop_and_trans_ast_cleanup_scope({})",
203 self.ccx.tcx().map.node_to_string(cleanup_scope));
205 assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));
207 let scope = self.pop_scope();
208 self.trans_scope_cleanups(bcx, &scope)
211 fn pop_loop_cleanup_scope(&self,
212 cleanup_scope: ast::NodeId) {
214 * Removes the loop cleanup scope for id `cleanup_scope`, which
215 * must be at the top of the cleanup stack. Does not generate
216 * any cleanup code, since loop scopes should exit by
217 * branching to a block generated by `normal_exit_block`.
220 debug!("pop_loop_cleanup_scope({})",
221 self.ccx.tcx().map.node_to_string(cleanup_scope));
223 assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));
225 let _ = self.pop_scope();
228 fn pop_custom_cleanup_scope(&self,
229 custom_scope: CustomScopeIndex) {
231 * Removes the top cleanup scope from the stack without
232 * executing its cleanups. The top cleanup scope must
233 * be the temporary scope `custom_scope`.
236 debug!("pop_custom_cleanup_scope({})", custom_scope.index);
237 assert!(self.is_valid_to_pop_custom_scope(custom_scope));
238 let _ = self.pop_scope();
241 fn pop_and_trans_custom_cleanup_scope(&self,
242 bcx: Block<'blk, 'tcx>,
243 custom_scope: CustomScopeIndex)
244 -> Block<'blk, 'tcx> {
246 * Removes the top cleanup scope from the stack, which must be
247 * a temporary scope, and generates the code to do its
248 * cleanups for normal exit.
251 debug!("pop_and_trans_custom_cleanup_scope({})", custom_scope);
252 assert!(self.is_valid_to_pop_custom_scope(custom_scope));
254 let scope = self.pop_scope();
255 self.trans_scope_cleanups(bcx, &scope)
258 fn top_loop_scope(&self) -> ast::NodeId {
260 * Returns the id of the top-most loop scope
// Walks the scope stack from the top; reports an ICE (bug) if no loop
// scope exists.
263 for scope in self.scopes.borrow().iter().rev() {
265 LoopScopeKind(id, _) => {
271 self.ccx.sess().bug("no loop scope found");
274 fn normal_exit_block(&'blk self,
275 cleanup_scope: ast::NodeId,
276 exit: uint) -> BasicBlockRef {
278 * Returns a block to branch to which will perform all pending
279 * cleanups and then break/continue (depending on `exit`) out
280 * of the loop with id `cleanup_scope`
283 self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
286 fn return_exit_block(&'blk self) -> BasicBlockRef {
288 * Returns a block to branch to which will perform all pending
289 * cleanups and then return from this function
292 self.trans_cleanups_to_exit_scope(ReturnExit)
// Schedules an llvm.lifetime.end marker for `val` in `cleanup_scope`.
295 fn schedule_lifetime_end(&self,
296 cleanup_scope: ScopeId,
298 let drop = box LifetimeEnd {
302 debug!("schedule_lifetime_end({}, val={})",
304 self.ccx.tn().val_to_string(val));
306 self.schedule_clean(cleanup_scope, drop as CleanupObj);
309 fn schedule_drop_mem(&self,
310 cleanup_scope: ScopeId,
314 * Schedules a (deep) drop of `val`, which is a pointer to an
// Types with no drop glue need no cleanup at all — early out.
318 if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; }
319 let drop = box DropValue {
321 must_unwind: ty::type_needs_unwind_cleanup(self.ccx.tcx(), ty),
327 debug!("schedule_drop_mem({}, val={}, ty={})",
329 self.ccx.tn().val_to_string(val),
330 ty.repr(self.ccx.tcx()));
332 self.schedule_clean(cleanup_scope, drop as CleanupObj);
335 fn schedule_drop_and_zero_mem(&self,
336 cleanup_scope: ScopeId,
340 * Schedules a (deep) drop and zero-ing of `val`, which is a pointer
341 * to an instance of `ty`
344 if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; }
345 let drop = box DropValue {
347 must_unwind: ty::type_needs_unwind_cleanup(self.ccx.tcx(), ty),
353 debug!("schedule_drop_and_zero_mem({}, val={}, ty={}, zero={})",
355 self.ccx.tn().val_to_string(val),
356 ty.repr(self.ccx.tcx()),
359 self.schedule_clean(cleanup_scope, drop as CleanupObj);
362 fn schedule_drop_immediate(&self,
363 cleanup_scope: ScopeId,
367 * Schedules a (deep) drop of `val`, which is an instance of `ty`
370 if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; }
371 let drop = box DropValue {
373 must_unwind: ty::type_needs_unwind_cleanup(self.ccx.tcx(), ty),
379 debug!("schedule_drop_immediate({}, val={}, ty={})",
381 self.ccx.tn().val_to_string(val),
382 ty.repr(self.ccx.tcx()));
384 self.schedule_clean(cleanup_scope, drop as CleanupObj);
387 fn schedule_free_value(&self,
388 cleanup_scope: ScopeId,
391 content_ty: Ty<'tcx>) {
393 * Schedules a call to `free(val)`. Note that this is a shallow
397 let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };
399 debug!("schedule_free_value({}, val={}, heap={})",
401 self.ccx.tn().val_to_string(val),
404 self.schedule_clean(cleanup_scope, drop as CleanupObj);
407 fn schedule_free_slice(&self,
408 cleanup_scope: ScopeId,
414 * Schedules a call to `free(val)`. Note that this is a shallow
418 let drop = box FreeSlice { ptr: val, size: size, align: align, heap: heap };
420 debug!("schedule_free_slice({}, val={}, heap={})",
422 self.ccx.tn().val_to_string(val),
425 self.schedule_clean(cleanup_scope, drop as CleanupObj);
// Dispatches a cleanup to the AST- or custom-scope scheduler depending
// on how the target scope is addressed.
428 fn schedule_clean(&self,
429 cleanup_scope: ScopeId,
430 cleanup: CleanupObj<'tcx>) {
431 match cleanup_scope {
432 AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
433 CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
437 fn schedule_clean_in_ast_scope(&self,
438 cleanup_scope: ast::NodeId,
439 cleanup: CleanupObj<'tcx>) {
441 * Schedules a cleanup to occur upon exit from `cleanup_scope`.
442 * If `cleanup_scope` is not provided, then the cleanup is scheduled
443 * in the topmost scope, which must be a temporary scope.
446 debug!("schedule_clean_in_ast_scope(cleanup_scope={})",
// Walk inward from the top of the stack; every scope passed over also
// has its cached exits cleared, since the new cleanup invalidates any
// previously-generated exit chains that run through it.
449 for scope in self.scopes.borrow_mut().iter_mut().rev() {
450 if scope.kind.is_ast_with_id(cleanup_scope) {
451 scope.cleanups.push(cleanup);
452 scope.clear_cached_exits();
455 // will be adding a cleanup to some enclosing scope
456 scope.clear_cached_exits();
// Reaching here is a compiler bug: the named scope was not found.
461 format!("no cleanup scope {} found",
462 self.ccx.tcx().map.node_to_string(cleanup_scope)).as_slice());
465 fn schedule_clean_in_custom_scope(&self,
466 custom_scope: CustomScopeIndex,
467 cleanup: CleanupObj<'tcx>) {
469 * Schedules a cleanup to occur in the top-most scope,
470 * which must be a temporary scope.
473 debug!("schedule_clean_in_custom_scope(custom_scope={})",
476 assert!(self.is_valid_custom_scope(custom_scope));
478 let mut scopes = self.scopes.borrow_mut();
479 let scope = &mut (*scopes)[custom_scope.index];
480 scope.cleanups.push(cleanup);
481 scope.clear_cached_exits();
484 fn needs_invoke(&self) -> bool {
486 * Returns true if there are pending cleanups that should
490 self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
493 fn get_landing_pad(&'blk self) -> BasicBlockRef {
495 * Returns a basic block to branch to in the event of a panic.
496 * This block will run the panic cleanups and eventually
497 * invoke the LLVM `Resume` instruction.
500 let _icx = base::push_ctxt("get_landing_pad");
502 debug!("get_landing_pad");
504 let orig_scopes_len = self.scopes_len();
505 assert!(orig_scopes_len > 0);
507 // Remove any scopes that do not have cleanups on panic:
508 let mut popped_scopes = vec!();
509 while !self.top_scope(|s| s.needs_invoke()) {
510 debug!("top scope does not need invoke");
511 popped_scopes.push(self.pop_scope());
514 // Check for an existing landing pad in the new topmost scope:
515 let llbb = self.get_or_create_landing_pad();
517 // Push the scopes we removed back on:
519 match popped_scopes.pop() {
520 Some(scope) => self.push_scope(scope),
// Invariant: the stack must be restored to its original depth.
525 assert_eq!(self.scopes_len(), orig_scopes_len);
// Private helpers backing the public cleanup API: stack accessors and the
// core cleanup-chain / landing-pad code generation.
// NOTE(review): interior lines are missing from this excerpt (inner
// numbering gaps); code kept byte-identical, comments only.
531 impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
532 fn top_ast_scope(&self) -> Option<ast::NodeId> {
534 * Returns the id of the current top-most AST scope, if any.
// Skips custom and loop scopes while scanning down from the top.
536 for scope in self.scopes.borrow().iter().rev() {
538 CustomScopeKind | LoopScopeKind(..) => {}
// Position (from the top) of the innermost scope that has pending
// cleanups, if any.
547 fn top_nonempty_cleanup_scope(&self) -> Option<uint> {
548 self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())
// A custom scope may only be popped when it is both valid and the
// topmost entry of the stack.
551 fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
552 self.is_valid_custom_scope(custom_scope) &&
553 custom_scope.index == self.scopes.borrow().len() - 1
556 fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
557 let scopes = self.scopes.borrow();
558 custom_scope.index < scopes.len() &&
559 (*scopes)[custom_scope.index].kind.is_temp()
562 fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
563 bcx: Block<'blk, 'tcx>,
564 scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> {
565 /*! Generates the cleanups for `scope` into `bcx` */
// Skip codegen entirely if the block is already unreachable; cleanups
// run in reverse (LIFO) order of scheduling.
568 if !bcx.unreachable.get() {
569 for cleanup in scope.cleanups.iter().rev() {
570 bcx = cleanup.trans(bcx, scope.debug_loc);
576 fn scopes_len(&self) -> uint {
577 self.scopes.borrow().len()
580 fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
581 self.scopes.borrow_mut().push(scope)
// Pops the top scope; panics (unwrap) if the stack is empty.
584 fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
585 debug!("popping cleanup scope {}, {} scopes remaining",
586 self.top_scope(|s| s.block_name("")),
587 self.scopes_len() - 1);
589 self.scopes.borrow_mut().pop().unwrap()
// Applies `f` to the topmost scope while holding the borrow.
592 fn top_scope<R>(&self, f: |&CleanupScope<'blk, 'tcx>| -> R) -> R {
593 f(self.scopes.borrow().last().unwrap())
596 fn trans_cleanups_to_exit_scope(&'blk self,
597 label: EarlyExitLabel)
600 * Used when the caller wishes to jump to an early exit, such
601 * as a return, break, continue, or unwind. This function will
602 * generate all cleanups between the top of the stack and the
603 * exit `label` and return a basic block that the caller can
606 * For example, if the current stack of cleanups were as follows:
615 * and the `label` specifies a break from `Loop 23`, then this
616 * function would generate a series of basic blocks as follows:
618 * Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
620 * where `break_blk` is the block specified in `Loop 23` as
621 * the target for breaks. The return value would be the first
622 * basic block in that sequence (`Cleanup(AST 24)`). The
623 * caller could then branch to `Cleanup(AST 24)` and it will
624 * perform all cleanups and finally branch to the `break_blk`.
627 debug!("trans_cleanups_to_exit_scope label={} scopes={}",
628 label, self.scopes_len());
630 let orig_scopes_len = self.scopes_len();
632 let mut popped_scopes = vec!();
634 // First we pop off all the cleanup stacks that are
635 // traversed until the exit is reached, pushing them
636 // onto the side vector `popped_scopes`. No code is
637 // generated at this time.
639 // So, continuing the example from above, we would wind up
640 // with a `popped_scopes` vector of `[AST 24, Custom 2]`.
641 // (Presuming that there are no cached exits)
643 if self.scopes_len() == 0 {
646 // Generate a block that will `Resume`.
647 let prev_bcx = self.new_block(true, "resume", None);
648 let personality = self.personality.get().expect(
649 "create_landing_pad() should have set this");
650 build::Resume(prev_bcx,
651 build::Load(prev_bcx, personality));
652 prev_llbb = prev_bcx.llbb;
657 prev_llbb = self.get_llreturn();
662 self.ccx.sess().bug(format!(
663 "cannot exit from scope {}, \
664 not in scope", id).as_slice());
669 // Check if we have already cached the unwinding of this
670 // scope for this label. If so, we can stop popping scopes
671 // and branch to the cached label, since it contains the
672 // cleanups for any subsequent scopes.
673 match self.top_scope(|s| s.cached_early_exit(label)) {
674 Some(cleanup_block) => {
675 prev_llbb = cleanup_block;
681 // Pop off the scope, since we will be generating
682 // unwinding code for it. If we are searching for a loop exit,
683 // and this scope is that loop, then stop popping and set
684 // `prev_llbb` to the appropriate exit block from the loop.
685 popped_scopes.push(self.pop_scope());
686 let scope = popped_scopes.last().unwrap();
688 UnwindExit | ReturnExit => { }
689 LoopExit(id, exit) => {
690 match scope.kind.early_exit_block(id, exit) {
692 prev_llbb = exitllbb;
702 debug!("trans_cleanups_to_exit_scope: popped {} scopes",
703 popped_scopes.len());
705 // Now push the popped scopes back on. As we go,
706 // we track in `prev_llbb` the exit to which this scope
707 // should branch when it's done.
709 // So, continuing with our example, we will start out with
710 // `prev_llbb` being set to `break_blk` (or possibly a cached
711 // early exit). We will then pop the scopes from `popped_scopes`
712 // and generate a basic block for each one, prepending it in the
713 // series and updating `prev_llbb`. So we begin by popping `Custom 2`
714 // and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)`
715 // branch to `prev_llbb == break_blk`, giving us a sequence like:
717 // Cleanup(Custom 2) -> prev_llbb
719 // We then pop `AST 24` and repeat the process, giving us the sequence:
721 // Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb
723 // At this point, `popped_scopes` is empty, and so the final block
724 // that we return to the user is `Cleanup(AST 24)`.
725 while !popped_scopes.is_empty() {
726 let mut scope = popped_scopes.pop().unwrap();
// Only emit a block if the scope has at least one cleanup that
// applies to this kind of exit (see `cleanup_is_suitable_for`).
728 if scope.cleanups.iter().any(|c| cleanup_is_suitable_for(&**c, label))
730 let name = scope.block_name("clean");
731 debug!("generating cleanups for {}", name);
732 let bcx_in = self.new_block(label.is_unwind(),
735 let mut bcx_out = bcx_in;
736 for cleanup in scope.cleanups.iter().rev() {
737 if cleanup_is_suitable_for(&**cleanup, label) {
738 bcx_out = cleanup.trans(bcx_out,
742 build::Br(bcx_out, prev_llbb);
743 prev_llbb = bcx_in.llbb;
745 debug!("no suitable cleanups in {}",
746 scope.block_name("clean"));
// Cache the generated chain on the scope so a later exit with the
// same label can reuse it, then restore the scope to the stack.
749 scope.add_cached_early_exit(label, prev_llbb);
750 self.push_scope(scope);
753 debug!("trans_cleanups_to_exit_scope: prev_llbb={}", prev_llbb);
// Invariant: the stack must be restored to its original depth.
755 assert_eq!(self.scopes_len(), orig_scopes_len);
759 fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
761 * Creates a landing pad for the top scope, if one does not
762 * exist. The landing pad will perform all cleanups necessary
763 * for an unwind and then `resume` to continue error
766 * landing_pad -> ... cleanups ... -> [resume]
768 * (The cleanups and resume instruction are created by
769 * `trans_cleanups_to_exit_scope()`, not in this function
775 debug!("get_or_create_landing_pad");
777 // Check if a landing pad block exists; if not, create one.
779 let mut scopes = self.scopes.borrow_mut();
780 let last_scope = scopes.last_mut().unwrap();
781 match last_scope.cached_landing_pad {
782 Some(llbb) => { return llbb; }
784 let name = last_scope.block_name("unwind");
785 pad_bcx = self.new_block(true, name.as_slice(), None);
786 last_scope.cached_landing_pad = Some(pad_bcx.llbb);
791 // The landing pad return type (the type being propagated). Not sure what
792 // this represents but it's determined by the personality function and
793 // this is what the EH proposal example uses.
794 let llretty = Type::struct_(self.ccx,
795 &[Type::i8p(self.ccx), Type::i32(self.ccx)],
798 // The exception handling personality function.
800 // If our compilation unit has the `eh_personality` lang item somewhere
801 // within it, then we just need to translate that. Otherwise, we're
802 // building an rlib which will depend on some upstream implementation of
803 // this function, so we just codegen a generic reference to it. We don't
804 // specify any of the types for the function, we just make it a symbol
805 // that LLVM can later use.
806 let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() {
807 Some(def_id) => callee::trans_fn_ref(pad_bcx, def_id, ExprId(0)),
809 let mut personality = self.ccx.eh_personality().borrow_mut();
811 Some(llpersonality) => llpersonality,
813 let fty = Type::variadic_func(&[], &Type::i32(self.ccx));
814 let f = base::decl_cdecl_fn(self.ccx,
815 "rust_eh_personality",
818 *personality = Some(f);
825 // The only landing pad clause will be 'cleanup'
826 let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1u);
828 // The landing pad block is a cleanup
829 build::SetCleanup(pad_bcx, llretval);
831 // We store the retval in a function-central alloca, so that calls to
832 // Resume can find it.
833 match self.personality.get() {
835 build::Store(pad_bcx, llretval, addr);
838 let addr = base::alloca(pad_bcx, common::val_ty(llretval), "");
839 self.personality.set(Some(addr));
840 build::Store(pad_bcx, llretval, addr);
844 // Generate the cleanup block and branch to it.
845 let cleanup_llbb = self.trans_cleanups_to_exit_scope(UnwindExit);
846 build::Br(pad_bcx, cleanup_llbb);
// Inherent methods on a single cleanup scope: construction, exit caching,
// and bookkeeping queries. (Interior lines missing from this excerpt.)
852 impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
// Constructs an empty scope of the given kind with no cached exits.
853 fn new(kind: CleanupScopeKind<'blk, 'tcx>,
854 debug_loc: Option<NodeInfo>)
855 -> CleanupScope<'blk, 'tcx> {
858 debug_loc: debug_loc,
860 cached_early_exits: vec!(),
861 cached_landing_pad: None,
// Invalidates all cached exit chains and the cached landing pad;
// called whenever a new cleanup is scheduled in (or through) this
// scope, since cached chains would no longer run it.
865 fn clear_cached_exits(&mut self) {
866 self.cached_early_exits = vec!();
867 self.cached_landing_pad = None;
// Returns the cached cleanup-chain head for `label`, if one was
// previously recorded via `add_cached_early_exit`.
870 fn cached_early_exit(&self,
871 label: EarlyExitLabel)
872 -> Option<BasicBlockRef> {
873 self.cached_early_exits.iter().
874 find(|e| e.label == label).
875 map(|e| e.cleanup_block)
878 fn add_cached_early_exit(&mut self,
879 label: EarlyExitLabel,
880 blk: BasicBlockRef) {
881 self.cached_early_exits.push(
882 CachedEarlyExit { label: label,
883 cleanup_block: blk });
886 fn needs_invoke(&self) -> bool {
887 /*! True if this scope has cleanups that need unwinding */
889 self.cached_landing_pad.is_some() ||
890 self.cleanups.iter().any(|c| c.must_unwind())
893 fn block_name(&self, prefix: &str) -> String {
895 * Returns a suitable name to use for the basic block that
896 * handles this cleanup scope
900 CustomScopeKind => format!("{}_custom_", prefix),
901 AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
902 LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id),
// Discards every scheduled cleanup except lifetime-end markers
// (retain keeps only those for which `is_lifetime_end` is true).
906 pub fn drop_non_lifetime_clean(&mut self) {
907 self.cleanups.retain(|c| c.is_lifetime_end());
// Predicates and accessors on a scope's kind.
911 impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
// True only for custom (temporary) scopes.
912 fn is_temp(&self) -> bool {
914 CustomScopeKind => true,
915 LoopScopeKind(..) | AstScopeKind(..) => false,
919 fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
921 CustomScopeKind | LoopScopeKind(..) => false,
922 AstScopeKind(i) => i == id
926 fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
928 CustomScopeKind | AstScopeKind(..) => false,
929 LoopScopeKind(i, _) => i == id
933 fn early_exit_block(&self,
935 exit: uint) -> Option<BasicBlockRef> {
937 * If this is a loop scope with id `id`, return the early
938 * exit block `exit`, else `None`
// Note: `exits[exit]` panics if `exit >= EXIT_MAX`; callers pass
// EXIT_BREAK or EXIT_LOOP.
942 LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
948 impl EarlyExitLabel {
// Whether this label is the unwind exit; used to decide whether
// only unwind-suitable cleanups run (`cleanup_is_suitable_for`).
// NOTE(review): the body is missing from this excerpt — presumably a
// match on the UnwindExit variant; confirm against the full file.
949 fn is_unwind(&self) -> bool {
957 ///////////////////////////////////////////////////////////////////////////
// Cleanup that runs drop glue for a value. The field lines are missing
// from this excerpt, but the impl below reads `is_immediate`, `val`, `ty`
// and a `must_unwind` flag set by the `schedule_drop_*` constructors;
// `base::zero_mem` in `trans` suggests an additional zero-memory flag —
// confirm against the full file.
960 pub struct DropValue<'tcx> {
968 impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
969 fn must_unwind(&self) -> bool {
973 fn clean_on_unwind(&self) -> bool {
977 fn is_lifetime_end(&self) -> bool {
// Emits drop glue for the value: immediate values use
// `drop_ty_immediate`, in-memory values use `drop_ty`.
981 fn trans<'blk>(&self,
982 bcx: Block<'blk, 'tcx>,
983 debug_loc: Option<NodeInfo>)
984 -> Block<'blk, 'tcx> {
985 let bcx = if self.is_immediate {
986 glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc)
988 glue::drop_ty(bcx, self.val, self.ty, debug_loc)
// Zeroes the dropped memory afterwards (scheduled via
// `schedule_drop_and_zero_mem`); guarded by a condition on a line
// missing from this excerpt.
991 base::zero_mem(bcx, self.val, self.ty);
// Cleanup that frees a heap allocation (shallow — does not drop the
// contents; see `schedule_free_value`). `ptr` and `heap` field lines are
// missing from this excerpt.
1002 pub struct FreeValue<'tcx> {
1005 content_ty: Ty<'tcx>
1008 impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> {
1009 fn must_unwind(&self) -> bool {
1013 fn clean_on_unwind(&self) -> bool {
1017 fn is_lifetime_end(&self) -> bool {
1021 fn trans<'blk>(&self,
1022 bcx: Block<'blk, 'tcx>,
1023 debug_loc: Option<NodeInfo>)
1024 -> Block<'blk, 'tcx> {
// Point debuginfo at the scheduling site before emitting the free.
1025 apply_debug_loc(bcx.fcx, debug_loc);
// Exchange-heap path; other heap arms are on lines missing from this
// excerpt.
1029 glue::trans_exchange_free_ty(bcx, self.ptr, self.content_ty)
// Cleanup that frees a dynamically-sized allocation; the impl below reads
// `ptr`, `size`, `align` and (per `schedule_free_slice`) a `heap` field —
// the field lines are missing from this excerpt.
1035 pub struct FreeSlice {
1042 impl<'tcx> Cleanup<'tcx> for FreeSlice {
1043 fn must_unwind(&self) -> bool {
1047 fn clean_on_unwind(&self) -> bool {
1051 fn is_lifetime_end(&self) -> bool {
// NOTE(review): `fn trans<'blk, 'tcx>` re-declares 'tcx, shadowing the
// impl's lifetime, unlike the trait's `fn trans<'blk>` — confirm
// against the full file whether this is intentional.
1055 fn trans<'blk, 'tcx>(&self,
1056 bcx: Block<'blk, 'tcx>,
1057 debug_loc: Option<NodeInfo>)
1058 -> Block<'blk, 'tcx> {
1059 apply_debug_loc(bcx.fcx, debug_loc);
// Exchange-heap path; other heap arms are on lines missing from this
// excerpt.
1063 glue::trans_exchange_free_dyn(bcx, self.ptr, self.size, self.align)
// Cleanup that emits an llvm.lifetime.end marker for an alloca (see
// `base::call_lifetime_end` below); its `ptr` field line is missing from
// this excerpt.
1069 pub struct LifetimeEnd {
1073 impl<'tcx> Cleanup<'tcx> for LifetimeEnd {
1074 fn must_unwind(&self) -> bool {
1078 fn clean_on_unwind(&self) -> bool {
// This is the one cleanup kind for which `is_lifetime_end` is true
// (see `drop_non_lifetime_clean`).
1082 fn is_lifetime_end(&self) -> bool {
// NOTE(review): as with FreeSlice, `fn trans<'blk, 'tcx>` shadows the
// impl's 'tcx — confirm against the full file.
1086 fn trans<'blk, 'tcx>(&self,
1087 bcx: Block<'blk, 'tcx>,
1088 debug_loc: Option<NodeInfo>)
1089 -> Block<'blk, 'tcx> {
1090 apply_debug_loc(bcx.fcx, debug_loc);
1091 base::call_lifetime_end(bcx, self.ptr);
// Maps an expression id to the `ScopeId` of the scope its temporaries are
// cleaned up in, per the region maps; reports an ICE if none exists.
1096 pub fn temporary_scope(tcx: &ty::ctxt,
1099 match tcx.region_maps.temporary_scope(id) {
1101 let r = AstScope(scope.node_id());
1102 debug!("temporary_scope({}) = {}", id, r);
1106 tcx.sess.bug(format!("no temporary scope available for expr {}",
// Maps a variable's id to the `ScopeId` of the scope it is dropped in,
// per the region maps.
1112 pub fn var_scope(tcx: &ty::ctxt,
1115 let r = AstScope(tcx.region_maps.var_scope(id).node_id());
1116 debug!("var_scope({}) = {}", id, r);
// A cleanup applies to every non-unwind exit; on the unwind path it only
// applies if the cleanup opted in via `clean_on_unwind`.
1120 fn cleanup_is_suitable_for(c: &Cleanup,
1121 label: EarlyExitLabel) -> bool {
1122 !label.is_unwind() || c.clean_on_unwind()
// Sets the debuginfo source location for subsequently emitted cleanup
// code, or clears it when no location was recorded.
1125 fn apply_debug_loc(fcx: &FunctionContext, debug_loc: Option<NodeInfo>) {
1127 Some(ref src_loc) => {
1128 debuginfo::set_source_location(fcx, src_loc.id, src_loc.span);
1131 debuginfo::clear_source_location(fcx);
1136 ///////////////////////////////////////////////////////////////////////////
1137 // These traits just exist to put the methods into this file.
// Public cleanup API; exists (per the comment above) only so the methods
// can be defined in this file on `FunctionContext`. See the impl near the
// top of the file for per-method documentation.
1139 pub trait CleanupMethods<'blk, 'tcx> {
1140 fn push_ast_cleanup_scope(&self, id: NodeInfo);
1141 fn push_loop_cleanup_scope(&self,
1143 exits: [Block<'blk, 'tcx>, ..EXIT_MAX]);
1144 fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
1145 fn push_custom_cleanup_scope_with_debug_loc(&self,
1146 debug_loc: NodeInfo)
1147 -> CustomScopeIndex;
1148 fn pop_and_trans_ast_cleanup_scope(&self,
1149 bcx: Block<'blk, 'tcx>,
1150 cleanup_scope: ast::NodeId)
1151 -> Block<'blk, 'tcx>;
1152 fn pop_loop_cleanup_scope(&self,
1153 cleanup_scope: ast::NodeId);
1154 fn pop_custom_cleanup_scope(&self,
1155 custom_scope: CustomScopeIndex);
1156 fn pop_and_trans_custom_cleanup_scope(&self,
1157 bcx: Block<'blk, 'tcx>,
1158 custom_scope: CustomScopeIndex)
1159 -> Block<'blk, 'tcx>;
1160 fn top_loop_scope(&self) -> ast::NodeId;
1161 fn normal_exit_block(&'blk self,
1162 cleanup_scope: ast::NodeId,
1163 exit: uint) -> BasicBlockRef;
1164 fn return_exit_block(&'blk self) -> BasicBlockRef;
1165 fn schedule_lifetime_end(&self,
1166 cleanup_scope: ScopeId,
1168 fn schedule_drop_mem(&self,
1169 cleanup_scope: ScopeId,
1172 fn schedule_drop_and_zero_mem(&self,
1173 cleanup_scope: ScopeId,
1176 fn schedule_drop_immediate(&self,
1177 cleanup_scope: ScopeId,
1180 fn schedule_free_value(&self,
1181 cleanup_scope: ScopeId,
1184 content_ty: Ty<'tcx>);
1185 fn schedule_free_slice(&self,
1186 cleanup_scope: ScopeId,
1191 fn schedule_clean(&self,
1192 cleanup_scope: ScopeId,
1193 cleanup: CleanupObj<'tcx>);
1194 fn schedule_clean_in_ast_scope(&self,
1195 cleanup_scope: ast::NodeId,
1196 cleanup: CleanupObj<'tcx>);
1197 fn schedule_clean_in_custom_scope(&self,
1198 custom_scope: CustomScopeIndex,
1199 cleanup: CleanupObj<'tcx>);
1200 fn needs_invoke(&self) -> bool;
1201 fn get_landing_pad(&'blk self) -> BasicBlockRef;
// Private helper API backing `CleanupMethods`; see the second impl on
// `FunctionContext` for per-method documentation.
1204 trait CleanupHelperMethods<'blk, 'tcx> {
1205 fn top_ast_scope(&self) -> Option<ast::NodeId>;
1206 fn top_nonempty_cleanup_scope(&self) -> Option<uint>;
1207 fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
1208 fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
1209 fn trans_scope_cleanups(&self,
1210 bcx: Block<'blk, 'tcx>,
1211 scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
1212 fn trans_cleanups_to_exit_scope(&'blk self,
1213 label: EarlyExitLabel)
1215 fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
1216 fn scopes_len(&self) -> uint;
1217 fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
1218 fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
1219 fn top_scope<R>(&self, f: |&CleanupScope<'blk, 'tcx>| -> R) -> R;