1 // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
12 * Code pertaining to cleanup of temporaries as well as execution of
13 * drop glue. See discussion in `doc.rs` for a high-level summary.
16 use lib::llvm::{BasicBlockRef, ValueRef};
17 use middle::trans::base;
18 use middle::trans::build;
19 use middle::trans::callee;
20 use middle::trans::common;
21 use middle::trans::common::{Block, FunctionContext, ExprId};
22 use middle::trans::glue;
23 use middle::trans::type_::Type;
26 use util::ppaux::Repr;
// One entry in the FunctionContext's stack of cleanup scopes.
// NOTE(review): this listing elides interior lines; the field set shown
// may be incomplete — confirm against the full file.
29 pub struct CleanupScope<'a> {
30 // The id of this cleanup scope. If the id is None,
31 // this is a *temporary scope* that is pushed during trans to
32 // cleanup miscellaneous garbage that trans may generate whose
33 // lifetime is a subset of some expression. See module doc for
35 kind: CleanupScopeKind<'a>,
37 // Cleanups to run upon scope exit.
38 cleanups: Vec<Box<Cleanup>>,
// Memoized blocks for early exits (break/continue/return) already
// translated for this scope; invalidated by clear_cached_exits().
40 cached_early_exits: Vec<CachedEarlyExit>,
// Memoized unwind landing pad for this scope, if one was built
// (see get_or_create_landing_pad()).
41 cached_landing_pad: Option<BasicBlockRef>,
44 pub struct CustomScopeIndex {
// Indices into a loop scope's fixed-size array of exit blocks
// (see LoopScopeKind): slot 0 is the `break` target, slot 1 the
// `loop`/continue target; EXIT_MAX is the array length.
48 pub static EXIT_BREAK: uint = 0;
49 pub static EXIT_LOOP: uint = 1;
50 pub static EXIT_MAX: uint = 2;
// Discriminates the three scope flavors pushed onto the cleanup stack.
// NOTE(review): the `CustomScopeKind` variant (used throughout below)
// is elided from this listing.
52 pub enum CleanupScopeKind<'a> {
// Scope tied to an AST node (block/expression) with the given id.
54 AstScopeKind(ast::NodeId),
// Loop scope: carries the EXIT_MAX exit blocks (break/continue targets).
55 LoopScopeKind(ast::NodeId, [&'a Block<'a>, ..EXIT_MAX])
// Identifies the kind of early exit being translated.
// NOTE(review): the `UnwindExit` and `ReturnExit` variants (matched on
// elsewhere in this file) are elided from this listing.
59 pub enum EarlyExitLabel {
// Break/continue out of the loop with the given id; the uint is an
// EXIT_* index into the loop's exit-block array.
62 LoopExit(ast::NodeId, uint)
// Memo entry pairing an early-exit label with the basic block that
// performs this scope's cleanups and then branches toward that exit.
65 pub struct CachedEarlyExit {
66 label: EarlyExitLabel,
67 cleanup_block: BasicBlockRef,
// Methods of the `Cleanup` trait (trait header elided from this listing).
// `clean_on_unwind` reports whether the cleanup must also run during
// unwinding; `trans` emits the cleanup code into `bcx` and returns the
// (possibly new) block to continue in.
71 fn clean_on_unwind(&self) -> bool;
72 fn trans<'a>(&self, bcx: &'a Block<'a>) -> &'a Block<'a>;
// Variants of `ScopeId` (enum header elided from this listing):
// a cleanup destination is either an AST scope, named by node id,
// or a custom scope, named by stack index.
76 AstScope(ast::NodeId),
77 CustomScope(CustomScopeIndex)
80 impl<'a> CleanupMethods<'a> for FunctionContext<'a> {
81 fn push_ast_cleanup_scope(&self, id: ast::NodeId) {
83 * Invoked when we start to trans the code contained
84 * within a new cleanup scope.
87 debug!("push_ast_cleanup_scope({})",
88 self.ccx.tcx.map.node_to_str(id));
90 // FIXME(#2202) -- currently closure bodies have a parent
91 // region, which messes up the assertion below, since there
92 // are no cleanup scopes on the stack at the start of
93 // trans'ing a closure body. I think though that this should
94 // eventually be fixed by closure bodies not having a parent
95 // region, though that's a touch unclear, and it might also be
96 // better just to narrow this assertion more (i.e., by
97 // excluding id's that correspond to closure bodies only). For
98 // now we just say that if there is already an AST scope on the stack,
99 // this new AST scope had better be its immediate child.
// Only assert the parent relationship when an enclosing AST scope
// exists (see FIXME above for why it can be absent).
100 let top_scope = self.top_ast_scope();
101 if top_scope.is_some() {
102 assert_eq!(self.ccx.tcx.region_maps.opt_encl_scope(id), top_scope);
105 self.push_scope(CleanupScope::new(AstScopeKind(id)));
// Pushes a loop scope for loop `id`, recording the break/continue
// target blocks in `exits` (indexed by the EXIT_* constants).
// The loop must itself sit directly inside AST scope `id`.
108 fn push_loop_cleanup_scope(&self,
110 exits: [&'a Block<'a>, ..EXIT_MAX]) {
111 debug!("push_loop_cleanup_scope({})",
112 self.ccx.tcx.map.node_to_str(id));
// A loop scope always shares its id with the AST scope on top.
113 assert_eq!(Some(id), self.top_ast_scope());
115 self.push_scope(CleanupScope::new(LoopScopeKind(id, exits)));
// Pushes a temporary (custom) scope and returns its stack index,
// which the caller later passes to the pop_* methods.
118 fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
// Current length == index the new scope will occupy after the push.
119 let index = self.scopes_len();
120 debug!("push_custom_cleanup_scope(): {}", index);
121 self.push_scope(CleanupScope::new(CustomScopeKind));
122 CustomScopeIndex { index: index }
125 fn pop_and_trans_ast_cleanup_scope(&self,
127 cleanup_scope: ast::NodeId)
130 * Removes the cleanup scope for id `cleanup_scope`, which
131 * must be at the top of the cleanup stack, and generates the
132 * code to do its cleanups for normal exit.
135 debug!("pop_and_trans_ast_cleanup_scope({})",
136 self.ccx.tcx.map.node_to_str(cleanup_scope));
138 assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));
// Pop first, then emit the scope's cleanups into bcx; returns the
// block in which translation should continue.
140 let scope = self.pop_scope();
141 self.trans_scope_cleanups(bcx, &scope)
145 fn pop_loop_cleanup_scope(&self,
146 cleanup_scope: ast::NodeId) {
148 * Removes the loop cleanup scope for id `cleanup_scope`, which
149 * must be at the top of the cleanup stack. Does not generate
150 * any cleanup code, since loop scopes should exit by
151 * branching to a block generated by `normal_exit_block`.
154 debug!("pop_loop_cleanup_scope({})",
155 self.ccx.tcx.map.node_to_str(cleanup_scope));
157 assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));
// Scope is discarded without translating its cleanups (see doc above).
159 let _ = self.pop_scope();
162 fn pop_custom_cleanup_scope(&self,
163 custom_scope: CustomScopeIndex) {
165 * Removes the top cleanup scope from the stack without
166 * executing its cleanups. The top cleanup scope must
167 * be the temporary scope `custom_scope`.
170 debug!("pop_custom_cleanup_scope({})", custom_scope.index);
// Validity check: `custom_scope` must be a custom scope AND the
// topmost entry on the stack.
171 assert!(self.is_valid_to_pop_custom_scope(custom_scope));
172 let _ = self.pop_scope();
175 fn pop_and_trans_custom_cleanup_scope(&self,
177 custom_scope: CustomScopeIndex)
180 * Removes the top cleanup scope from the stack, which must be
181 * a temporary scope, and generates the code to do its
182 * cleanups for normal exit.
185 debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope);
186 assert!(self.is_valid_to_pop_custom_scope(custom_scope));
// Pop, then emit the cleanups into bcx; returns the continuation block.
188 let scope = self.pop_scope();
189 self.trans_scope_cleanups(bcx, &scope)
192 fn top_loop_scope(&self) -> ast::NodeId {
194 * Returns the id of the top-most loop scope
// Walk the stack top-down looking for the first loop scope;
// compiler bug if none exists (caller must be inside a loop).
197 for scope in self.scopes.borrow().iter().rev() {
199 LoopScopeKind(id, _) => {
205 self.ccx.sess().bug("no loop scope found");
208 fn normal_exit_block(&'a self,
209 cleanup_scope: ast::NodeId,
210 exit: uint) -> BasicBlockRef {
212 * Returns a block to branch to which will perform all pending
213 * cleanups and then break/continue (depending on `exit`) out
214 * of the loop with id `cleanup_scope`
// `exit` is one of the EXIT_* indices (EXIT_BREAK / EXIT_LOOP).
217 self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
220 fn return_exit_block(&'a self) -> BasicBlockRef {
222 * Returns a block to branch to which will perform all pending
223 * cleanups and then return from this function
226 self.trans_cleanups_to_exit_scope(ReturnExit)
229 fn schedule_drop_mem(&self,
230 cleanup_scope: ScopeId,
234 * Schedules a (deep) drop of `val`, which is a pointer to an
// Types with no drop glue need no cleanup at all.
238 if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; }
239 let drop = box DropValue {
// Only run this cleanup on unwind if the type actually requires
// unwind cleanup.
241 on_unwind: ty::type_needs_unwind_cleanup(self.ccx.tcx(), ty),
246 debug!("schedule_drop_mem({:?}, val={}, ty={})",
248 self.ccx.tn.val_to_str(val),
249 ty.repr(self.ccx.tcx()));
251 self.schedule_clean(cleanup_scope, drop as Box<Cleanup>);
// Same as schedule_drop_mem, but `val` is the value itself rather
// than a pointer to it (see the is_immediate flag on DropValue).
254 fn schedule_drop_immediate(&self,
255 cleanup_scope: ScopeId,
259 * Schedules a (deep) drop of `val`, which is an instance of `ty`
262 if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; }
263 let drop = box DropValue {
265 on_unwind: ty::type_needs_unwind_cleanup(self.ccx.tcx(), ty),
270 debug!("schedule_drop_immediate({:?}, val={}, ty={})",
272 self.ccx.tn.val_to_str(val),
273 ty.repr(self.ccx.tcx()));
275 self.schedule_clean(cleanup_scope, drop as Box<Cleanup>);
278 fn schedule_free_value(&self,
279 cleanup_scope: ScopeId,
284 * Schedules a call to `free(val)`. Note that this is a shallow
// FreeValue records which heap the pointer came from so the right
// free routine is chosen at trans time (see impl Cleanup for FreeValue).
288 let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };
290 debug!("schedule_free_value({:?}, val={}, heap={:?})",
292 self.ccx.tn.val_to_str(val),
295 self.schedule_clean(cleanup_scope, drop as Box<Cleanup>);
// Dispatches a cleanup to the right scheduling routine based on
// whether the target scope is an AST scope or a custom scope.
298 fn schedule_clean(&self,
299 cleanup_scope: ScopeId,
300 cleanup: Box<Cleanup>) {
301 match cleanup_scope {
302 AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
303 CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
307 fn schedule_clean_in_ast_scope(&self,
308 cleanup_scope: ast::NodeId,
309 cleanup: Box<Cleanup>) {
311 * Schedules a cleanup to occur upon exit from `cleanup_scope`.
312 * If `cleanup_scope` is not provided, then the cleanup is scheduled
313 * in the topmost scope, which must be a temporary scope.
316 debug!("schedule_clean_in_ast_scope(cleanup_scope={:?})",
// Walk the stack top-down to find the matching AST scope. Every
// scope passed on the way (and the target itself) has its cached
// exits invalidated, since those cached blocks would not include
// the newly added cleanup.
319 for scope in self.scopes.borrow_mut().mut_iter().rev() {
320 if scope.kind.is_ast_with_id(cleanup_scope) {
321 scope.cleanups.push(cleanup);
322 scope.clear_cached_exits();
325 // will be adding a cleanup to some enclosing scope
326 scope.clear_cached_exits();
// Compiler bug if the requested scope is not on the stack.
331 format!("no cleanup scope {} found",
332 self.ccx.tcx.map.node_to_str(cleanup_scope)).as_slice());
335 fn schedule_clean_in_custom_scope(&self,
336 custom_scope: CustomScopeIndex,
337 cleanup: Box<Cleanup>) {
339 * Schedules a cleanup to occur in the top-most scope,
340 * which must be a temporary scope.
343 debug!("schedule_clean_in_custom_scope(custom_scope={})",
346 assert!(self.is_valid_custom_scope(custom_scope));
// Direct indexed access — unlike the AST variant, no stack walk is
// needed because the caller holds the scope's index.
348 let mut scopes = self.scopes.borrow_mut();
349 let scope = scopes.get_mut(custom_scope.index);
350 scope.cleanups.push(cleanup);
// New cleanup invalidates any cached exit blocks for this scope.
351 scope.clear_cached_exits();
354 fn needs_invoke(&self) -> bool {
356 * Returns true if there are pending cleanups that should
357 * execute on failure.
// Any scope anywhere on the stack with unwind cleanups suffices.
360 self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
363 fn get_landing_pad(&'a self) -> BasicBlockRef {
365 * Returns a basic block to branch to in the event of a failure.
366 * This block will run the failure cleanups and eventually
367 * invoke the LLVM `Resume` instruction.
370 let _icx = base::push_ctxt("get_landing_pad");
372 debug!("get_landing_pad");
374 let orig_scopes_len = self.scopes_len();
375 assert!(orig_scopes_len > 0);
377 // Remove any scopes that do not have cleanups on failure:
// Scopes with nothing to do on unwind are temporarily popped so the
// landing pad is created for the nearest scope that matters; they
// are pushed back below, restoring the original stack.
378 let mut popped_scopes = vec!();
379 while !self.top_scope(|s| s.needs_invoke()) {
380 debug!("top scope does not need invoke");
381 popped_scopes.push(self.pop_scope());
384 // Check for an existing landing pad in the new topmost scope:
385 let llbb = self.get_or_create_landing_pad();
387 // Push the scopes we removed back on:
389 match popped_scopes.pop() {
390 Some(scope) => self.push_scope(scope),
// Invariant: the stack is exactly as it was on entry.
395 assert_eq!(self.scopes_len(), orig_scopes_len);
401 impl<'a> CleanupHelperMethods<'a> for FunctionContext<'a> {
402 fn top_ast_scope(&self) -> Option<ast::NodeId> {
404 * Returns the id of the current top-most AST scope, if any.
// Skip custom and loop scopes; the first AST scope found (top-down)
// is the answer. (Match arms for the AST case are elided in this
// listing.)
406 for scope in self.scopes.borrow().iter().rev() {
408 CustomScopeKind | LoopScopeKind(..) => {}
// Position (from the top of the stack) of the nearest scope that has
// at least one scheduled cleanup, or None if all scopes are empty.
417 fn top_nonempty_cleanup_scope(&self) -> Option<uint> {
418 self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())
// A custom scope may be popped only when it is both valid and the
// topmost entry on the stack.
421 fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
422 self.is_valid_custom_scope(custom_scope) &&
423 custom_scope.index == self.scopes.borrow().len() - 1
// True when the index is in bounds and the scope there really is a
// temporary (custom) scope.
426 fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
427 let scopes = self.scopes.borrow();
428 custom_scope.index < scopes.len() &&
429 scopes.get(custom_scope.index).kind.is_temp()
432 fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
434 scope: &CleanupScope) -> &'a Block<'a> {
435 /*! Generates the cleanups for `scope` into `bcx` */
// Nothing is emitted into an unreachable block. Cleanups run in
// reverse of the order they were scheduled (LIFO).
438 if !bcx.unreachable.get() {
439 for cleanup in scope.cleanups.iter().rev() {
440 bcx = cleanup.trans(bcx);
// Number of scopes currently on the cleanup stack.
446 fn scopes_len(&self) -> uint {
447 self.scopes.borrow().len()
// Pushes `scope` onto the cleanup stack.
450 fn push_scope(&self, scope: CleanupScope<'a>) {
451 self.scopes.borrow_mut().push(scope)
// Pops and returns the topmost scope; panics (unwrap) if the stack
// is empty — callers must guarantee at least one scope is present.
454 fn pop_scope(&self) -> CleanupScope<'a> {
455 debug!("popping cleanup scope {}, {} scopes remaining",
456 self.top_scope(|s| s.block_name("")),
457 self.scopes_len() - 1);
459 self.scopes.borrow_mut().pop().unwrap()
// Applies `f` to the topmost scope while the borrow is held; keeps
// callers from holding the RefCell borrow across other operations.
462 fn top_scope<R>(&self, f: |&CleanupScope<'a>| -> R) -> R {
463 f(self.scopes.borrow().last().unwrap())
466 fn trans_cleanups_to_exit_scope(&'a self,
467 label: EarlyExitLabel)
470 * Used when the caller wishes to jump to an early exit, such
471 * as a return, break, continue, or unwind. This function will
472 * generate all cleanups between the top of the stack and the
473 * exit `label` and return a basic block that the caller can
476 * For example, if the current stack of cleanups were as follows:
485 * and the `label` specifies a break from `Loop 23`, then this
486 * function would generate a series of basic blocks as follows:
488 * Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
490 * where `break_blk` is the block specified in `Loop 23` as
491 * the target for breaks. The return value would be the first
492 * basic block in that sequence (`Cleanup(AST 24)`). The
493 * caller could then branch to `Cleanup(AST 24)` and it will
494 * perform all cleanups and finally branch to the `break_blk`.
497 debug!("trans_cleanups_to_exit_scope label={:?} scopes={}",
498 label, self.scopes_len());
500 let orig_scopes_len = self.scopes_len();
502 let mut popped_scopes = vec!();
504 // First we pop off all the cleanup stacks that are
505 // traversed until the exit is reached, pushing them
506 // onto the side vector `popped_scopes`. No code is
507 // generated at this time.
509 // So, continuing the example from above, we would wind up
510 // with a `popped_scopes` vector of `[AST 24, Custom 2]`.
511 // (Presuming that there are no cached exits)
// Stack exhausted: the exit target depends on `label` (match arms
// partially elided in this listing).
513 if self.scopes_len() == 0 {
516 // Generate a block that will `Resume`.
517 let prev_bcx = self.new_block(true, "resume", None);
518 let personality = self.personality.get().expect(
519 "create_landing_pad() should have set this");
520 build::Resume(prev_bcx,
521 build::Load(prev_bcx, personality));
522 prev_llbb = prev_bcx.llbb;
// ReturnExit with an empty stack: branch straight to the function's
// return block.
527 prev_llbb = self.get_llreturn();
// LoopExit with an empty stack is a compiler bug: the loop scope
// was never found.
532 self.ccx.sess().bug(format!(
533 "cannot exit from scope {:?}, \
534 not in scope", id).as_slice());
539 // Check if we have already cached the unwinding of this
540 // scope for this label. If so, we can stop popping scopes
541 // and branch to the cached label, since it contains the
542 // cleanups for any subsequent scopes.
543 match self.top_scope(|s| s.cached_early_exit(label)) {
544 Some(cleanup_block) => {
545 prev_llbb = cleanup_block;
551 // Pop off the scope, since we will be generating
552 // unwinding code for it. If we are searching for a loop exit,
553 // and this scope is that loop, then stop popping and set
554 // `prev_llbb` to the appropriate exit block from the loop.
555 popped_scopes.push(self.pop_scope());
556 let scope = popped_scopes.last().unwrap();
558 UnwindExit | ReturnExit => { }
559 LoopExit(id, exit) => {
560 match scope.kind.early_exit_block(id, exit) {
562 prev_llbb = exitllbb;
572 debug!("trans_cleanups_to_exit_scope: popped {} scopes",
573 popped_scopes.len());
575 // Now push the popped scopes back on. As we go,
576 // we track in `prev_llbb` the exit to which this scope
577 // should branch when it's done.
579 // So, continuing with our example, we will start out with
580 // `prev_llbb` being set to `break_blk` (or possibly a cached
581 // early exit). We will then pop the scopes from `popped_scopes`
582 // and generate a basic block for each one, prepending it in the
583 // series and updating `prev_llbb`. So we begin by popping `Custom 2`
584 // and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)`
585 // branch to `prev_llbb == break_blk`, giving us a sequence like:
587 // Cleanup(Custom 2) -> prev_llbb
589 // We then pop `AST 24` and repeat the process, giving us the sequence:
591 // Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb
593 // At this point, `popped_scopes` is empty, and so the final block
594 // that we return to the user is `Cleanup(AST 24)`.
595 while !popped_scopes.is_empty() {
596 let mut scope = popped_scopes.pop().unwrap();
// Only emit a block for this scope if it has at least one cleanup
// applicable to this kind of exit (unwind exits skip non-unwind
// cleanups — see cleanup_is_suitable_for).
598 if scope.cleanups.iter().any(|c| cleanup_is_suitable_for(*c, label))
600 let name = scope.block_name("clean");
601 debug!("generating cleanups for {}", name);
602 let bcx_in = self.new_block(label.is_unwind(),
605 let mut bcx_out = bcx_in;
606 for cleanup in scope.cleanups.iter().rev() {
607 if cleanup_is_suitable_for(*cleanup, label) {
608 bcx_out = cleanup.trans(bcx_out);
611 build::Br(bcx_out, prev_llbb);
612 prev_llbb = bcx_in.llbb;
614 debug!("no suitable cleanups in {}",
615 scope.block_name("clean"));
// Cache the block for this label so future exits through this scope
// can reuse it, then restore the scope to the stack.
618 scope.add_cached_early_exit(label, prev_llbb);
619 self.push_scope(scope);
622 debug!("trans_cleanups_to_exit_scope: prev_llbb={}", prev_llbb);
// Invariant: the stack is exactly as it was on entry.
624 assert_eq!(self.scopes_len(), orig_scopes_len);
628 fn get_or_create_landing_pad(&'a self) -> BasicBlockRef {
630 * Creates a landing pad for the top scope, if one does not
631 * exist. The landing pad will perform all cleanups necessary
632 * for an unwind and then `resume` to continue error
635 * landing_pad -> ... cleanups ... -> [resume]
637 * (The cleanups and resume instruction are created by
638 * `trans_cleanups_to_exit_scope()`, not in this function
644 debug!("get_or_create_landing_pad");
646 // Check if a landing pad block exists; if not, create one.
// The RefCell borrow below is scoped so it is released before
// trans_cleanups_to_exit_scope (which re-borrows) runs.
// NOTE(review): the braces delimiting that borrow scope are elided
// in this listing.
648 let mut scopes = self.scopes.borrow_mut();
649 let last_scope = scopes.mut_last().unwrap();
650 match last_scope.cached_landing_pad {
651 Some(llbb) => { return llbb; }
653 let name = last_scope.block_name("unwind");
654 pad_bcx = self.new_block(true, name.as_slice(), None);
655 last_scope.cached_landing_pad = Some(pad_bcx.llbb);
660 // The landing pad return type (the type being propagated). Not sure what
661 // this represents but it's determined by the personality function and
662 // this is what the EH proposal example uses.
663 let llretty = Type::struct_(self.ccx,
664 [Type::i8p(self.ccx), Type::i32(self.ccx)],
667 // The exception handling personality function.
669 // If our compilation unit has the `eh_personality` lang item somewhere
670 // within it, then we just need to translate that. Otherwise, we're
671 // building an rlib which will depend on some upstream implementation of
672 // this function, so we just codegen a generic reference to it. We don't
673 // specify any of the types for the function, we just make it a symbol
674 // that LLVM can later use.
675 let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() {
676 Some(def_id) => callee::trans_fn_ref(pad_bcx, def_id, ExprId(0)),
// No lang item: lazily declare and memoize an external
// `rust_eh_personality` symbol on the crate context.
678 let mut personality = self.ccx.eh_personality.borrow_mut();
680 Some(llpersonality) => llpersonality,
682 let fty = Type::variadic_func(&[], &Type::i32(self.ccx));
683 let f = base::decl_cdecl_fn(self.ccx.llmod,
684 "rust_eh_personality",
687 *personality = Some(f);
694 // The only landing pad clause will be 'cleanup'
695 let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1u);
697 // The landing pad block is a cleanup
698 build::SetCleanup(pad_bcx, llretval);
700 // We store the retval in a function-central alloca, so that calls to
701 // Resume can find it.
// Reuse the existing personality slot if one was allocated by an
// earlier landing pad; otherwise allocate it now.
702 match self.personality.get() {
704 build::Store(pad_bcx, llretval, addr);
707 let addr = base::alloca(pad_bcx, common::val_ty(llretval), "");
708 self.personality.set(Some(addr));
709 build::Store(pad_bcx, llretval, addr);
713 // Generate the cleanup block and branch to it.
714 let cleanup_llbb = self.trans_cleanups_to_exit_scope(UnwindExit);
715 build::Br(pad_bcx, cleanup_llbb);
721 impl<'a> CleanupScope<'a> {
// Constructs an empty scope of the given kind with no cleanups and
// no cached exit/landing-pad blocks.
722 fn new(kind: CleanupScopeKind<'a>) -> CleanupScope<'a> {
726 cached_early_exits: vec!(),
727 cached_landing_pad: None,
// Invalidates all memoized exit blocks for this scope; called when a
// new cleanup is scheduled, since cached blocks would not include it.
731 fn clear_cached_exits(&mut self) {
732 self.cached_early_exits = vec!();
733 self.cached_landing_pad = None;
// Looks up the memoized cleanup block for `label`, if one was
// recorded by add_cached_early_exit.
736 fn cached_early_exit(&self,
737 label: EarlyExitLabel)
738 -> Option<BasicBlockRef> {
739 self.cached_early_exits.iter().
740 find(|e| e.label == label).
741 map(|e| e.cleanup_block)
// Records `blk` as the cleanup block to reuse for future exits via
// `label` through this scope.
744 fn add_cached_early_exit(&mut self,
745 label: EarlyExitLabel,
746 blk: BasicBlockRef) {
747 self.cached_early_exits.push(
748 CachedEarlyExit { label: label,
749 cleanup_block: blk });
752 fn needs_invoke(&self) -> bool {
753 /*! True if this scope has cleanups for use during unwinding */
// A cached landing pad also counts: it must stay reachable even if
// every individual cleanup is non-unwind.
755 self.cached_landing_pad.is_some() ||
756 self.cleanups.iter().any(|c| c.clean_on_unwind())
759 fn block_name(&self, prefix: &str) -> String {
761 * Returns a suitable name to use for the basic block that
762 * handles this cleanup scope
// Name encodes the scope kind (and node id, when there is one) for
// readable LLVM IR, e.g. "clean_ast_42_".
766 CustomScopeKind => format_strbuf!("{}_custom_", prefix),
767 AstScopeKind(id) => format_strbuf!("{}_ast_{}_", prefix, id),
768 LoopScopeKind(id, _) => format_strbuf!("{}_loop_{}_", prefix, id),
773 impl<'a> CleanupScopeKind<'a> {
// True only for custom (temporary) scopes.
774 fn is_temp(&self) -> bool {
776 CustomScopeKind => true,
777 LoopScopeKind(..) | AstScopeKind(..) => false,
// True when this is an AST scope for exactly the node `id`.
781 fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
783 CustomScopeKind | LoopScopeKind(..) => false,
784 AstScopeKind(i) => i == id
// True when this is a loop scope for exactly the loop `id`.
788 fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
790 CustomScopeKind | AstScopeKind(..) => false,
791 LoopScopeKind(i, _) => i == id
795 fn early_exit_block(&self,
797 exit: uint) -> Option<BasicBlockRef> {
799 * If this is a loop scope with id `id`, return the early
800 * exit block `exit`, else `None`
// `exit` indexes the loop's exit array (EXIT_BREAK / EXIT_LOOP).
804 LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
810 impl EarlyExitLabel {
// True for UnwindExit; used to decide whether cleanup blocks are
// created on the unwind path and which cleanups are suitable.
// NOTE(review): the match body is elided from this listing.
811 fn is_unwind(&self) -> bool {
819 ///////////////////////////////////////////////////////////////////////////
822 pub struct DropValue {
829 impl Cleanup for DropValue {
// Whether this drop must also run during unwinding; returns the
// `on_unwind` flag set at scheduling time (body line elided).
830 fn clean_on_unwind(&self) -> bool {
// Emits drop glue for the stored value: immediate values use the
// immediate variant, otherwise drop through the pointer.
834 fn trans<'a>(&self, bcx: &'a Block<'a>) -> &'a Block<'a> {
835 if self.is_immediate {
836 glue::drop_ty_immediate(bcx, self.val, self.ty)
838 glue::drop_ty(bcx, self.val, self.ty)
848 pub struct FreeValue {
854 impl Cleanup for FreeValue {
// Body line elided in this listing.
855 fn clean_on_unwind(&self) -> bool {
// Selects the free routine by heap kind (match header elided):
// managed-heap free vs. typed exchange-heap free.
859 fn trans<'a>(&self, bcx: &'a Block<'a>) -> &'a Block<'a> {
862 glue::trans_free(bcx, self.ptr)
865 glue::trans_exchange_free_ty(bcx, self.ptr, self.content_ty)
// Maps an expression id to the ScopeId of the scope its temporaries
// live in, per the region maps; compiler bug if no temporary scope
// is recorded for the expression.
871 pub fn temporary_scope(tcx: &ty::ctxt,
874 match tcx.region_maps.temporary_scope(id) {
876 let r = AstScope(scope);
877 debug!("temporary_scope({}) = {:?}", id, r);
881 tcx.sess.bug(format!("no temporary scope available for expr {}",
// Maps a variable's node id to the ScopeId of the AST scope the
// variable is attached to, per the region maps.
887 pub fn var_scope(tcx: &ty::ctxt,
890 let r = AstScope(tcx.region_maps.var_scope(id));
891 debug!("var_scope({}) = {:?}", id, r);
// A cleanup applies to a given exit unless the exit is an unwind and
// the cleanup opted out of running on unwind.
895 fn cleanup_is_suitable_for(c: &Cleanup,
896 label: EarlyExitLabel) -> bool {
897 !label.is_unwind() || c.clean_on_unwind()
900 ///////////////////////////////////////////////////////////////////////////
901 // These traits just exist to put the methods into this file.
// Public interface of the cleanup machinery, implemented above for
// FunctionContext (trait exists to group the methods in this file —
// see the comment preceding it). Some parameter lines are elided in
// this listing.
903 pub trait CleanupMethods<'a> {
904 fn push_ast_cleanup_scope(&self, id: ast::NodeId);
905 fn push_loop_cleanup_scope(&self,
907 exits: [&'a Block<'a>, ..EXIT_MAX]);
908 fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
909 fn pop_and_trans_ast_cleanup_scope(&self,
911 cleanup_scope: ast::NodeId)
913 fn pop_loop_cleanup_scope(&self,
914 cleanup_scope: ast::NodeId);
915 fn pop_custom_cleanup_scope(&self,
916 custom_scope: CustomScopeIndex);
917 fn pop_and_trans_custom_cleanup_scope(&self,
919 custom_scope: CustomScopeIndex)
921 fn top_loop_scope(&self) -> ast::NodeId;
922 fn normal_exit_block(&'a self,
923 cleanup_scope: ast::NodeId,
924 exit: uint) -> BasicBlockRef;
925 fn return_exit_block(&'a self) -> BasicBlockRef;
926 fn schedule_drop_mem(&self,
927 cleanup_scope: ScopeId,
930 fn schedule_drop_immediate(&self,
931 cleanup_scope: ScopeId,
934 fn schedule_free_value(&self,
935 cleanup_scope: ScopeId,
939 fn schedule_clean(&self,
940 cleanup_scope: ScopeId,
941 cleanup: Box<Cleanup>);
942 fn schedule_clean_in_ast_scope(&self,
943 cleanup_scope: ast::NodeId,
944 cleanup: Box<Cleanup>);
945 fn schedule_clean_in_custom_scope(&self,
946 custom_scope: CustomScopeIndex,
947 cleanup: Box<Cleanup>);
948 fn needs_invoke(&self) -> bool;
949 fn get_landing_pad(&'a self) -> BasicBlockRef;
// Private helper interface backing CleanupMethods; implemented above
// for FunctionContext. Some parameter lines are elided in this
// listing, and the trait's closing brace lies beyond it.
952 trait CleanupHelperMethods<'a> {
953 fn top_ast_scope(&self) -> Option<ast::NodeId>;
954 fn top_nonempty_cleanup_scope(&self) -> Option<uint>;
955 fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
956 fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
957 fn trans_scope_cleanups(&self,
959 scope: &CleanupScope<'a>) -> &'a Block<'a>;
960 fn trans_cleanups_to_exit_scope(&'a self,
961 label: EarlyExitLabel)
963 fn get_or_create_landing_pad(&'a self) -> BasicBlockRef;
964 fn scopes_len(&self) -> uint;
965 fn push_scope(&self, scope: CleanupScope<'a>);
966 fn pop_scope(&self) -> CleanupScope<'a>;
967 fn top_scope<R>(&self, f: |&CleanupScope<'a>| -> R) -> R;