1 // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Code pertaining to cleanup of temporaries as well as execution of
12 //! drop glue. See discussion in `doc.rs` for a high-level summary.
// Re-export the variants of ScopeId, CleanupScopeKind, EarlyExitLabel and
// Heap so the rest of this module can name them unqualified (pre-1.0 idiom).
// NOTE(review): this listing elides many original source lines (the leading
// line numbers skip), so several definitions below are visibly incomplete.
14 pub use self::ScopeId::*;
15 pub use self::CleanupScopeKind::*;
16 pub use self::EarlyExitLabel::*;
17 pub use self::Heap::*;
19 use llvm::{BasicBlockRef, ValueRef};
24 use trans::common::{Block, FunctionContext, ExprId, NodeInfo};
27 // Temporary due to slicing syntax hacks (KILLME)
29 use trans::type_::Type;
30 use middle::ty::{mod, Ty};
33 use util::ppaux::Repr;
// One entry on the FunctionContext's stack of cleanup scopes: the cleanups
// scheduled while translating this scope, plus cached exit blocks so that
// repeated early exits can reuse already-generated cleanup code.
35 pub struct CleanupScope<'blk, 'tcx: 'blk> {
36 // The id of this cleanup scope. If the id is None,
37 // this is a *temporary scope* that is pushed during trans to
38 // cleanup miscellaneous garbage that trans may generate whose
39 // lifetime is a subset of some expression. See module doc for
41 kind: CleanupScopeKind<'blk, 'tcx>,
43 // Cleanups to run upon scope exit.
44 cleanups: Vec<CleanupObj<'tcx>>,
46 // The debug location any drop calls generated for this scope will be
48 debug_loc: Option<NodeInfo>,
// Early-exit blocks already generated for this scope, keyed by label;
// invalidated whenever a new cleanup is scheduled (clear_cached_exits).
50 cached_early_exits: Vec<CachedEarlyExit>,
// Cached unwind landing pad for this scope, if one has been built.
51 cached_landing_pad: Option<BasicBlockRef>,
// Newtype identifying a custom (temporary) scope by its position in the
// scope stack. (The field declaration lines are elided in this listing;
// an `index: uint` field is read throughout the impls below.)
54 #[deriving(Copy, Show)]
55 pub struct CustomScopeIndex {
// Indices into a loop scope's fixed array of early-exit target blocks:
// EXIT_BREAK is the `break` target, EXIT_LOOP is presumably the
// `continue`/loopback target (TODO confirm against callers), and EXIT_MAX
// is the length of the array.
59 pub const EXIT_BREAK: uint = 0;
60 pub const EXIT_LOOP: uint = 1;
61 pub const EXIT_MAX: uint = 2;
// The kind of a cleanup-scope stack entry. A third variant,
// CustomScopeKind, is matched on elsewhere in this file; its declaration
// line is elided from this listing.
63 pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
65 AstScopeKind(ast::NodeId),
66 LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>; EXIT_MAX])
// Manual Show impl (presumably because the Block array prevents deriving);
// loop exit blocks are printed by pointer.
69 impl<'blk, 'tcx: 'blk> fmt::Show for CleanupScopeKind<'blk, 'tcx> {
70 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
72 CustomScopeKind => write!(f, "CustomScopeKind"),
73 AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid),
74 LoopScopeKind(nid, ref blks) => {
75 try!(write!(f, "LoopScopeKind({}, [", nid));
76 for blk in blks.iter() {
77 try!(write!(f, "{:p}, ", blk));
// Identifies which early exit a cleanup path leads to. The UnwindExit and
// ReturnExit variants are used elsewhere in this file; their declaration
// lines are elided from this listing.
85 #[deriving(Copy, PartialEq, Show)]
86 pub enum EarlyExitLabel {
89 LoopExit(ast::NodeId, uint)
// A previously generated cleanup block for a given early-exit label; lets
// trans_cleanups_to_exit_scope stop popping scopes and branch to the
// cached code instead of regenerating it.
93 pub struct CachedEarlyExit {
94 label: EarlyExitLabel,
95 cleanup_block: BasicBlockRef,
// A scheduled cleanup action. Implementors below (DropValue, FreeValue,
// FreeSlice, LifetimeEnd) emit the actual cleanup code via trans().
98 pub trait Cleanup<'tcx> {
// Whether this cleanup requires unwind handling (used by
// CleanupScope::needs_invoke to decide if a landing pad is needed).
99 fn must_unwind(&self) -> bool;
// Whether this cleanup should run at all on the unwind path
// (consulted by cleanup_is_suitable_for).
100 fn clean_on_unwind(&self) -> bool;
// Distinguishes pure lifetime-end markers from real cleanups
// (see CleanupScope::drop_non_lifetime_clean).
101 fn is_lifetime_end(&self) -> bool;
// Emit the cleanup code into `bcx`, returning the resulting block.
102 fn trans<'blk>(&self,
103 bcx: Block<'blk, 'tcx>,
104 debug_loc: Option<NodeInfo>)
105 -> Block<'blk, 'tcx>;
// Boxed, type-erased cleanup as stored in a scope's `cleanups` list.
108 pub type CleanupObj<'tcx> = Box<Cleanup<'tcx>+'tcx>;
// Identifies a scope either by its AST node id or by its custom-scope
// stack index. (The `pub enum ScopeId {` header line is elided from this
// listing; the variants are re-exported at the top of the file.)
110 #[deriving(Copy, Show)]
112 AstScope(ast::NodeId),
113 CustomScope(CustomScopeIndex)
// Public cleanup-scope operations on the function being translated.
// NOTE(review): many original lines inside this impl are elided from this
// listing (the baked-in line numbers skip); code is left byte-identical.
116 impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
117 /// Invoked when we start to trans the code contained within a new cleanup scope.
118 fn push_ast_cleanup_scope(&self, debug_loc: NodeInfo) {
119 debug!("push_ast_cleanup_scope({})",
120 self.ccx.tcx().map.node_to_string(debug_loc.id));
122 // FIXME(#2202) -- currently closure bodies have a parent
123 // region, which messes up the assertion below, since there
124 // are no cleanup scopes on the stack at the start of
125 // trans'ing a closure body. I think though that this should
126 // eventually be fixed by closure bodies not having a parent
127 // region, though that's a touch unclear, and it might also be
128 // better just to narrow this assertion more (i.e., by
129 // excluding id's that correspond to closure bodies only). For
130 // now we just say that if there is already an AST scope on the stack,
131 // this new AST scope had better be its immediate child.
132 // Temporarily removed due to slicing syntax hacks (KILLME).
133 /*let top_scope = self.top_ast_scope();
134 if top_scope.is_some() {
138 .opt_encl_scope(region::CodeExtent::from_node_id(debug_loc.id))
139 .map(|s|s.node_id()),
143 self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
/// Pushes a loop scope for `id` with the given break/continue exit
/// blocks; the top AST scope must already be `id` (asserted below).
147 fn push_loop_cleanup_scope(&self,
149 exits: [Block<'blk, 'tcx>; EXIT_MAX]) {
150 debug!("push_loop_cleanup_scope({})",
151 self.ccx.tcx().map.node_to_string(id));
152 assert_eq!(Some(id), self.top_ast_scope());
154 // Just copy the debuginfo source location from the enclosing scope
155 let debug_loc = self.scopes
161 self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc));
/// Pushes a temporary ("custom") scope and returns its stack index.
164 fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
165 let index = self.scopes_len();
166 debug!("push_custom_cleanup_scope(): {}", index);
168 // Just copy the debuginfo source location from the enclosing scope
169 let debug_loc = self.scopes
172 .map(|opt_scope| opt_scope.debug_loc)
175 self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc));
176 CustomScopeIndex { index: index }
/// As push_custom_cleanup_scope, but with an explicit debug location
/// rather than inheriting the enclosing scope's.
179 fn push_custom_cleanup_scope_with_debug_loc(&self,
181 -> CustomScopeIndex {
182 let index = self.scopes_len();
183 debug!("push_custom_cleanup_scope(): {}", index);
185 self.push_scope(CleanupScope::new(CustomScopeKind, Some(debug_loc)));
186 CustomScopeIndex { index: index }
189 /// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup
190 /// stack, and generates the code to do its cleanups for normal exit.
191 fn pop_and_trans_ast_cleanup_scope(&self,
192 bcx: Block<'blk, 'tcx>,
193 cleanup_scope: ast::NodeId)
194 -> Block<'blk, 'tcx> {
195 debug!("pop_and_trans_ast_cleanup_scope({})",
196 self.ccx.tcx().map.node_to_string(cleanup_scope));
198 assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));
200 let scope = self.pop_scope();
201 self.trans_scope_cleanups(bcx, &scope)
204 /// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the
205 /// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by
206 /// branching to a block generated by `normal_exit_block`.
207 fn pop_loop_cleanup_scope(&self,
208 cleanup_scope: ast::NodeId) {
209 debug!("pop_loop_cleanup_scope({})",
210 self.ccx.tcx().map.node_to_string(cleanup_scope));
212 assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));
214 let _ = self.pop_scope();
217 /// Removes the top cleanup scope from the stack without executing its cleanups. The top
218 /// cleanup scope must be the temporary scope `custom_scope`.
219 fn pop_custom_cleanup_scope(&self,
220 custom_scope: CustomScopeIndex) {
221 debug!("pop_custom_cleanup_scope({})", custom_scope.index);
222 assert!(self.is_valid_to_pop_custom_scope(custom_scope));
223 let _ = self.pop_scope();
226 /// Removes the top cleanup scope from the stack, which must be a temporary scope, and
227 /// generates the code to do its cleanups for normal exit.
228 fn pop_and_trans_custom_cleanup_scope(&self,
229 bcx: Block<'blk, 'tcx>,
230 custom_scope: CustomScopeIndex)
231 -> Block<'blk, 'tcx> {
232 debug!("pop_and_trans_custom_cleanup_scope({})", custom_scope);
233 assert!(self.is_valid_to_pop_custom_scope(custom_scope));
235 let scope = self.pop_scope();
236 self.trans_scope_cleanups(bcx, &scope)
239 /// Returns the id of the top-most loop scope
240 fn top_loop_scope(&self) -> ast::NodeId {
241 for scope in self.scopes.borrow().iter().rev() {
242 if let LoopScopeKind(id, _) = scope.kind {
246 self.ccx.sess().bug("no loop scope found");
249 /// Returns a block to branch to which will perform all pending cleanups and then
250 /// break/continue (depending on `exit`) out of the loop with id `cleanup_scope`
251 fn normal_exit_block(&'blk self,
252 cleanup_scope: ast::NodeId,
253 exit: uint) -> BasicBlockRef {
254 self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
257 /// Returns a block to branch to which will perform all pending cleanups and then return from
259 fn return_exit_block(&'blk self) -> BasicBlockRef {
260 self.trans_cleanups_to_exit_scope(ReturnExit)
/// Schedules an llvm.lifetime.end marker for `val` upon exit from
/// `cleanup_scope` (see the LifetimeEnd cleanup below).
263 fn schedule_lifetime_end(&self,
264 cleanup_scope: ScopeId,
266 let drop = box LifetimeEnd {
270 debug!("schedule_lifetime_end({}, val={})",
272 self.ccx.tn().val_to_string(val));
274 self.schedule_clean(cleanup_scope, drop as CleanupObj);
277 /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
278 fn schedule_drop_mem(&self,
279 cleanup_scope: ScopeId,
282 if !common::type_needs_drop(self.ccx.tcx(), ty) { return; }
283 let drop = box DropValue {
285 must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
291 debug!("schedule_drop_mem({}, val={}, ty={})",
293 self.ccx.tn().val_to_string(val),
294 ty.repr(self.ccx.tcx()));
296 self.schedule_clean(cleanup_scope, drop as CleanupObj);
299 /// Schedules a (deep) drop and zero-ing of `val`, which is a pointer to an instance of `ty`
300 fn schedule_drop_and_zero_mem(&self,
301 cleanup_scope: ScopeId,
304 if !common::type_needs_drop(self.ccx.tcx(), ty) { return; }
305 let drop = box DropValue {
307 must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
313 debug!("schedule_drop_and_zero_mem({}, val={}, ty={}, zero={})",
315 self.ccx.tn().val_to_string(val),
316 ty.repr(self.ccx.tcx()),
319 self.schedule_clean(cleanup_scope, drop as CleanupObj);
322 /// Schedules a (deep) drop of `val`, which is an instance of `ty`
323 fn schedule_drop_immediate(&self,
324 cleanup_scope: ScopeId,
328 if !common::type_needs_drop(self.ccx.tcx(), ty) { return; }
329 let drop = box DropValue {
331 must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
337 debug!("schedule_drop_immediate({}, val={}, ty={})",
339 self.ccx.tn().val_to_string(val),
340 ty.repr(self.ccx.tcx()));
342 self.schedule_clean(cleanup_scope, drop as CleanupObj);
345 /// Schedules a call to `free(val)`. Note that this is a shallow operation.
346 fn schedule_free_value(&self,
347 cleanup_scope: ScopeId,
350 content_ty: Ty<'tcx>) {
351 let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };
353 debug!("schedule_free_value({}, val={}, heap={})",
355 self.ccx.tn().val_to_string(val),
358 self.schedule_clean(cleanup_scope, drop as CleanupObj);
361 /// Schedules a call to `free(val)`. Note that this is a shallow operation.
362 fn schedule_free_slice(&self,
363 cleanup_scope: ScopeId,
368 let drop = box FreeSlice { ptr: val, size: size, align: align, heap: heap };
370 debug!("schedule_free_slice({}, val={}, heap={})",
372 self.ccx.tn().val_to_string(val),
375 self.schedule_clean(cleanup_scope, drop as CleanupObj);
/// Dispatches to the AST- or custom-scope scheduling routine depending
/// on the ScopeId variant.
378 fn schedule_clean(&self,
379 cleanup_scope: ScopeId,
380 cleanup: CleanupObj<'tcx>) {
381 match cleanup_scope {
382 AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
383 CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
387 /// Schedules a cleanup to occur upon exit from `cleanup_scope`. If `cleanup_scope` is not
388 /// provided, then the cleanup is scheduled in the topmost scope, which must be a temporary
390 fn schedule_clean_in_ast_scope(&self,
391 cleanup_scope: ast::NodeId,
392 cleanup: CleanupObj<'tcx>) {
393 debug!("schedule_clean_in_ast_scope(cleanup_scope={})",
396 for scope in self.scopes.borrow_mut().iter_mut().rev() {
397 if scope.kind.is_ast_with_id(cleanup_scope) {
398 scope.cleanups.push(cleanup);
399 scope.clear_cached_exits();
// Intervening (inner) scopes must also drop their cached exits:
// those exit paths would otherwise miss the new cleanup.
402 // will be adding a cleanup to some enclosing scope
403 scope.clear_cached_exits();
408 format!("no cleanup scope {} found",
409 self.ccx.tcx().map.node_to_string(cleanup_scope))[]);
412 /// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope.
413 fn schedule_clean_in_custom_scope(&self,
414 custom_scope: CustomScopeIndex,
415 cleanup: CleanupObj<'tcx>) {
416 debug!("schedule_clean_in_custom_scope(custom_scope={})",
419 assert!(self.is_valid_custom_scope(custom_scope));
421 let mut scopes = self.scopes.borrow_mut();
422 let scope = &mut (*scopes)[custom_scope.index];
423 scope.cleanups.push(cleanup);
424 scope.clear_cached_exits();
427 /// Returns true if there are pending cleanups that should execute on panic.
428 fn needs_invoke(&self) -> bool {
429 self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
432 /// Returns a basic block to branch to in the event of a panic. This block will run the panic
433 /// cleanups and eventually invoke the LLVM `Resume` instruction.
434 fn get_landing_pad(&'blk self) -> BasicBlockRef {
435 let _icx = base::push_ctxt("get_landing_pad");
437 debug!("get_landing_pad");
439 let orig_scopes_len = self.scopes_len();
440 assert!(orig_scopes_len > 0);
442 // Remove any scopes that do not have cleanups on panic:
443 let mut popped_scopes = vec!();
444 while !self.top_scope(|s| s.needs_invoke()) {
445 debug!("top scope does not need invoke");
446 popped_scopes.push(self.pop_scope());
449 // Check for an existing landing pad in the new topmost scope:
450 let llbb = self.get_or_create_landing_pad();
452 // Push the scopes we removed back on:
454 match popped_scopes.pop() {
455 Some(scope) => self.push_scope(scope),
// The stack must be restored to exactly its original depth.
460 assert_eq!(self.scopes_len(), orig_scopes_len);
// Private helper operations backing the public CleanupMethods above.
// NOTE(review): many original lines inside this impl are elided from this
// listing (the baked-in line numbers skip); code is left byte-identical.
466 impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
467 /// Returns the id of the current top-most AST scope, if any.
468 fn top_ast_scope(&self) -> Option<ast::NodeId> {
469 for scope in self.scopes.borrow().iter().rev() {
471 CustomScopeKind | LoopScopeKind(..) => {}
/// Position (counting from the top of the stack) of the innermost
/// scope with pending cleanups, if any.
480 fn top_nonempty_cleanup_scope(&self) -> Option<uint> {
481 self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())
/// A custom scope may only be popped when it is the top of the stack.
484 fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
485 self.is_valid_custom_scope(custom_scope) &&
486 custom_scope.index == self.scopes.borrow().len() - 1
/// True if `custom_scope` is in range and actually a temporary scope.
489 fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
490 let scopes = self.scopes.borrow();
491 custom_scope.index < scopes.len() &&
492 (*scopes)[custom_scope.index].kind.is_temp()
495 /// Generates the cleanups for `scope` into `bcx`
496 fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
497 bcx: Block<'blk, 'tcx>,
498 scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> {
// Skip code generation entirely if the block is already unreachable.
// Cleanups run in reverse order of scheduling (LIFO).
501 if !bcx.unreachable.get() {
502 for cleanup in scope.cleanups.iter().rev() {
503 bcx = cleanup.trans(bcx, scope.debug_loc);
/// Current depth of the cleanup-scope stack.
509 fn scopes_len(&self) -> uint {
510 self.scopes.borrow().len()
/// Pushes `scope` onto the stack.
513 fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
514 self.scopes.borrow_mut().push(scope)
/// Pops and returns the top scope; panics if the stack is empty.
517 fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
518 debug!("popping cleanup scope {}, {} scopes remaining",
519 self.top_scope(|s| s.block_name("")),
520 self.scopes_len() - 1);
522 self.scopes.borrow_mut().pop().unwrap()
/// Applies `f` to the top scope while the stack borrow is held.
525 fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R {
526 f(self.scopes.borrow().last().unwrap())
529 /// Used when the caller wishes to jump to an early exit, such as a return, break, continue, or
530 /// unwind. This function will generate all cleanups between the top of the stack and the exit
531 /// `label` and return a basic block that the caller can branch to.
533 /// For example, if the current stack of cleanups were as follows:
542 /// and the `label` specifies a break from `Loop 23`, then this function would generate a
543 /// series of basic blocks as follows:
545 /// Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
547 /// where `break_blk` is the block specified in `Loop 23` as the target for breaks. The return
548 /// value would be the first basic block in that sequence (`Cleanup(AST 24)`). The caller could
549 /// then branch to `Cleanup(AST 24)` and it will perform all cleanups and finally branch to the
551 fn trans_cleanups_to_exit_scope(&'blk self,
552 label: EarlyExitLabel)
554 debug!("trans_cleanups_to_exit_scope label={} scopes={}",
555 label, self.scopes_len());
557 let orig_scopes_len = self.scopes_len();
559 let mut popped_scopes = vec!();
561 // First we pop off all the cleanup stacks that are
562 // traversed until the exit is reached, pushing them
563 // onto the side vector `popped_scopes`. No code is
564 // generated at this time.
566 // So, continuing the example from above, we would wind up
567 // with a `popped_scopes` vector of `[AST 24, Custom 2]`.
568 // (Presuming that there are no cached exits)
// Stack exhausted: the exit target is function-wide (resume for
// unwinding, the shared return block for returns). Exiting a loop
// not on the stack is a compiler bug.
570 if self.scopes_len() == 0 {
573 // Generate a block that will `Resume`.
574 let prev_bcx = self.new_block(true, "resume", None);
575 let personality = self.personality.get().expect(
576 "create_landing_pad() should have set this");
577 build::Resume(prev_bcx,
578 build::Load(prev_bcx, personality));
579 prev_llbb = prev_bcx.llbb;
584 prev_llbb = self.get_llreturn();
589 self.ccx.sess().bug(format!(
590 "cannot exit from scope {}, \
591 not in scope", id)[]);
596 // Check if we have already cached the unwinding of this
597 // scope for this label. If so, we can stop popping scopes
598 // and branch to the cached label, since it contains the
599 // cleanups for any subsequent scopes.
600 match self.top_scope(|s| s.cached_early_exit(label)) {
601 Some(cleanup_block) => {
602 prev_llbb = cleanup_block;
608 // Pop off the scope, since we will be generating
609 // unwinding code for it. If we are searching for a loop exit,
610 // and this scope is that loop, then stop popping and set
611 // `prev_llbb` to the appropriate exit block from the loop.
612 popped_scopes.push(self.pop_scope());
613 let scope = popped_scopes.last().unwrap();
615 UnwindExit | ReturnExit => { }
616 LoopExit(id, exit) => {
617 match scope.kind.early_exit_block(id, exit) {
619 prev_llbb = exitllbb;
629 debug!("trans_cleanups_to_exit_scope: popped {} scopes",
630 popped_scopes.len());
632 // Now push the popped scopes back on. As we go,
633 // we track in `prev_llbb` the exit to which this scope
634 // should branch when it's done.
636 // So, continuing with our example, we will start out with
637 // `prev_llbb` being set to `break_blk` (or possibly a cached
638 // early exit). We will then pop the scopes from `popped_scopes`
639 // and generate a basic block for each one, prepending it in the
640 // series and updating `prev_llbb`. So we begin by popping `Custom 2`
641 // and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)`
642 // branch to `prev_llbb == break_blk`, giving us a sequence like:
644 // Cleanup(Custom 2) -> prev_llbb
646 // We then pop `AST 24` and repeat the process, giving us the sequence:
648 // Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb
650 // At this point, `popped_scopes` is empty, and so the final block
651 // that we return to the user is `Cleanup(AST 24)`.
652 while !popped_scopes.is_empty() {
653 let mut scope = popped_scopes.pop().unwrap();
655 if scope.cleanups.iter().any(|c| cleanup_is_suitable_for(&**c, label))
657 let name = scope.block_name("clean");
658 debug!("generating cleanups for {}", name);
659 let bcx_in = self.new_block(label.is_unwind(),
662 let mut bcx_out = bcx_in;
663 for cleanup in scope.cleanups.iter().rev() {
664 if cleanup_is_suitable_for(&**cleanup, label) {
665 bcx_out = cleanup.trans(bcx_out,
669 build::Br(bcx_out, prev_llbb);
670 prev_llbb = bcx_in.llbb;
672 debug!("no suitable cleanups in {}",
673 scope.block_name("clean"));
// Cache the exit for this label so later requests reuse it.
676 scope.add_cached_early_exit(label, prev_llbb);
677 self.push_scope(scope);
680 debug!("trans_cleanups_to_exit_scope: prev_llbb={}", prev_llbb);
682 assert_eq!(self.scopes_len(), orig_scopes_len);
686 /// Creates a landing pad for the top scope, if one does not exist. The landing pad will
687 /// perform all cleanups necessary for an unwind and then `resume` to continue error
690 /// landing_pad -> ... cleanups ... -> [resume]
692 /// (The cleanups and resume instruction are created by `trans_cleanups_to_exit_scope()`, not
693 /// in this function itself.)
694 fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
697 debug!("get_or_create_landing_pad");
699 // Check if a landing pad block exists; if not, create one.
701 let mut scopes = self.scopes.borrow_mut();
702 let last_scope = scopes.last_mut().unwrap();
703 match last_scope.cached_landing_pad {
704 Some(llbb) => { return llbb; }
706 let name = last_scope.block_name("unwind");
707 pad_bcx = self.new_block(true, name[], None);
708 last_scope.cached_landing_pad = Some(pad_bcx.llbb);
713 // The landing pad return type (the type being propagated). Not sure what
714 // this represents but it's determined by the personality function and
715 // this is what the EH proposal example uses.
716 let llretty = Type::struct_(self.ccx,
717 &[Type::i8p(self.ccx), Type::i32(self.ccx)],
720 // The exception handling personality function.
722 // If our compilation unit has the `eh_personality` lang item somewhere
723 // within it, then we just need to translate that. Otherwise, we're
724 // building an rlib which will depend on some upstream implementation of
725 // this function, so we just codegen a generic reference to it. We don't
726 // specify any of the types for the function, we just make it a symbol
727 // that LLVM can later use.
728 let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() {
729 Some(def_id) => callee::trans_fn_ref(pad_bcx, def_id, ExprId(0)),
731 let mut personality = self.ccx.eh_personality().borrow_mut();
733 Some(llpersonality) => llpersonality,
735 let fty = Type::variadic_func(&[], &Type::i32(self.ccx));
736 let f = base::decl_cdecl_fn(self.ccx,
737 "rust_eh_personality",
739 self.ccx.tcx().types.i32);
740 *personality = Some(f);
747 // The only landing pad clause will be 'cleanup'
748 let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1u);
750 // The landing pad block is a cleanup
751 build::SetCleanup(pad_bcx, llretval);
753 // We store the retval in a function-central alloca, so that calls to
754 // Resume can find it.
755 match self.personality.get() {
757 build::Store(pad_bcx, llretval, addr);
760 let addr = base::alloca(pad_bcx, common::val_ty(llretval), "");
761 self.personality.set(Some(addr));
762 build::Store(pad_bcx, llretval, addr);
766 // Generate the cleanup block and branch to it.
767 let cleanup_llbb = self.trans_cleanups_to_exit_scope(UnwindExit);
768 build::Br(pad_bcx, cleanup_llbb);
// Inherent methods on a single cleanup-scope stack entry.
774 impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
// Constructs an empty scope of the given kind with no cleanups and no
// cached exits. (Some field initializers are elided in this listing.)
775 fn new(kind: CleanupScopeKind<'blk, 'tcx>,
776 debug_loc: Option<NodeInfo>)
777 -> CleanupScope<'blk, 'tcx> {
780 debug_loc: debug_loc,
782 cached_early_exits: vec!(),
783 cached_landing_pad: None,
// Invalidates all cached exit blocks; called whenever the cleanup list
// changes, since cached blocks would miss the new cleanup.
787 fn clear_cached_exits(&mut self) {
788 self.cached_early_exits = vec!();
789 self.cached_landing_pad = None;
// Looks up a previously generated cleanup block for `label`, if any.
792 fn cached_early_exit(&self,
793 label: EarlyExitLabel)
794 -> Option<BasicBlockRef> {
795 self.cached_early_exits.iter().
796 find(|e| e.label == label).
797 map(|e| e.cleanup_block)
// Records `blk` as the cleanup block to reuse for `label`.
800 fn add_cached_early_exit(&mut self,
801 label: EarlyExitLabel,
802 blk: BasicBlockRef) {
803 self.cached_early_exits.push(
804 CachedEarlyExit { label: label,
805 cleanup_block: blk });
808 /// True if this scope has cleanups that need unwinding
809 fn needs_invoke(&self) -> bool {
811 self.cached_landing_pad.is_some() ||
812 self.cleanups.iter().any(|c| c.must_unwind())
815 /// Returns a suitable name to use for the basic block that handles this cleanup scope
816 fn block_name(&self, prefix: &str) -> String {
818 CustomScopeKind => format!("{}_custom_", prefix),
819 AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
820 LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id),
// Discards every cleanup except lifetime-end markers.
824 pub fn drop_non_lifetime_clean(&mut self) {
825 self.cleanups.retain(|c| c.is_lifetime_end());
// Predicate/accessor helpers for the scope-kind enum.
829 impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
// True only for custom (temporary) scopes.
830 fn is_temp(&self) -> bool {
832 CustomScopeKind => true,
833 LoopScopeKind(..) | AstScopeKind(..) => false,
// True if this is the AST scope for node `id`.
837 fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
839 CustomScopeKind | LoopScopeKind(..) => false,
840 AstScopeKind(i) => i == id
// True if this is the loop scope for node `id`.
844 fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
846 CustomScopeKind | AstScopeKind(..) => false,
847 LoopScopeKind(i, _) => i == id
851 /// If this is a loop scope with id `id`, return the early exit block `exit`, else `None`
852 fn early_exit_block(&self,
854 exit: uint) -> Option<BasicBlockRef> {
856 LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
862 impl EarlyExitLabel {
// Whether this label denotes the unwind path (the method body is elided
// in this listing — presumably matches on UnwindExit; TODO confirm).
863 fn is_unwind(&self) -> bool {
871 ///////////////////////////////////////////////////////////////////////////
// Cleanup that runs drop glue for a value. Field declarations are elided
// in this listing; the impl below reads self.val, self.ty, self.is_immediate
// and calls zero_mem (presumably gated on a zeroing flag — TODO confirm).
875 pub struct DropValue<'tcx> {
// Drop-glue cleanup: invokes glue::drop_ty / drop_ty_immediate on the
// stored value. (Method bodies for the predicates are elided here.)
883 impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
884 fn must_unwind(&self) -> bool {
888 fn clean_on_unwind(&self) -> bool {
892 fn is_lifetime_end(&self) -> bool {
896 fn trans<'blk>(&self,
897 bcx: Block<'blk, 'tcx>,
898 debug_loc: Option<NodeInfo>)
899 -> Block<'blk, 'tcx> {
// Immediate values are dropped directly; otherwise val is a pointer.
900 let bcx = if self.is_immediate {
901 glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc)
903 glue::drop_ty(bcx, self.val, self.ty, debug_loc)
// Zero the memory after dropping (guard condition elided in listing).
906 base::zero_mem(bcx, self.val, self.ty);
// NOTE(review): the derive below likely belongs to the elided Heap enum
// (its variants are re-exported at the top of the file) — TODO confirm.
912 #[deriving(Copy, Show)]
// Cleanup that shallowly frees a heap allocation; constructed in
// schedule_free_value with fields ptr, heap and content_ty (remaining
// field declaration lines are elided in this listing).
918 pub struct FreeValue<'tcx> {
// Free cleanup: releases the boxed allocation via exchange-free glue.
// (Predicate bodies and the heap match arms are elided in this listing.)
924 impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> {
925 fn must_unwind(&self) -> bool {
929 fn clean_on_unwind(&self) -> bool {
933 fn is_lifetime_end(&self) -> bool {
937 fn trans<'blk>(&self,
938 bcx: Block<'blk, 'tcx>,
939 debug_loc: Option<NodeInfo>)
940 -> Block<'blk, 'tcx> {
941 apply_debug_loc(bcx.fcx, debug_loc);
945 glue::trans_exchange_free_ty(bcx, self.ptr, self.content_ty)
// Cleanup that shallowly frees a dynamically-sized allocation; constructed
// in schedule_free_slice with fields ptr, size, align and heap (the field
// declaration lines are elided in this listing).
952 pub struct FreeSlice {
// Slice-free cleanup: releases the allocation with explicit size/align.
// (Predicate bodies and the heap match arms are elided in this listing.)
959 impl<'tcx> Cleanup<'tcx> for FreeSlice {
960 fn must_unwind(&self) -> bool {
964 fn clean_on_unwind(&self) -> bool {
968 fn is_lifetime_end(&self) -> bool {
972 fn trans<'blk>(&self,
973 bcx: Block<'blk, 'tcx>,
974 debug_loc: Option<NodeInfo>)
975 -> Block<'blk, 'tcx> {
976 apply_debug_loc(bcx.fcx, debug_loc);
980 glue::trans_exchange_free_dyn(bcx, self.ptr, self.size, self.align)
// Cleanup emitting a lifetime-end marker for a stack slot; the trans impl
// below calls base::call_lifetime_end on self.ptr (field declaration lines
// are elided in this listing).
987 pub struct LifetimeEnd {
// Lifetime-end cleanup: no user code runs, just an allocation-liveness
// marker. (Predicate bodies are elided in this listing.)
991 impl<'tcx> Cleanup<'tcx> for LifetimeEnd {
992 fn must_unwind(&self) -> bool {
996 fn clean_on_unwind(&self) -> bool {
1000 fn is_lifetime_end(&self) -> bool {
1004 fn trans<'blk>(&self,
1005 bcx: Block<'blk, 'tcx>,
1006 debug_loc: Option<NodeInfo>)
1007 -> Block<'blk, 'tcx> {
1008 apply_debug_loc(bcx.fcx, debug_loc);
1009 base::call_lifetime_end(bcx, self.ptr);
// Maps an expression id to the ScopeId of its temporary scope, as recorded
// in the region maps; reports a compiler bug if none is available.
1014 pub fn temporary_scope(tcx: &ty::ctxt,
1017 match tcx.region_maps.temporary_scope(id) {
1019 let r = AstScope(scope.node_id());
1020 debug!("temporary_scope({}) = {}", id, r);
1024 tcx.sess.bug(format!("no temporary scope available for expr {}",
// Maps a variable's node id to the ScopeId of the enclosing scope in which
// it lives, per the region maps.
1030 pub fn var_scope(tcx: &ty::ctxt,
1033 let r = AstScope(tcx.region_maps.var_scope(id).node_id());
1034 debug!("var_scope({}) = {}", id, r);
// A cleanup is emitted on a given exit path unless that path is an unwind
// and the cleanup opted out of running on unwind.
1038 fn cleanup_is_suitable_for(c: &Cleanup,
1039 label: EarlyExitLabel) -> bool {
1040 !label.is_unwind() || c.clean_on_unwind()
// Points debuginfo at the cleanup's source location, or clears it when the
// cleanup has none, so emitted drop/free calls get sensible line info.
1043 fn apply_debug_loc(fcx: &FunctionContext, debug_loc: Option<NodeInfo>) {
1045 Some(ref src_loc) => {
1046 debuginfo::set_source_location(fcx, src_loc.id, src_loc.span);
1049 debuginfo::clear_source_location(fcx);
1054 ///////////////////////////////////////////////////////////////////////////
1055 // These traits just exist to put the methods into this file.
// Public cleanup-scope interface, implemented for FunctionContext above.
// The trait exists only so the methods can live in this file; each method
// is documented at its implementation.
1057 pub trait CleanupMethods<'blk, 'tcx> {
1058 fn push_ast_cleanup_scope(&self, id: NodeInfo);
1059 fn push_loop_cleanup_scope(&self,
1061 exits: [Block<'blk, 'tcx>; EXIT_MAX]);
1062 fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
1063 fn push_custom_cleanup_scope_with_debug_loc(&self,
1064 debug_loc: NodeInfo)
1065 -> CustomScopeIndex;
1066 fn pop_and_trans_ast_cleanup_scope(&self,
1067 bcx: Block<'blk, 'tcx>,
1068 cleanup_scope: ast::NodeId)
1069 -> Block<'blk, 'tcx>;
1070 fn pop_loop_cleanup_scope(&self,
1071 cleanup_scope: ast::NodeId);
1072 fn pop_custom_cleanup_scope(&self,
1073 custom_scope: CustomScopeIndex);
1074 fn pop_and_trans_custom_cleanup_scope(&self,
1075 bcx: Block<'blk, 'tcx>,
1076 custom_scope: CustomScopeIndex)
1077 -> Block<'blk, 'tcx>;
1078 fn top_loop_scope(&self) -> ast::NodeId;
1079 fn normal_exit_block(&'blk self,
1080 cleanup_scope: ast::NodeId,
1081 exit: uint) -> BasicBlockRef;
1082 fn return_exit_block(&'blk self) -> BasicBlockRef;
1083 fn schedule_lifetime_end(&self,
1084 cleanup_scope: ScopeId,
1086 fn schedule_drop_mem(&self,
1087 cleanup_scope: ScopeId,
1090 fn schedule_drop_and_zero_mem(&self,
1091 cleanup_scope: ScopeId,
1094 fn schedule_drop_immediate(&self,
1095 cleanup_scope: ScopeId,
1098 fn schedule_free_value(&self,
1099 cleanup_scope: ScopeId,
1102 content_ty: Ty<'tcx>);
1103 fn schedule_free_slice(&self,
1104 cleanup_scope: ScopeId,
1109 fn schedule_clean(&self,
1110 cleanup_scope: ScopeId,
1111 cleanup: CleanupObj<'tcx>);
1112 fn schedule_clean_in_ast_scope(&self,
1113 cleanup_scope: ast::NodeId,
1114 cleanup: CleanupObj<'tcx>);
1115 fn schedule_clean_in_custom_scope(&self,
1116 custom_scope: CustomScopeIndex,
1117 cleanup: CleanupObj<'tcx>);
1118 fn needs_invoke(&self) -> bool;
1119 fn get_landing_pad(&'blk self) -> BasicBlockRef;
1122 trait CleanupHelperMethods<'blk, 'tcx> {
1123 fn top_ast_scope(&self) -> Option<ast::NodeId>;
1124 fn top_nonempty_cleanup_scope(&self) -> Option<uint>;
1125 fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
1126 fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
1127 fn trans_scope_cleanups(&self,
1128 bcx: Block<'blk, 'tcx>,
1129 scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
1130 fn trans_cleanups_to_exit_scope(&'blk self,
1131 label: EarlyExitLabel)
1133 fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
1134 fn scopes_len(&self) -> uint;
1135 fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
1136 fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
1137 fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R;