1 // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Code pertaining to cleanup of temporaries as well as execution of
12 //! drop glue. See discussion in `doc.rs` for a high-level summary.
14 pub use self::ScopeId::*;
15 pub use self::CleanupScopeKind::*;
16 pub use self::EarlyExitLabel::*;
17 pub use self::Heap::*;
19 use llvm::{BasicBlockRef, ValueRef};
24 use trans::common::{Block, FunctionContext, ExprId, NodeInfo};
28 use trans::type_::Type;
29 use middle::ty::{self, Ty};
32 use util::ppaux::Repr;
// NOTE(review): this is a sampled listing — each line carries its original
// line number and interior lines are missing (the numbering jumps). The
// doc comment below refers to an "id" field whose declaration is not in
// view; do not assume the visible fields are the complete struct.
34 pub struct CleanupScope<'blk, 'tcx: 'blk> {
35 // The id of this cleanup scope. If the id is None,
36 // this is a *temporary scope* that is pushed during trans to
37 // cleanup miscellaneous garbage that trans may generate whose
38 // lifetime is a subset of some expression. See module doc for
40 kind: CleanupScopeKind<'blk, 'tcx>,
42 // Cleanups to run upon scope exit.
43 cleanups: Vec<CleanupObj<'tcx>>,
45 // The debug location any drop calls generated for this scope will be
47 debug_loc: Option<NodeInfo>,
// Memoized early-exit blocks for this scope; invalidated by
// clear_cached_exits() whenever a new cleanup is scheduled (see below).
49 cached_early_exits: Vec<CachedEarlyExit>,
// Memoized unwind landing pad for this scope, if one was created.
50 cached_landing_pad: Option<BasicBlockRef>,
// Index of a custom (temporary) scope within the FunctionContext's scope
// stack. The `index` field itself is on a line missing from this listing.
54 pub struct CustomScopeIndex {
// Slots in the fixed-size exit-block array carried by a loop scope:
// EXIT_BREAK / EXIT_LOOP index it, EXIT_MAX is its length (see
// `[Block<'blk, 'tcx>; EXIT_MAX]` in LoopScopeKind below).
58 pub const EXIT_BREAK: uint = 0;
59 pub const EXIT_LOOP: uint = 1;
60 pub const EXIT_MAX: uint = 2;
// The three scope flavours: an AST scope tied to a NodeId, a loop scope
// carrying its break/continue target blocks, and (the CustomScopeKind
// variant — its declaration line is missing from this listing) a
// temporary scope with no AST id.
62 pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
64 AstScopeKind(ast::NodeId),
65 LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>; EXIT_MAX])
// Debug formatting; loop scopes print each exit block's address
// ({:p}). The surrounding `match` line and closing braces are not in
// view — fragment only.
68 impl<'blk, 'tcx: 'blk> fmt::Show for CleanupScopeKind<'blk, 'tcx> {
69 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
71 CustomScopeKind => write!(f, "CustomScopeKind"),
72 AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid),
73 LoopScopeKind(nid, ref blks) => {
74 try!(write!(f, "LoopScopeKind({}, [", nid));
75 for blk in blks.iter() {
76 try!(write!(f, "{:p}, ", blk));
// Identifies an early exit target: LoopExit(loop_id, exit_index) is
// visible here; the UnwindExit / ReturnExit variants (used elsewhere in
// this file) are on lines missing from this listing.
84 #[derive(Copy, PartialEq, Show)]
85 pub enum EarlyExitLabel {
88 LoopExit(ast::NodeId, uint)
// A (label -> cleanup block) memo entry stored in
// CleanupScope::cached_early_exits.
92 pub struct CachedEarlyExit {
93 label: EarlyExitLabel,
94 cleanup_block: BasicBlockRef,
// Interface implemented by each schedulable cleanup (DropValue,
// FreeValue, FreeSlice, LifetimeEnd below). `trans` emits the cleanup's
// code into `bcx` and returns the (possibly new) block.
97 pub trait Cleanup<'tcx> {
98 fn must_unwind(&self) -> bool;
99 fn clean_on_unwind(&self) -> bool;
100 fn is_lifetime_end(&self) -> bool;
101 fn trans<'blk>(&self,
102 bcx: Block<'blk, 'tcx>,
103 debug_loc: Option<NodeInfo>)
104 -> Block<'blk, 'tcx>;
// Owned, type-erased cleanup object.
107 pub type CleanupObj<'tcx> = Box<Cleanup<'tcx>+'tcx>;
// ScopeId addresses either an AST scope (by NodeId) or a custom scope
// (by stack index). The `pub enum ScopeId {` line itself is missing
// from this listing.
109 #[derive(Copy, Show)]
111 AstScope(ast::NodeId),
112 CustomScope(CustomScopeIndex)
// Public cleanup API on FunctionContext: push/pop scopes, schedule
// drops/frees, and obtain exit / landing-pad blocks.
// NOTE(review): heavily fragmented listing — many statements, match
// arms and closing braces are missing between the visible lines; read
// the original file before relying on any control-flow detail here.
115 impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
116 /// Invoked when we start to trans the code contained within a new cleanup scope.
117 fn push_ast_cleanup_scope(&self, debug_loc: NodeInfo) {
118 debug!("push_ast_cleanup_scope({})",
119 self.ccx.tcx().map.node_to_string(debug_loc.id));
121 // FIXME(#2202) -- currently closure bodies have a parent
122 // region, which messes up the assertion below, since there
123 // are no cleanup scopes on the stack at the start of
124 // trans'ing a closure body. I think though that this should
125 // eventually be fixed by closure bodies not having a parent
126 // region, though that's a touch unclear, and it might also be
127 // better just to narrow this assertion more (i.e., by
128 // excluding id's that correspond to closure bodies only). For
129 // now we just say that if there is already an AST scope on the stack,
130 // this new AST scope had better be its immediate child.
131 let top_scope = self.top_ast_scope();
132 if top_scope.is_some() {
// (the assert_eq! wrapping these two lines is missing from the listing)
136 .opt_encl_scope(region::CodeExtent::from_node_id(debug_loc.id))
137 .map(|s|s.node_id()),
141 self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
// Pushes a loop scope; `id` parameter line is missing from this view.
// The assert checks the loop's AST scope was pushed immediately before.
145 fn push_loop_cleanup_scope(&self,
147 exits: [Block<'blk, 'tcx>; EXIT_MAX]) {
148 debug!("push_loop_cleanup_scope({})",
149 self.ccx.tcx().map.node_to_string(id));
150 assert_eq!(Some(id), self.top_ast_scope());
152 // Just copy the debuginfo source location from the enclosing scope
153 let debug_loc = self.scopes
159 self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc));
// Pushes a temporary scope; returns its stack index so the caller can
// pop exactly this scope later.
162 fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
163 let index = self.scopes_len();
164 debug!("push_custom_cleanup_scope(): {}", index);
166 // Just copy the debuginfo source location from the enclosing scope
167 let debug_loc = self.scopes
170 .map(|opt_scope| opt_scope.debug_loc)
173 self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc));
174 CustomScopeIndex { index: index }
// Same as above but with an explicit debug location (the `debug_loc`
// parameter line is missing from this view).
177 fn push_custom_cleanup_scope_with_debug_loc(&self,
179 -> CustomScopeIndex {
180 let index = self.scopes_len();
181 debug!("push_custom_cleanup_scope(): {}", index);
183 self.push_scope(CleanupScope::new(CustomScopeKind, Some(debug_loc)));
184 CustomScopeIndex { index: index }
187 /// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup
188 /// stack, and generates the code to do its cleanups for normal exit.
189 fn pop_and_trans_ast_cleanup_scope(&self,
190 bcx: Block<'blk, 'tcx>,
191 cleanup_scope: ast::NodeId)
192 -> Block<'blk, 'tcx> {
193 debug!("pop_and_trans_ast_cleanup_scope({})",
194 self.ccx.tcx().map.node_to_string(cleanup_scope));
196 assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));
198 let scope = self.pop_scope();
199 self.trans_scope_cleanups(bcx, &scope)
202 /// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the
203 /// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by
204 /// branching to a block generated by `normal_exit_block`.
205 fn pop_loop_cleanup_scope(&self,
206 cleanup_scope: ast::NodeId) {
207 debug!("pop_loop_cleanup_scope({})",
208 self.ccx.tcx().map.node_to_string(cleanup_scope));
210 assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));
212 let _ = self.pop_scope();
215 /// Removes the top cleanup scope from the stack without executing its cleanups. The top
216 /// cleanup scope must be the temporary scope `custom_scope`.
217 fn pop_custom_cleanup_scope(&self,
218 custom_scope: CustomScopeIndex) {
219 debug!("pop_custom_cleanup_scope({})", custom_scope.index);
220 assert!(self.is_valid_to_pop_custom_scope(custom_scope));
221 let _ = self.pop_scope();
224 /// Removes the top cleanup scope from the stack, which must be a temporary scope, and
225 /// generates the code to do its cleanups for normal exit.
226 fn pop_and_trans_custom_cleanup_scope(&self,
227 bcx: Block<'blk, 'tcx>,
228 custom_scope: CustomScopeIndex)
229 -> Block<'blk, 'tcx> {
230 debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope);
231 assert!(self.is_valid_to_pop_custom_scope(custom_scope));
233 let scope = self.pop_scope();
234 self.trans_scope_cleanups(bcx, &scope)
237 /// Returns the id of the top-most loop scope
238 fn top_loop_scope(&self) -> ast::NodeId {
239 for scope in self.scopes.borrow().iter().rev() {
240 if let LoopScopeKind(id, _) = scope.kind {
// (the `return id;` for the match above is missing from the listing)
244 self.ccx.sess().bug("no loop scope found");
247 /// Returns a block to branch to which will perform all pending cleanups and then
248 /// break/continue (depending on `exit`) out of the loop with id `cleanup_scope`
249 fn normal_exit_block(&'blk self,
250 cleanup_scope: ast::NodeId,
251 exit: uint) -> BasicBlockRef {
252 self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
255 /// Returns a block to branch to which will perform all pending cleanups and then return from
257 fn return_exit_block(&'blk self) -> BasicBlockRef {
258 self.trans_cleanups_to_exit_scope(ReturnExit)
// Schedules an llvm.lifetime.end marker for `val` (the `val: ValueRef`
// parameter line and parts of the struct literal are missing here).
261 fn schedule_lifetime_end(&self,
262 cleanup_scope: ScopeId,
264 let drop = box LifetimeEnd {
268 debug!("schedule_lifetime_end({:?}, val={})",
270 self.ccx.tn().val_to_string(val));
272 self.schedule_clean(cleanup_scope, drop as CleanupObj);
275 /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
276 fn schedule_drop_mem(&self,
277 cleanup_scope: ScopeId,
// Fast path: types that need no drop glue schedule nothing at all.
280 if !common::type_needs_drop(self.ccx.tcx(), ty) { return; }
281 let drop = box DropValue {
283 must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
289 debug!("schedule_drop_mem({:?}, val={}, ty={})",
291 self.ccx.tn().val_to_string(val),
292 ty.repr(self.ccx.tcx()));
294 self.schedule_clean(cleanup_scope, drop as CleanupObj);
297 /// Schedules a (deep) drop and zero-ing of `val`, which is a pointer to an instance of `ty`
298 fn schedule_drop_and_zero_mem(&self,
299 cleanup_scope: ScopeId,
302 if !common::type_needs_drop(self.ccx.tcx(), ty) { return; }
303 let drop = box DropValue {
305 must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
311 debug!("schedule_drop_and_zero_mem({:?}, val={}, ty={}, zero={})",
313 self.ccx.tn().val_to_string(val),
314 ty.repr(self.ccx.tcx()),
317 self.schedule_clean(cleanup_scope, drop as CleanupObj);
320 /// Schedules a (deep) drop of `val`, which is an instance of `ty`
321 fn schedule_drop_immediate(&self,
322 cleanup_scope: ScopeId,
326 if !common::type_needs_drop(self.ccx.tcx(), ty) { return; }
327 let drop = box DropValue {
329 must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
335 debug!("schedule_drop_immediate({:?}, val={}, ty={:?})",
337 self.ccx.tn().val_to_string(val),
338 ty.repr(self.ccx.tcx()));
340 self.schedule_clean(cleanup_scope, drop as CleanupObj);
343 /// Schedules a call to `free(val)`. Note that this is a shallow operation.
344 fn schedule_free_value(&self,
345 cleanup_scope: ScopeId,
348 content_ty: Ty<'tcx>) {
349 let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };
351 debug!("schedule_free_value({:?}, val={}, heap={:?})",
353 self.ccx.tn().val_to_string(val),
356 self.schedule_clean(cleanup_scope, drop as CleanupObj);
359 /// Schedules a call to `free(val)`. Note that this is a shallow operation.
360 fn schedule_free_slice(&self,
361 cleanup_scope: ScopeId,
366 let drop = box FreeSlice { ptr: val, size: size, align: align, heap: heap };
368 debug!("schedule_free_slice({:?}, val={}, heap={:?})",
370 self.ccx.tn().val_to_string(val),
373 self.schedule_clean(cleanup_scope, drop as CleanupObj);
// Dispatches on the ScopeId variant to the appropriate scheduler.
376 fn schedule_clean(&self,
377 cleanup_scope: ScopeId,
378 cleanup: CleanupObj<'tcx>) {
379 match cleanup_scope {
380 AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
381 CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
385 /// Schedules a cleanup to occur upon exit from `cleanup_scope`. If `cleanup_scope` is not
386 /// provided, then the cleanup is scheduled in the topmost scope, which must be a temporary
388 fn schedule_clean_in_ast_scope(&self,
389 cleanup_scope: ast::NodeId,
390 cleanup: CleanupObj<'tcx>) {
391 debug!("schedule_clean_in_ast_scope(cleanup_scope={})",
// Walk the scope stack top-down looking for the matching AST scope;
// every scope passed over has its cached exits invalidated because a
// cleanup is being added to an enclosing scope.
394 for scope in self.scopes.borrow_mut().iter_mut().rev() {
395 if scope.kind.is_ast_with_id(cleanup_scope) {
396 scope.cleanups.push(cleanup);
397 scope.clear_cached_exits();
400 // will be adding a cleanup to some enclosing scope
401 scope.clear_cached_exits();
// If no matching scope is found this is a compiler bug (the
// sess().bug call wrapping this format! is missing from the listing).
406 format!("no cleanup scope {} found",
407 self.ccx.tcx().map.node_to_string(cleanup_scope)).index(&FullRange));
410 /// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope.
411 fn schedule_clean_in_custom_scope(&self,
412 custom_scope: CustomScopeIndex,
413 cleanup: CleanupObj<'tcx>) {
414 debug!("schedule_clean_in_custom_scope(custom_scope={})",
417 assert!(self.is_valid_custom_scope(custom_scope));
419 let mut scopes = self.scopes.borrow_mut();
420 let scope = &mut (*scopes)[custom_scope.index];
421 scope.cleanups.push(cleanup);
422 scope.clear_cached_exits();
425 /// Returns true if there are pending cleanups that should execute on panic.
426 fn needs_invoke(&self) -> bool {
427 self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
430 /// Returns a basic block to branch to in the event of a panic. This block will run the panic
431 /// cleanups and eventually invoke the LLVM `Resume` instruction.
432 fn get_landing_pad(&'blk self) -> BasicBlockRef {
433 let _icx = base::push_ctxt("get_landing_pad");
435 debug!("get_landing_pad");
437 let orig_scopes_len = self.scopes_len();
438 assert!(orig_scopes_len > 0);
440 // Remove any scopes that do not have cleanups on panic:
441 let mut popped_scopes = vec!();
442 while !self.top_scope(|s| s.needs_invoke()) {
443 debug!("top scope does not need invoke");
444 popped_scopes.push(self.pop_scope());
447 // Check for an existing landing pad in the new topmost scope:
448 let llbb = self.get_or_create_landing_pad();
450 // Push the scopes we removed back on:
// (the `loop {` around this match, and the None => break arm, are
// missing from the listing)
452 match popped_scopes.pop() {
453 Some(scope) => self.push_scope(scope),
// Invariant: the stack is restored to exactly its original depth.
458 assert_eq!(self.scopes_len(), orig_scopes_len);
// Private helper methods backing the public CleanupMethods API: stack
// bookkeeping plus the two code generators (trans_cleanups_to_exit_scope
// and get_or_create_landing_pad).
// NOTE(review): fragmented listing — interior lines, match headers and
// closing braces are missing throughout; verify against the full file.
464 impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
465 /// Returns the id of the current top-most AST scope, if any.
466 fn top_ast_scope(&self) -> Option<ast::NodeId> {
467 for scope in self.scopes.borrow().iter().rev() {
469 CustomScopeKind | LoopScopeKind(..) => {}
// (the AstScopeKind arm returning Some(id), and the final None, are
// missing from the listing)
478 fn top_nonempty_cleanup_scope(&self) -> Option<uint> {
479 self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())
// A custom scope may only be popped when it is the topmost scope.
482 fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
483 self.is_valid_custom_scope(custom_scope) &&
484 custom_scope.index == self.scopes.borrow().len() - 1
487 fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
488 let scopes = self.scopes.borrow();
489 custom_scope.index < scopes.len() &&
490 (*scopes)[custom_scope.index].kind.is_temp()
493 /// Generates the cleanups for `scope` into `bcx`
494 fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
495 bcx: Block<'blk, 'tcx>,
496 scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> {
// Cleanups run in reverse order of scheduling; nothing is emitted into
// an unreachable block.
499 if !bcx.unreachable.get() {
500 for cleanup in scope.cleanups.iter().rev() {
501 bcx = cleanup.trans(bcx, scope.debug_loc);
507 fn scopes_len(&self) -> uint {
508 self.scopes.borrow().len()
511 fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
512 self.scopes.borrow_mut().push(scope)
515 fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
516 debug!("popping cleanup scope {}, {} scopes remaining",
517 self.top_scope(|s| s.block_name("")),
518 self.scopes_len() - 1);
520 self.scopes.borrow_mut().pop().unwrap()
523 fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R {
524 f(self.scopes.borrow().last().unwrap())
527 /// Used when the caller wishes to jump to an early exit, such as a return, break, continue, or
528 /// unwind. This function will generate all cleanups between the top of the stack and the exit
529 /// `label` and return a basic block that the caller can branch to.
531 /// For example, if the current stack of cleanups were as follows:
540 /// and the `label` specifies a break from `Loop 23`, then this function would generate a
541 /// series of basic blocks as follows:
543 /// Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
545 /// where `break_blk` is the block specified in `Loop 23` as the target for breaks. The return
546 /// value would be the first basic block in that sequence (`Cleanup(AST 24)`). The caller could
547 /// then branch to `Cleanup(AST 24)` and it will perform all cleanups and finally branch to the
549 fn trans_cleanups_to_exit_scope(&'blk self,
550 label: EarlyExitLabel)
552 debug!("trans_cleanups_to_exit_scope label={:?} scopes={}",
553 label, self.scopes_len());
555 let orig_scopes_len = self.scopes_len();
557 let mut popped_scopes = vec!();
559 // First we pop off all the cleanup stacks that are
560 // traversed until the exit is reached, pushing them
561 // onto the side vector `popped_scopes`. No code is
562 // generated at this time.
564 // So, continuing the example from above, we would wind up
565 // with a `popped_scopes` vector of `[AST 24, Custom 2]`.
566 // (Presuming that there are no cached exits)
// (the `loop {` and the match on `label` selecting the arms below are
// missing from the listing; `prev_llbb`'s declaration is also not in
// view)
568 if self.scopes_len() == 0 {
571 // Generate a block that will `Resume`.
572 let prev_bcx = self.new_block(true, "resume", None);
573 let personality = self.personality.get().expect(
574 "create_landing_pad() should have set this");
575 build::Resume(prev_bcx,
576 build::Load(prev_bcx, personality));
577 prev_llbb = prev_bcx.llbb;
// ReturnExit with an empty stack branches to the function epilogue.
582 prev_llbb = self.get_llreturn();
// LoopExit with an empty stack means the loop id was never found.
587 self.ccx.sess().bug(format!(
588 "cannot exit from scope {}, \
589 not in scope", id).index(&FullRange));
594 // Check if we have already cached the unwinding of this
595 // scope for this label. If so, we can stop popping scopes
596 // and branch to the cached label, since it contains the
597 // cleanups for any subsequent scopes.
598 match self.top_scope(|s| s.cached_early_exit(label)) {
599 Some(cleanup_block) => {
600 prev_llbb = cleanup_block;
606 // Pop off the scope, since we will be generating
607 // unwinding code for it. If we are searching for a loop exit,
608 // and this scope is that loop, then stop popping and set
609 // `prev_llbb` to the appropriate exit block from the loop.
610 popped_scopes.push(self.pop_scope());
611 let scope = popped_scopes.last().unwrap();
613 UnwindExit | ReturnExit => { }
614 LoopExit(id, exit) => {
615 match scope.kind.early_exit_block(id, exit) {
617 prev_llbb = exitllbb;
627 debug!("trans_cleanups_to_exit_scope: popped {} scopes",
628 popped_scopes.len());
630 // Now push the popped scopes back on. As we go,
631 // we track in `prev_llbb` the exit to which this scope
632 // should branch when it's done.
634 // So, continuing with our example, we will start out with
635 // `prev_llbb` being set to `break_blk` (or possibly a cached
636 // early exit). We will then pop the scopes from `popped_scopes`
637 // and generate a basic block for each one, prepending it in the
638 // series and updating `prev_llbb`. So we begin by popping `Custom 2`
639 // and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)`
640 // branch to `prev_llbb == break_blk`, giving us a sequence like:
642 // Cleanup(Custom 2) -> prev_llbb
644 // We then pop `AST 24` and repeat the process, giving us the sequence:
646 // Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb
648 // At this point, `popped_scopes` is empty, and so the final block
649 // that we return to the user is `Cleanup(AST 24)`.
650 while !popped_scopes.is_empty() {
651 let mut scope = popped_scopes.pop().unwrap();
653 if scope.cleanups.iter().any(|c| cleanup_is_suitable_for(&**c, label))
655 let name = scope.block_name("clean");
656 debug!("generating cleanups for {}", name);
657 let bcx_in = self.new_block(label.is_unwind(),
658 name.index(&FullRange),
660 let mut bcx_out = bcx_in;
661 for cleanup in scope.cleanups.iter().rev() {
662 if cleanup_is_suitable_for(&**cleanup, label) {
663 bcx_out = cleanup.trans(bcx_out,
667 build::Br(bcx_out, prev_llbb);
668 prev_llbb = bcx_in.llbb;
670 debug!("no suitable cleanups in {}",
671 scope.block_name("clean"));
// Memoize the exit block on the scope so later requests for the same
// label reuse it, then restore the scope to the stack.
674 scope.add_cached_early_exit(label, prev_llbb);
675 self.push_scope(scope);
678 debug!("trans_cleanups_to_exit_scope: prev_llbb={:?}", prev_llbb);
680 assert_eq!(self.scopes_len(), orig_scopes_len);
684 /// Creates a landing pad for the top scope, if one does not exist. The landing pad will
685 /// perform all cleanups necessary for an unwind and then `resume` to continue error
688 /// landing_pad -> ... cleanups ... -> [resume]
690 /// (The cleanups and resume instruction are created by `trans_cleanups_to_exit_scope()`, not
691 /// in this function itself.)
692 fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
695 debug!("get_or_create_landing_pad");
697 // Check if a landing pad block exists; if not, create one.
// (the `let pad_bcx;` declaration and the block scoping this
// borrow_mut are missing from the listing)
699 let mut scopes = self.scopes.borrow_mut();
700 let last_scope = scopes.last_mut().unwrap();
701 match last_scope.cached_landing_pad {
702 Some(llbb) => { return llbb; }
704 let name = last_scope.block_name("unwind");
705 pad_bcx = self.new_block(true, name.index(&FullRange), None);
706 last_scope.cached_landing_pad = Some(pad_bcx.llbb);
711 // The landing pad return type (the type being propagated). Not sure what
712 // this represents but it's determined by the personality function and
713 // this is what the EH proposal example uses.
714 let llretty = Type::struct_(self.ccx,
715 &[Type::i8p(self.ccx), Type::i32(self.ccx)],
718 // The exception handling personality function.
720 // If our compilation unit has the `eh_personality` lang item somewhere
721 // within it, then we just need to translate that. Otherwise, we're
722 // building an rlib which will depend on some upstream implementation of
723 // this function, so we just codegen a generic reference to it. We don't
724 // specify any of the types for the function, we just make it a symbol
725 // that LLVM can later use.
726 let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() {
728 callee::trans_fn_ref(pad_bcx.ccx(), def_id, ExprId(0),
729 pad_bcx.fcx.param_substs).val
// Fallback path: declare (once, memoized in ccx) an external
// `rust_eh_personality` symbol for LLVM to resolve at link time.
732 let mut personality = self.ccx.eh_personality().borrow_mut();
734 Some(llpersonality) => llpersonality,
736 let fty = Type::variadic_func(&[], &Type::i32(self.ccx));
737 let f = base::decl_cdecl_fn(self.ccx,
738 "rust_eh_personality",
740 self.ccx.tcx().types.i32);
741 *personality = Some(f);
748 // The only landing pad clause will be 'cleanup'
749 let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1u);
751 // The landing pad block is a cleanup
752 build::SetCleanup(pad_bcx, llretval);
754 // We store the retval in a function-central alloca, so that calls to
755 // Resume can find it.
756 match self.personality.get() {
758 build::Store(pad_bcx, llretval, addr);
761 let addr = base::alloca(pad_bcx, common::val_ty(llretval), "");
762 self.personality.set(Some(addr));
763 build::Store(pad_bcx, llretval, addr);
767 // Generate the cleanup block and branch to it.
768 let cleanup_llbb = self.trans_cleanups_to_exit_scope(UnwindExit);
769 build::Br(pad_bcx, cleanup_llbb);
// Inherent methods on CleanupScope: construction, exit-block caching,
// and queries used by the FunctionContext helpers above.
775 impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
776 fn new(kind: CleanupScopeKind<'blk, 'tcx>,
777 debug_loc: Option<NodeInfo>)
778 -> CleanupScope<'blk, 'tcx> {
// (the struct-literal opening and the kind/cleanups field initializers
// are missing from this listing)
781 debug_loc: debug_loc,
783 cached_early_exits: vec!(),
784 cached_landing_pad: None,
// Invalidate both caches; called whenever a new cleanup is scheduled
// in or beneath this scope, since cached blocks would miss it.
788 fn clear_cached_exits(&mut self) {
789 self.cached_early_exits = vec!();
790 self.cached_landing_pad = None;
793 fn cached_early_exit(&self,
794 label: EarlyExitLabel)
795 -> Option<BasicBlockRef> {
796 self.cached_early_exits.iter().
797 find(|e| e.label == label).
798 map(|e| e.cleanup_block)
801 fn add_cached_early_exit(&mut self,
802 label: EarlyExitLabel,
803 blk: BasicBlockRef) {
804 self.cached_early_exits.push(
805 CachedEarlyExit { label: label,
806 cleanup_block: blk });
809 /// True if this scope has cleanups that need unwinding
810 fn needs_invoke(&self) -> bool {
812 self.cached_landing_pad.is_some() ||
813 self.cleanups.iter().any(|c| c.must_unwind())
816 /// Returns a suitable name to use for the basic block that handles this cleanup scope
817 fn block_name(&self, prefix: &str) -> String {
819 CustomScopeKind => format!("{}_custom_", prefix),
820 AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
821 LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id),
// Drops every scheduled cleanup except lifetime-end markers.
// NOTE(review): the retain predicate keeps cleanups where
// is_lifetime_end() is true, which matches the name only if the intent
// is "drop everything that is NOT a lifetime marker" — confirm against
// callers in the full file.
825 pub fn drop_non_lifetime_clean(&mut self) {
826 self.cleanups.retain(|c| c.is_lifetime_end());
// Predicate helpers on the scope kind (match headers and closing braces
// are missing from this listing).
830 impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
831 fn is_temp(&self) -> bool {
833 CustomScopeKind => true,
834 LoopScopeKind(..) | AstScopeKind(..) => false,
838 fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
840 CustomScopeKind | LoopScopeKind(..) => false,
841 AstScopeKind(i) => i == id
845 fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
847 CustomScopeKind | AstScopeKind(..) => false,
848 LoopScopeKind(i, _) => i == id
852 /// If this is a loop scope with id `id`, return the early exit block `exit`, else `None`
853 fn early_exit_block(&self,
855 exit: uint) -> Option<BasicBlockRef> {
857 LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
// Fragment: the body of is_unwind (presumably matching on the
// UnwindExit variant — confirm in the full file) is missing here.
863 impl EarlyExitLabel {
864 fn is_unwind(&self) -> bool {
872 ///////////////////////////////////////////////////////////////////////////
// Cleanup that runs drop glue for a value. Field declarations
// (is_immediate, must_unwind, val, ty, zero — inferred from the uses
// below and the schedule_* constructors above; confirm in full file)
// are missing from this listing, as are the bodies of the three
// predicate methods.
876 pub struct DropValue<'tcx> {
884 impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
885 fn must_unwind(&self) -> bool {
889 fn clean_on_unwind(&self) -> bool {
893 fn is_lifetime_end(&self) -> bool {
897 fn trans<'blk>(&self,
898 bcx: Block<'blk, 'tcx>,
899 debug_loc: Option<NodeInfo>)
900 -> Block<'blk, 'tcx> {
// Immediate values get drop_ty_immediate; in-memory values get
// drop_ty, and (per the zero_mem line below) are optionally zeroed
// afterwards — the `if` guarding zero_mem is missing from the listing.
901 let bcx = if self.is_immediate {
902 glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc)
904 glue::drop_ty(bcx, self.val, self.ty, debug_loc)
907 base::zero_mem(bcx, self.val, self.ty);
// Shallow free of a heap pointer. The Heap enum this derive likely
// attaches to, the FreeValue field list, and the predicate bodies are
// missing from this listing.
913 #[derive(Copy, Show)]
919 pub struct FreeValue<'tcx> {
925 impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> {
926 fn must_unwind(&self) -> bool {
930 fn clean_on_unwind(&self) -> bool {
934 fn is_lifetime_end(&self) -> bool {
938 fn trans<'blk>(&self,
939 bcx: Block<'blk, 'tcx>,
940 debug_loc: Option<NodeInfo>)
941 -> Block<'blk, 'tcx> {
942 apply_debug_loc(bcx.fcx, debug_loc);
// (the match on self.heap selecting this exchange-free call is
// missing from the listing)
946 glue::trans_exchange_free_ty(bcx, self.ptr, self.content_ty)
// Shallow free of a dynamically-sized allocation, carrying explicit
// size and align (field declarations are missing from this listing).
953 pub struct FreeSlice {
960 impl<'tcx> Cleanup<'tcx> for FreeSlice {
961 fn must_unwind(&self) -> bool {
965 fn clean_on_unwind(&self) -> bool {
969 fn is_lifetime_end(&self) -> bool {
973 fn trans<'blk>(&self,
974 bcx: Block<'blk, 'tcx>,
975 debug_loc: Option<NodeInfo>)
976 -> Block<'blk, 'tcx> {
977 apply_debug_loc(bcx.fcx, debug_loc);
981 glue::trans_exchange_free_dyn(bcx, self.ptr, self.size, self.align)
// Cleanup that emits an llvm lifetime-end marker for `ptr` (field
// declaration and predicate bodies missing from this listing).
988 pub struct LifetimeEnd {
992 impl<'tcx> Cleanup<'tcx> for LifetimeEnd {
993 fn must_unwind(&self) -> bool {
997 fn clean_on_unwind(&self) -> bool {
1001 fn is_lifetime_end(&self) -> bool {
1005 fn trans<'blk>(&self,
1006 bcx: Block<'blk, 'tcx>,
1007 debug_loc: Option<NodeInfo>)
1008 -> Block<'blk, 'tcx> {
1009 apply_debug_loc(bcx.fcx, debug_loc);
1010 base::call_lifetime_end(bcx, self.ptr);
// Maps an expression id to the ScopeId of its temporary scope, or
// reports a compiler bug if the region maps have none. (The `id`
// parameter line, match header, and `r` return are missing from this
// listing.)
1015 pub fn temporary_scope(tcx: &ty::ctxt,
1018 match tcx.region_maps.temporary_scope(id) {
1020 let r = AstScope(scope.node_id());
1021 debug!("temporary_scope({}) = {:?}", id, r);
1025 tcx.sess.bug(format!("no temporary scope available for expr {}",
1026 id).index(&FullRange))
// Maps a variable id to the ScopeId of its enclosing scope.
1031 pub fn var_scope(tcx: &ty::ctxt,
1034 let r = AstScope(tcx.region_maps.var_scope(id).node_id());
1035 debug!("var_scope({}) = {:?}", id, r);
// A cleanup participates in an exit path unless the exit is an unwind
// and the cleanup opted out of unwind cleanup.
1039 fn cleanup_is_suitable_for(c: &Cleanup,
1040 label: EarlyExitLabel) -> bool {
1041 !label.is_unwind() || c.clean_on_unwind()
// Sets or clears the debuginfo source location before a cleanup call is
// emitted (the match header and closing braces are missing here).
1044 fn apply_debug_loc(fcx: &FunctionContext, debug_loc: Option<NodeInfo>) {
1046 Some(ref src_loc) => {
1047 debuginfo::set_source_location(fcx, src_loc.id, src_loc.span);
1050 debuginfo::clear_source_location(fcx);
1055 ///////////////////////////////////////////////////////////////////////////
1056 // These traits just exist to put the methods into this file.
// Trait declarations for the two impl blocks above; as the comment at
// line 1056 says, they exist only to hang the methods off
// FunctionContext from this file. Several parameter lines are missing
// from this listing, and the CleanupHelperMethods declaration runs past
// the end of the visible range.
1058 pub trait CleanupMethods<'blk, 'tcx> {
1059 fn push_ast_cleanup_scope(&self, id: NodeInfo);
1060 fn push_loop_cleanup_scope(&self,
1062 exits: [Block<'blk, 'tcx>; EXIT_MAX]);
1063 fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
1064 fn push_custom_cleanup_scope_with_debug_loc(&self,
1065 debug_loc: NodeInfo)
1066 -> CustomScopeIndex;
1067 fn pop_and_trans_ast_cleanup_scope(&self,
1068 bcx: Block<'blk, 'tcx>,
1069 cleanup_scope: ast::NodeId)
1070 -> Block<'blk, 'tcx>;
1071 fn pop_loop_cleanup_scope(&self,
1072 cleanup_scope: ast::NodeId);
1073 fn pop_custom_cleanup_scope(&self,
1074 custom_scope: CustomScopeIndex);
1075 fn pop_and_trans_custom_cleanup_scope(&self,
1076 bcx: Block<'blk, 'tcx>,
1077 custom_scope: CustomScopeIndex)
1078 -> Block<'blk, 'tcx>;
1079 fn top_loop_scope(&self) -> ast::NodeId;
1080 fn normal_exit_block(&'blk self,
1081 cleanup_scope: ast::NodeId,
1082 exit: uint) -> BasicBlockRef;
1083 fn return_exit_block(&'blk self) -> BasicBlockRef;
1084 fn schedule_lifetime_end(&self,
1085 cleanup_scope: ScopeId,
1087 fn schedule_drop_mem(&self,
1088 cleanup_scope: ScopeId,
1091 fn schedule_drop_and_zero_mem(&self,
1092 cleanup_scope: ScopeId,
1095 fn schedule_drop_immediate(&self,
1096 cleanup_scope: ScopeId,
1099 fn schedule_free_value(&self,
1100 cleanup_scope: ScopeId,
1103 content_ty: Ty<'tcx>);
1104 fn schedule_free_slice(&self,
1105 cleanup_scope: ScopeId,
1110 fn schedule_clean(&self,
1111 cleanup_scope: ScopeId,
1112 cleanup: CleanupObj<'tcx>);
1113 fn schedule_clean_in_ast_scope(&self,
1114 cleanup_scope: ast::NodeId,
1115 cleanup: CleanupObj<'tcx>);
1116 fn schedule_clean_in_custom_scope(&self,
1117 custom_scope: CustomScopeIndex,
1118 cleanup: CleanupObj<'tcx>);
1119 fn needs_invoke(&self) -> bool;
1120 fn get_landing_pad(&'blk self) -> BasicBlockRef;
// Private companion trait for the helper methods.
1123 trait CleanupHelperMethods<'blk, 'tcx> {
1124 fn top_ast_scope(&self) -> Option<ast::NodeId>;
1125 fn top_nonempty_cleanup_scope(&self) -> Option<uint>;
1126 fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
1127 fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
1128 fn trans_scope_cleanups(&self,
1129 bcx: Block<'blk, 'tcx>,
1130 scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
1131 fn trans_cleanups_to_exit_scope(&'blk self,
1132 label: EarlyExitLabel)
1134 fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
1135 fn scopes_len(&self) -> uint;
1136 fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
1137 fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
1138 fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R;