1 // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Code pertaining to cleanup of temporaries as well as execution of
12 //! drop glue. See discussion in `doc.rs` for a high-level summary.
14 pub use self::ScopeId::*;
15 pub use self::CleanupScopeKind::*;
16 pub use self::EarlyExitLabel::*;
17 pub use self::Heap::*;
19 use llvm::{BasicBlockRef, ValueRef};
24 use trans::common::{Block, FunctionContext, ExprId, NodeInfo};
28 use trans::type_::Type;
29 use middle::ty::{mod, Ty};
32 use util::ppaux::Repr;
/// One entry in the function's stack of cleanup scopes. Scopes are pushed and
/// popped as trans enters/leaves AST scopes, loops, and temporary ("custom")
/// scopes; each carries the cleanups to run when the scope is exited.
pub struct CleanupScope<'blk, 'tcx: 'blk> {
    // The kind of this cleanup scope (AST node, loop, or custom). A
    // *temporary scope* is pushed during trans to clean up miscellaneous
    // garbage that trans may generate whose lifetime is a subset of some
    // expression. See module doc for more details.
    kind: CleanupScopeKind<'blk, 'tcx>,

    // Cleanups to run upon scope exit.
    cleanups: Vec<CleanupObj<'tcx>>,

    // The debug location any drop calls generated for this scope will be
    // associated with.
    debug_loc: Option<NodeInfo>,

    // Cleanup blocks already generated for early exits (break/continue/
    // return/unwind) through this scope; invalidated when cleanups change.
    cached_early_exits: Vec<CachedEarlyExit>,
    // Cached landing pad for unwinding through this scope, if one exists.
    cached_landing_pad: Option<BasicBlockRef>,
/// Index identifying a custom (temporary) scope by its position in the
/// cleanup-scope stack.
// NOTE(review): the struct's field declarations are elided in this excerpt.
pub struct CustomScopeIndex {

impl Copy for CustomScopeIndex {}

// Indices into a loop scope's array of early-exit target blocks:
// `EXIT_BREAK` is the `break` target, `EXIT_LOOP` the `continue` target,
// and `EXIT_MAX` sizes the array.
pub const EXIT_BREAK: uint = 0;
pub const EXIT_LOOP: uint = 1;
pub const EXIT_MAX: uint = 2;
/// Discriminates the kinds of cleanup scope.
// NOTE(review): the `CustomScopeKind` variant (referenced throughout this
// file) is elided from this excerpt of the enum.
pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
    // Scope corresponding to an AST node.
    AstScopeKind(ast::NodeId),
    // Loop scope, carrying the EXIT_MAX early-exit target blocks.
    LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>, ..EXIT_MAX])
impl<'blk, 'tcx: 'blk> fmt::Show for CleanupScopeKind<'blk, 'tcx> {
    // Debug formatting. Loop exit blocks are printed by pointer ({:p}) since
    // `Block` itself has no Show impl.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // NOTE(review): the `match *self {` opener is elided in this excerpt.
        CustomScopeKind => write!(f, "CustomScopeKind"),
        AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid),
        LoopScopeKind(nid, ref blks) => {
            try!(write!(f, "LoopScopeKind({}, [", nid));
            for blk in blks.iter() {
                try!(write!(f, "{:p}, ", blk));
/// Identifies where an early exit is headed: unwinding, a `return`, or a
/// `break`/`continue` out of the loop with the given id (the `uint` indexes
/// the loop's exit-block array: EXIT_BREAK or EXIT_LOOP).
// NOTE(review): the `UnwindExit` and `ReturnExit` variants (used elsewhere in
// this file) are elided from this excerpt.
#[deriving(PartialEq, Show)]
pub enum EarlyExitLabel {
    LoopExit(ast::NodeId, uint)

impl Copy for EarlyExitLabel {}

/// A previously generated cleanup block for a given early-exit label.
pub struct CachedEarlyExit {
    label: EarlyExitLabel,
    cleanup_block: BasicBlockRef,

impl Copy for CachedEarlyExit {}
/// An action scheduled for execution on scope exit (drop glue, free,
/// lifetime-end marker, ...).
pub trait Cleanup<'tcx> {
    /// True if running this cleanup can itself trigger unwinding.
    fn must_unwind(&self) -> bool;
    /// True if this cleanup should also run on the unwind (panic) path.
    fn clean_on_unwind(&self) -> bool;
    /// True if this cleanup is only an `llvm.lifetime.end` marker.
    fn is_lifetime_end(&self) -> bool;
    /// Emits the cleanup code into `bcx`; returns the (possibly new) block
    /// where subsequent code should be generated.
    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: Option<NodeInfo>)
                   -> Block<'blk, 'tcx>;

// Boxed, type-erased cleanup stored in a scope's cleanup list.
pub type CleanupObj<'tcx> = Box<Cleanup<'tcx>+'tcx>;
    // Variants of `pub enum ScopeId` (the enum header is elided in this
    // excerpt): a scope is named either by its AST node id...
    AstScope(ast::NodeId),
    // ...or by its index in the custom-scope stack.
    CustomScope(CustomScopeIndex)

impl Copy for ScopeId {}
impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
    /// Invoked when we start to trans the code contained within a new cleanup scope.
    fn push_ast_cleanup_scope(&self, debug_loc: NodeInfo) {
        debug!("push_ast_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(debug_loc.id));

        // FIXME(#2202) -- currently closure bodies have a parent
        // region, which messes up the assertion below, since there
        // are no cleanup scopes on the stack at the start of
        // trans'ing a closure body. I think though that this should
        // eventually be fixed by closure bodies not having a parent
        // region, though that's a touch unclear, and it might also be
        // better just to narrow this assertion more (i.e., by
        // excluding id's that correspond to closure bodies only). For
        // now we just say that if there is already an AST scope on the stack,
        // this new AST scope had better be its immediate child.
        let top_scope = self.top_ast_scope();
        if top_scope.is_some() {
            // NOTE(review): the assertion comparing `top_scope` against the
            // enclosing region of `debug_loc.id` is partially elided here.
                 .opt_encl_scope(region::CodeExtent::from_node_id(debug_loc.id))
                 .map(|s|s.node_id()),

        self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),

    /// Pushes the scope for a loop with id `id`; `exits` holds the target
    /// blocks that break/continue should branch to after running cleanups.
    // NOTE(review): the `id: ast::NodeId` parameter line is elided here.
    fn push_loop_cleanup_scope(&self,
                               exits: [Block<'blk, 'tcx>, ..EXIT_MAX]) {
        debug!("push_loop_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(id));
        // A loop scope must sit directly inside its own AST scope.
        assert_eq!(Some(id), self.top_ast_scope());

        // Just copy the debuginfo source location from the enclosing scope
        let debug_loc = self.scopes

        self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc));

    /// Pushes a temporary scope and returns its stack index for later use.
    fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
        let index = self.scopes_len();
        debug!("push_custom_cleanup_scope(): {}", index);

        // Just copy the debuginfo source location from the enclosing scope
        let debug_loc = self.scopes
                            .map(|opt_scope| opt_scope.debug_loc)

        self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc));
        CustomScopeIndex { index: index }

    /// Like `push_custom_cleanup_scope`, but with an explicit debug location
    /// for any drop calls generated for the scope.
    fn push_custom_cleanup_scope_with_debug_loc(&self,
                                                -> CustomScopeIndex {
        let index = self.scopes_len();
        debug!("push_custom_cleanup_scope(): {}", index);

        self.push_scope(CleanupScope::new(CustomScopeKind, Some(debug_loc)));
        CustomScopeIndex { index: index }

    /// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup
    /// stack, and generates the code to do its cleanups for normal exit.
    fn pop_and_trans_ast_cleanup_scope(&self,
                                       bcx: Block<'blk, 'tcx>,
                                       cleanup_scope: ast::NodeId)
                                       -> Block<'blk, 'tcx> {
        debug!("pop_and_trans_ast_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(cleanup_scope));

        assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));

        let scope = self.pop_scope();
        self.trans_scope_cleanups(bcx, &scope)

    /// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the
    /// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by
    /// branching to a block generated by `normal_exit_block`.
    fn pop_loop_cleanup_scope(&self,
                              cleanup_scope: ast::NodeId) {
        debug!("pop_loop_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(cleanup_scope));

        assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));

        let _ = self.pop_scope();

    /// Removes the top cleanup scope from the stack without executing its cleanups. The top
    /// cleanup scope must be the temporary scope `custom_scope`.
    fn pop_custom_cleanup_scope(&self,
                                custom_scope: CustomScopeIndex) {
        debug!("pop_custom_cleanup_scope({})", custom_scope.index);
        assert!(self.is_valid_to_pop_custom_scope(custom_scope));
        let _ = self.pop_scope();

    /// Removes the top cleanup scope from the stack, which must be a temporary scope, and
    /// generates the code to do its cleanups for normal exit.
    fn pop_and_trans_custom_cleanup_scope(&self,
                                          bcx: Block<'blk, 'tcx>,
                                          custom_scope: CustomScopeIndex)
                                          -> Block<'blk, 'tcx> {
        debug!("pop_and_trans_custom_cleanup_scope({})", custom_scope);
        assert!(self.is_valid_to_pop_custom_scope(custom_scope));

        let scope = self.pop_scope();
        self.trans_scope_cleanups(bcx, &scope)

    /// Returns the id of the top-most loop scope
    fn top_loop_scope(&self) -> ast::NodeId {
        // Scan from the innermost scope outward for the nearest loop.
        for scope in self.scopes.borrow().iter().rev() {
            if let LoopScopeKind(id, _) = scope.kind {
        // Reaching here is a compiler bug: caller asked for a loop scope
        // while none was on the stack.
        self.ccx.sess().bug("no loop scope found");

    /// Returns a block to branch to which will perform all pending cleanups and then
    /// break/continue (depending on `exit`) out of the loop with id `cleanup_scope`
    fn normal_exit_block(&'blk self,
                         cleanup_scope: ast::NodeId,
                         exit: uint) -> BasicBlockRef {
        self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))

    /// Returns a block to branch to which will perform all pending cleanups and then return from
    /// the function.
    fn return_exit_block(&'blk self) -> BasicBlockRef {
        self.trans_cleanups_to_exit_scope(ReturnExit)

    /// Schedules an `llvm.lifetime.end` marker for `val` on exit from
    /// `cleanup_scope`.
    fn schedule_lifetime_end(&self,
                             cleanup_scope: ScopeId,
        let drop = box LifetimeEnd {

        debug!("schedule_lifetime_end({}, val={})",
               self.ccx.tn().val_to_string(val));

        self.schedule_clean(cleanup_scope, drop as CleanupObj);

    /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
    fn schedule_drop_mem(&self,
                         cleanup_scope: ScopeId,
        // Types without drop glue need no cleanup; skip scheduling entirely.
        if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; }
        let drop = box DropValue {
            must_unwind: ty::type_needs_unwind_cleanup(self.ccx.tcx(), ty),

        debug!("schedule_drop_mem({}, val={}, ty={})",
               self.ccx.tn().val_to_string(val),
               ty.repr(self.ccx.tcx()));

        self.schedule_clean(cleanup_scope, drop as CleanupObj);

    /// Schedules a (deep) drop and zero-ing of `val`, which is a pointer to an instance of `ty`
    fn schedule_drop_and_zero_mem(&self,
                                  cleanup_scope: ScopeId,
        if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; }
        let drop = box DropValue {
            must_unwind: ty::type_needs_unwind_cleanup(self.ccx.tcx(), ty),

        debug!("schedule_drop_and_zero_mem({}, val={}, ty={}, zero={})",
               self.ccx.tn().val_to_string(val),
               ty.repr(self.ccx.tcx()),

        self.schedule_clean(cleanup_scope, drop as CleanupObj);

    /// Schedules a (deep) drop of `val`, which is an instance of `ty`
    fn schedule_drop_immediate(&self,
                               cleanup_scope: ScopeId,
        if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; }
        let drop = box DropValue {
            must_unwind: ty::type_needs_unwind_cleanup(self.ccx.tcx(), ty),

        debug!("schedule_drop_immediate({}, val={}, ty={})",
               self.ccx.tn().val_to_string(val),
               ty.repr(self.ccx.tcx()));

        self.schedule_clean(cleanup_scope, drop as CleanupObj);

    /// Schedules a call to `free(val)`. Note that this is a shallow operation.
    fn schedule_free_value(&self,
                           cleanup_scope: ScopeId,
                           content_ty: Ty<'tcx>) {
        let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };

        debug!("schedule_free_value({}, val={}, heap={})",
               self.ccx.tn().val_to_string(val),

        self.schedule_clean(cleanup_scope, drop as CleanupObj);

    /// Schedules a call to `free(val)`. Note that this is a shallow operation.
    fn schedule_free_slice(&self,
                           cleanup_scope: ScopeId,
        let drop = box FreeSlice { ptr: val, size: size, align: align, heap: heap };

        debug!("schedule_free_slice({}, val={}, heap={})",
               self.ccx.tn().val_to_string(val),

        self.schedule_clean(cleanup_scope, drop as CleanupObj);

    /// Dispatches a cleanup to the AST- or custom-scope routine according to
    /// the kind of `cleanup_scope`.
    fn schedule_clean(&self,
                      cleanup_scope: ScopeId,
                      cleanup: CleanupObj<'tcx>) {
        match cleanup_scope {
            AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
            CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),

    /// Schedules a cleanup to occur upon exit from `cleanup_scope`. If `cleanup_scope` is not
    /// provided, then the cleanup is scheduled in the topmost scope, which must be a temporary
    /// scope.
    fn schedule_clean_in_ast_scope(&self,
                                   cleanup_scope: ast::NodeId,
                                   cleanup: CleanupObj<'tcx>) {
        debug!("schedule_clean_in_ast_scope(cleanup_scope={})",

        // Walk inward-out looking for the matching AST scope; cached exits of
        // every scope passed over are invalidated along the way.
        for scope in self.scopes.borrow_mut().iter_mut().rev() {
            if scope.kind.is_ast_with_id(cleanup_scope) {
                scope.cleanups.push(cleanup);
                scope.clear_cached_exits();
                // this scope is not the one being exited; its cached exits are
                // stale too because we
                // will be adding a cleanup to some enclosing scope
                scope.clear_cached_exits();

        // Scope not found at all: compiler bug.
                          format!("no cleanup scope {} found",
                                  self.ccx.tcx().map.node_to_string(cleanup_scope)).as_slice());

    /// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope.
    fn schedule_clean_in_custom_scope(&self,
                                      custom_scope: CustomScopeIndex,
                                      cleanup: CleanupObj<'tcx>) {
        debug!("schedule_clean_in_custom_scope(custom_scope={})",

        assert!(self.is_valid_custom_scope(custom_scope));

        let mut scopes = self.scopes.borrow_mut();
        let scope = &mut (*scopes)[custom_scope.index];
        scope.cleanups.push(cleanup);
        scope.clear_cached_exits();

    /// Returns true if there are pending cleanups that should execute on panic.
    fn needs_invoke(&self) -> bool {
        self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())

    /// Returns a basic block to branch to in the event of a panic. This block will run the panic
    /// cleanups and eventually invoke the LLVM `Resume` instruction.
    fn get_landing_pad(&'blk self) -> BasicBlockRef {
        let _icx = base::push_ctxt("get_landing_pad");

        debug!("get_landing_pad");

        let orig_scopes_len = self.scopes_len();
        assert!(orig_scopes_len > 0);

        // Remove any scopes that do not have cleanups on panic:
        let mut popped_scopes = vec!();
        while !self.top_scope(|s| s.needs_invoke()) {
            debug!("top scope does not need invoke");
            popped_scopes.push(self.pop_scope());

        // Check for an existing landing pad in the new topmost scope:
        let llbb = self.get_or_create_landing_pad();

        // Push the scopes we removed back on:
            match popped_scopes.pop() {
                Some(scope) => self.push_scope(scope),

        // The stack must be restored exactly as it was.
        assert_eq!(self.scopes_len(), orig_scopes_len);
impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
    /// Returns the id of the current top-most AST scope, if any.
    fn top_ast_scope(&self) -> Option<ast::NodeId> {
        // Skip loop/custom scopes; the first AST scope from the top wins.
        for scope in self.scopes.borrow().iter().rev() {
                CustomScopeKind | LoopScopeKind(..) => {}

    /// Position (from the top of the stack, 0-based) of the innermost scope
    /// that has pending cleanups, if any.
    fn top_nonempty_cleanup_scope(&self) -> Option<uint> {
        self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())

    /// A custom scope may only be popped when it is valid AND topmost.
    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
        self.is_valid_custom_scope(custom_scope) &&
            custom_scope.index == self.scopes.borrow().len() - 1

    /// True if `custom_scope` is in range and denotes a temporary scope.
    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
        let scopes = self.scopes.borrow();
        custom_scope.index < scopes.len() &&
            (*scopes)[custom_scope.index].kind.is_temp()

    /// Generates the cleanups for `scope` into `bcx`
    fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
                            bcx: Block<'blk, 'tcx>,
                            scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> {
        // No codegen if the block is already unreachable.
        if !bcx.unreachable.get() {
            // Cleanups run in reverse order of scheduling (LIFO).
            for cleanup in scope.cleanups.iter().rev() {
                bcx = cleanup.trans(bcx, scope.debug_loc);

    // Number of scopes currently on the stack.
    fn scopes_len(&self) -> uint {
        self.scopes.borrow().len()

    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
        self.scopes.borrow_mut().push(scope)

    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
        debug!("popping cleanup scope {}, {} scopes remaining",
               self.top_scope(|s| s.block_name("")),
               self.scopes_len() - 1);

        self.scopes.borrow_mut().pop().unwrap()

    // Applies `f` to the topmost scope without removing it.
    fn top_scope<R>(&self, f: |&CleanupScope<'blk, 'tcx>| -> R) -> R {
        f(self.scopes.borrow().last().unwrap())

    /// Used when the caller wishes to jump to an early exit, such as a return, break, continue, or
    /// unwind. This function will generate all cleanups between the top of the stack and the exit
    /// `label` and return a basic block that the caller can branch to.
    ///
    /// For example, if the current stack of cleanups were as follows
    /// (innermost last): an AST scope 12, a loop scope 23, a custom scope 2,
    /// and an AST scope 24,
    /// and the `label` specifies a break from `Loop 23`, then this function would generate a
    /// series of basic blocks as follows:
    ///
    /// Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
    ///
    /// where `break_blk` is the block specified in `Loop 23` as the target for breaks. The return
    /// value would be the first basic block in that sequence (`Cleanup(AST 24)`). The caller could
    /// then branch to `Cleanup(AST 24)` and it will perform all cleanups and finally branch to the
    /// exit label.
    fn trans_cleanups_to_exit_scope(&'blk self,
                                    label: EarlyExitLabel)
        debug!("trans_cleanups_to_exit_scope label={} scopes={}",
               label, self.scopes_len());

        let orig_scopes_len = self.scopes_len();
        let mut popped_scopes = vec!();

        // First we pop off all the cleanup stacks that are
        // traversed until the exit is reached, pushing them
        // onto the side vector `popped_scopes`. No code is
        // generated at this time.
        //
        // So, continuing the example from above, we would wind up
        // with a `popped_scopes` vector of `[AST 24, Custom 2]`.
        // (Presuming that there are no cached exits)
            if self.scopes_len() == 0 {
                // Ran out of scopes: the exit target is outside the stack.
                // For an unwind exit:
                        // Generate a block that will `Resume`.
                        let prev_bcx = self.new_block(true, "resume", None);
                        let personality = self.personality.get().expect(
                            "create_landing_pad() should have set this");
                        build::Resume(prev_bcx,
                                      build::Load(prev_bcx, personality));
                        prev_llbb = prev_bcx.llbb;
                        // For a return exit, branch to the function's common
                        // return block:
                        prev_llbb = self.get_llreturn();
                        // A loop exit with no matching loop scope is a bug:
                        self.ccx.sess().bug(format!(
                            "cannot exit from scope {}, \
                            not in scope", id).as_slice());

            // Check if we have already cached the unwinding of this
            // scope for this label. If so, we can stop popping scopes
            // and branch to the cached label, since it contains the
            // cleanups for any subsequent scopes.
            match self.top_scope(|s| s.cached_early_exit(label)) {
                Some(cleanup_block) => {
                    prev_llbb = cleanup_block;

            // Pop off the scope, since we will be generating
            // unwinding code for it. If we are searching for a loop exit,
            // and this scope is that loop, then stop popping and set
            // `prev_llbb` to the appropriate exit block from the loop.
            popped_scopes.push(self.pop_scope());
            let scope = popped_scopes.last().unwrap();
                UnwindExit | ReturnExit => { }
                LoopExit(id, exit) => {
                    match scope.kind.early_exit_block(id, exit) {
                            prev_llbb = exitllbb;

        debug!("trans_cleanups_to_exit_scope: popped {} scopes",
               popped_scopes.len());

        // Now push the popped scopes back on. As we go,
        // we track in `prev_llbb` the exit to which this scope
        // should branch when it's done.
        //
        // So, continuing with our example, we will start out with
        // `prev_llbb` being set to `break_blk` (or possibly a cached
        // early exit). We will then pop the scopes from `popped_scopes`
        // and generate a basic block for each one, prepending it in the
        // series and updating `prev_llbb`. So we begin by popping `Custom 2`
        // and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)`
        // branch to `prev_llbb == break_blk`, giving us a sequence like:
        //
        // Cleanup(Custom 2) -> prev_llbb
        //
        // We then pop `AST 24` and repeat the process, giving us the sequence:
        //
        // Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb
        //
        // At this point, `popped_scopes` is empty, and so the final block
        // that we return to the user is `Cleanup(AST 24)`.
        while !popped_scopes.is_empty() {
            let mut scope = popped_scopes.pop().unwrap();

            // Only generate a block when this scope has cleanups relevant to
            // this kind of exit (unwind exits skip non-unwind cleanups).
            if scope.cleanups.iter().any(|c| cleanup_is_suitable_for(&**c, label))
                let name = scope.block_name("clean");
                debug!("generating cleanups for {}", name);
                let bcx_in = self.new_block(label.is_unwind(),
                let mut bcx_out = bcx_in;
                for cleanup in scope.cleanups.iter().rev() {
                    if cleanup_is_suitable_for(&**cleanup, label) {
                        bcx_out = cleanup.trans(bcx_out,
                // Chain this scope's cleanup block in front of the sequence.
                build::Br(bcx_out, prev_llbb);
                prev_llbb = bcx_in.llbb;
                debug!("no suitable cleanups in {}",
                       scope.block_name("clean"));

            // Cache the generated block so a later exit through this scope
            // with the same label can reuse it.
            scope.add_cached_early_exit(label, prev_llbb);
            self.push_scope(scope);

        debug!("trans_cleanups_to_exit_scope: prev_llbb={}", prev_llbb);

        // Stack must be exactly as we found it.
        assert_eq!(self.scopes_len(), orig_scopes_len);

    /// Creates a landing pad for the top scope, if one does not exist. The landing pad will
    /// perform all cleanups necessary for an unwind and then `resume` to continue error
    /// propagation:
    ///
    /// landing_pad -> ... cleanups ... -> [resume]
    ///
    /// (The cleanups and resume instruction are created by `trans_cleanups_to_exit_scope()`, not
    /// in this function itself.)
    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
        debug!("get_or_create_landing_pad");

        // Check if a landing pad block exists; if not, create one.
            let mut scopes = self.scopes.borrow_mut();
            let last_scope = scopes.last_mut().unwrap();
            match last_scope.cached_landing_pad {
                Some(llbb) => { return llbb; }
                    let name = last_scope.block_name("unwind");
                    pad_bcx = self.new_block(true, name.as_slice(), None);
                    last_scope.cached_landing_pad = Some(pad_bcx.llbb);

        // The landing pad return type (the type being propagated). Not sure what
        // this represents but it's determined by the personality function and
        // this is what the EH proposal example uses.
        let llretty = Type::struct_(self.ccx,
                                    &[Type::i8p(self.ccx), Type::i32(self.ccx)],

        // The exception handling personality function.
        //
        // If our compilation unit has the `eh_personality` lang item somewhere
        // within it, then we just need to translate that. Otherwise, we're
        // building an rlib which will depend on some upstream implementation of
        // this function, so we just codegen a generic reference to it. We don't
        // specify any of the types for the function, we just make it a symbol
        // that LLVM can later use.
        let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() {
            Some(def_id) => callee::trans_fn_ref(pad_bcx, def_id, ExprId(0)),
                let mut personality = self.ccx.eh_personality().borrow_mut();
                    Some(llpersonality) => llpersonality,
                        // Declare an external, variadic personality symbol and
                        // cache it for the rest of the crate.
                        let fty = Type::variadic_func(&[], &Type::i32(self.ccx));
                        let f = base::decl_cdecl_fn(self.ccx,
                                                    "rust_eh_personality",
                        *personality = Some(f);

        // The only landing pad clause will be 'cleanup'
        let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1u);

        // The landing pad block is a cleanup
        build::SetCleanup(pad_bcx, llretval);

        // We store the retval in a function-central alloca, so that calls to
        // Resume can find it.
        match self.personality.get() {
                build::Store(pad_bcx, llretval, addr);
                // First landing pad in this function: create the alloca.
                let addr = base::alloca(pad_bcx, common::val_ty(llretval), "");
                self.personality.set(Some(addr));
                build::Store(pad_bcx, llretval, addr);

        // Generate the cleanup block and branch to it.
        let cleanup_llbb = self.trans_cleanups_to_exit_scope(UnwindExit);
        build::Br(pad_bcx, cleanup_llbb);
impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
    /// Creates an empty scope of the given kind with no cached exits.
    fn new(kind: CleanupScopeKind<'blk, 'tcx>,
           debug_loc: Option<NodeInfo>)
           -> CleanupScope<'blk, 'tcx> {
            debug_loc: debug_loc,
            cached_early_exits: vec!(),
            cached_landing_pad: None,

    /// Invalidates all cached exit blocks; must be called whenever the
    /// scope's cleanup list changes, since cached blocks would be incomplete.
    fn clear_cached_exits(&mut self) {
        self.cached_early_exits = vec!();
        self.cached_landing_pad = None;

    /// Looks up a previously generated cleanup block for `label`, if any.
    fn cached_early_exit(&self,
                         label: EarlyExitLabel)
                         -> Option<BasicBlockRef> {
        self.cached_early_exits.iter().
            find(|e| e.label == label).
            map(|e| e.cleanup_block)

    /// Records `blk` as the cleanup block to reuse for exits with `label`.
    fn add_cached_early_exit(&mut self,
                             label: EarlyExitLabel,
                             blk: BasicBlockRef) {
        self.cached_early_exits.push(
            CachedEarlyExit { label: label,
                              cleanup_block: blk });

    /// True if this scope has cleanups that need unwinding
    fn needs_invoke(&self) -> bool {
        self.cached_landing_pad.is_some() ||
            self.cleanups.iter().any(|c| c.must_unwind())

    /// Returns a suitable name to use for the basic block that handles this cleanup scope
    fn block_name(&self, prefix: &str) -> String {
            CustomScopeKind => format!("{}_custom_", prefix),
            AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
            LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id),

    /// Discards all scheduled cleanups except lifetime-end markers.
    pub fn drop_non_lifetime_clean(&mut self) {
        self.cleanups.retain(|c| c.is_lifetime_end());
impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
    /// True only for custom (temporary) scopes.
    fn is_temp(&self) -> bool {
            CustomScopeKind => true,
            LoopScopeKind(..) | AstScopeKind(..) => false,

    /// True if this is the AST scope for node `id`.
    fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
            CustomScopeKind | LoopScopeKind(..) => false,
            AstScopeKind(i) => i == id

    /// True if this is the loop scope for node `id`.
    fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
            CustomScopeKind | AstScopeKind(..) => false,
            LoopScopeKind(i, _) => i == id

    /// If this is a loop scope with id `id`, return the early exit block `exit`, else `None`
    fn early_exit_block(&self,
                        exit: uint) -> Option<BasicBlockRef> {
            LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
impl EarlyExitLabel {
    // Distinguishes the unwind exit from normal (return/loop) exits; unwind
    // exit blocks must be generated as landing-pad-style blocks.
    // NOTE(review): the method body is elided in this excerpt.
    fn is_unwind(&self) -> bool {
876 ///////////////////////////////////////////////////////////////////////////
/// Cleanup that runs the drop glue for a value, either immediate or behind a
/// pointer, optionally zeroing the memory afterwards.
// NOTE(review): the struct's fields (`is_immediate`, `must_unwind`, `val`,
// `ty`, and a zeroing flag, judging by their uses below) are elided here.
pub struct DropValue<'tcx> {

impl<'tcx> Copy for DropValue<'tcx> {}

impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
    fn must_unwind(&self) -> bool {

    fn clean_on_unwind(&self) -> bool {

    fn is_lifetime_end(&self) -> bool {

    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: Option<NodeInfo>)
                   -> Block<'blk, 'tcx> {
        // Immediate values carry the value itself; otherwise `val` is a
        // pointer to the instance being dropped.
        let bcx = if self.is_immediate {
            glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc)
            glue::drop_ty(bcx, self.val, self.ty, debug_loc)
            // Zero the memory so any later (duplicate) drop is a no-op.
            base::zero_mem(bcx, self.val, self.ty);
// NOTE(review): the `Heap` enum declaration itself is elided in this excerpt.
impl Copy for Heap {}

/// Cleanup that frees a heap allocation. This is shallow: the pointed-to
/// contents are not dropped.
pub struct FreeValue<'tcx> {

impl<'tcx> Copy for FreeValue<'tcx> {}

impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> {
    fn must_unwind(&self) -> bool {

    fn clean_on_unwind(&self) -> bool {

    fn is_lifetime_end(&self) -> bool {

    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: Option<NodeInfo>)
                   -> Block<'blk, 'tcx> {
        apply_debug_loc(bcx.fcx, debug_loc);
                glue::trans_exchange_free_ty(bcx, self.ptr, self.content_ty)
/// Cleanup that frees a dynamically sized heap allocation (slice), given its
/// run-time size and alignment. Shallow, like `FreeValue`.
pub struct FreeSlice {

impl Copy for FreeSlice {}

impl<'tcx> Cleanup<'tcx> for FreeSlice {
    fn must_unwind(&self) -> bool {

    fn clean_on_unwind(&self) -> bool {

    fn is_lifetime_end(&self) -> bool {

    fn trans<'blk, 'tcx>(&self,
                         bcx: Block<'blk, 'tcx>,
                         debug_loc: Option<NodeInfo>)
                         -> Block<'blk, 'tcx> {
        apply_debug_loc(bcx.fcx, debug_loc);
                glue::trans_exchange_free_dyn(bcx, self.ptr, self.size, self.align)
/// Cleanup that emits an `llvm.lifetime.end` marker for a stack slot,
/// allowing LLVM to reuse the memory.
pub struct LifetimeEnd {

impl Copy for LifetimeEnd {}

impl<'tcx> Cleanup<'tcx> for LifetimeEnd {
    fn must_unwind(&self) -> bool {

    fn clean_on_unwind(&self) -> bool {

    fn is_lifetime_end(&self) -> bool {

    fn trans<'blk, 'tcx>(&self,
                         bcx: Block<'blk, 'tcx>,
                         debug_loc: Option<NodeInfo>)
                         -> Block<'blk, 'tcx> {
        apply_debug_loc(bcx.fcx, debug_loc);
        base::call_lifetime_end(bcx, self.ptr);
/// Returns the cleanup `ScopeId` in which a temporary created for expression
/// `id` will be dropped, per the region maps; bugs out if none exists.
pub fn temporary_scope(tcx: &ty::ctxt,
    match tcx.region_maps.temporary_scope(id) {
            let r = AstScope(scope.node_id());
            debug!("temporary_scope({}) = {}", id, r);
            // No enclosing temporary scope is a compiler bug.
            tcx.sess.bug(format!("no temporary scope available for expr {}",
/// Returns the cleanup `ScopeId` in which the variable `id` dies, per the
/// region maps.
pub fn var_scope(tcx: &ty::ctxt,
    let r = AstScope(tcx.region_maps.var_scope(id).node_id());
    debug!("var_scope({}) = {}", id, r);
// On the unwind path only cleanups flagged `clean_on_unwind` run; on any
// normal exit (return/break/continue) every cleanup runs.
fn cleanup_is_suitable_for(c: &Cleanup,
                           label: EarlyExitLabel) -> bool {
    !label.is_unwind() || c.clean_on_unwind()
// Sets the debuginfo source location for subsequently emitted instructions,
// or clears it when no location is available.
fn apply_debug_loc(fcx: &FunctionContext, debug_loc: Option<NodeInfo>) {
        Some(ref src_loc) => {
            debuginfo::set_source_location(fcx, src_loc.id, src_loc.span);
            debuginfo::clear_source_location(fcx);
1065 ///////////////////////////////////////////////////////////////////////////
1066 // These traits just exist to put the methods into this file.
/// Public cleanup-scope interface, implemented above for `FunctionContext`.
/// (Declared as a trait only so the methods can live in this file.)
// NOTE(review): several parameter lines of these signatures are elided in
// this excerpt (e.g. `val`/`ty` parameters of the `schedule_*` methods).
pub trait CleanupMethods<'blk, 'tcx> {
    fn push_ast_cleanup_scope(&self, id: NodeInfo);
    fn push_loop_cleanup_scope(&self,
                               exits: [Block<'blk, 'tcx>, ..EXIT_MAX]);
    fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
    fn push_custom_cleanup_scope_with_debug_loc(&self,
                                                debug_loc: NodeInfo)
                                                -> CustomScopeIndex;
    fn pop_and_trans_ast_cleanup_scope(&self,
                                       bcx: Block<'blk, 'tcx>,
                                       cleanup_scope: ast::NodeId)
                                       -> Block<'blk, 'tcx>;
    fn pop_loop_cleanup_scope(&self,
                              cleanup_scope: ast::NodeId);
    fn pop_custom_cleanup_scope(&self,
                                custom_scope: CustomScopeIndex);
    fn pop_and_trans_custom_cleanup_scope(&self,
                                          bcx: Block<'blk, 'tcx>,
                                          custom_scope: CustomScopeIndex)
                                          -> Block<'blk, 'tcx>;
    fn top_loop_scope(&self) -> ast::NodeId;
    fn normal_exit_block(&'blk self,
                         cleanup_scope: ast::NodeId,
                         exit: uint) -> BasicBlockRef;
    fn return_exit_block(&'blk self) -> BasicBlockRef;
    fn schedule_lifetime_end(&self,
                             cleanup_scope: ScopeId,
    fn schedule_drop_mem(&self,
                         cleanup_scope: ScopeId,
    fn schedule_drop_and_zero_mem(&self,
                                  cleanup_scope: ScopeId,
    fn schedule_drop_immediate(&self,
                               cleanup_scope: ScopeId,
    fn schedule_free_value(&self,
                           cleanup_scope: ScopeId,
                           content_ty: Ty<'tcx>);
    fn schedule_free_slice(&self,
                           cleanup_scope: ScopeId,
    fn schedule_clean(&self,
                      cleanup_scope: ScopeId,
                      cleanup: CleanupObj<'tcx>);
    fn schedule_clean_in_ast_scope(&self,
                                   cleanup_scope: ast::NodeId,
                                   cleanup: CleanupObj<'tcx>);
    fn schedule_clean_in_custom_scope(&self,
                                      custom_scope: CustomScopeIndex,
                                      cleanup: CleanupObj<'tcx>);
    fn needs_invoke(&self) -> bool;
    fn get_landing_pad(&'blk self) -> BasicBlockRef;
/// Private helper methods used by the `CleanupMethods` implementation above;
/// see that trait for the public interface.
trait CleanupHelperMethods<'blk, 'tcx> {
    fn top_ast_scope(&self) -> Option<ast::NodeId>;
    fn top_nonempty_cleanup_scope(&self) -> Option<uint>;
    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    fn trans_scope_cleanups(&self,
                            bcx: Block<'blk, 'tcx>,
                            scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
    fn trans_cleanups_to_exit_scope(&'blk self,
                                    label: EarlyExitLabel)
    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
    fn scopes_len(&self) -> uint;
    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
    fn top_scope<R>(&self, f: |&CleanupScope<'blk, 'tcx>| -> R) -> R;