]> git.lizzy.rs Git - rust.git/blob - src/librustc_trans/trans/cleanup.rs
b0235be7497eae95dd301aee4e83c1409833ed44
[rust.git] / src / librustc_trans / trans / cleanup.rs
1 // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 /*!
12  * Code pertaining to cleanup of temporaries as well as execution of
13  * drop glue. See discussion in `doc.rs` for a high-level summary.
14  */
15
16 pub use self::ScopeId::*;
17 pub use self::CleanupScopeKind::*;
18 pub use self::EarlyExitLabel::*;
19 pub use self::Heap::*;
20
21 use llvm::{BasicBlockRef, ValueRef};
22 use trans::base;
23 use trans::build;
24 use trans::callee;
25 use trans::common;
26 use trans::common::{Block, FunctionContext, ExprId, NodeInfo};
27 use trans::debuginfo;
28 use trans::glue;
29 use middle::region;
30 use trans::type_::Type;
31 use middle::ty::{mod, Ty};
32 use std::fmt;
33 use syntax::ast;
34 use util::ppaux::Repr;
35
36 pub struct CleanupScope<'blk, 'tcx: 'blk> {
37     // The kind of this cleanup scope. A `CustomScopeKind` is a
38     // *temporary scope* that is pushed during trans to
39     // cleanup miscellaneous garbage that trans may generate whose
40     // lifetime is a subset of some expression.  See module doc for
41     // more details.
42     kind: CleanupScopeKind<'blk, 'tcx>,
43
44     // Cleanups to run upon scope exit.
45     cleanups: Vec<CleanupObj<'tcx>>,
46
47     // The debug location any drop calls generated for this scope will be
48     // associated with.
49     debug_loc: Option<NodeInfo>,
50
    // Early exits (break/continue/return/unwind) for which cleanup blocks
    // have already been generated through this scope; consulted and
    // invalidated by the CleanupMethods impl below.
51     cached_early_exits: Vec<CachedEarlyExit>,
    // Cached landing pad for unwinding out of this scope, if one has been
    // generated; see `get_or_create_landing_pad`.
52     cached_landing_pad: Option<BasicBlockRef>,
53 }
54
// Identifies a custom (temporary) cleanup scope by its index into
// `FunctionContext::scopes`; only valid while that scope is on the stack
// (see `is_valid_custom_scope`).
55 #[deriving(Show)]
56 pub struct CustomScopeIndex {
57     index: uint
58 }
59
// Indices into the exit-block array carried by `LoopScopeKind`:
// `break` exits through slot EXIT_BREAK, `continue` through EXIT_LOOP.
// EXIT_MAX is the array length.
60 pub const EXIT_BREAK: uint = 0;
61 pub const EXIT_LOOP: uint = 1;
62 pub const EXIT_MAX: uint = 2;
63
// What sort of scope a `CleanupScope` is.
64 pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
    // A temporary scope with no associated AST node.
65     CustomScopeKind,
    // Scope for the AST node with the given id.
66     AstScopeKind(ast::NodeId),
    // A loop scope: node id plus the EXIT_MAX exit target blocks,
    // indexed by EXIT_BREAK / EXIT_LOOP.
67     LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>, ..EXIT_MAX])
68 }
69
// Manual Show impl: the loop exit blocks are printed by pointer
// (`{:p}`) rather than derived, since `Block` carries no Show impl here.
70 impl<'blk, 'tcx: 'blk> fmt::Show for CleanupScopeKind<'blk, 'tcx> {
71     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
72         match *self {
73             CustomScopeKind => write!(f, "CustomScopeKind"),
74             AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid),
75             LoopScopeKind(nid, ref blks) => {
76                 try!(write!(f, "LoopScopeKind({}, [", nid));
77                 for blk in blks.iter() {
78                     try!(write!(f, "{:p}, ", blk));
79                 }
80                 write!(f, "])")
81             }
82         }
83     }
84 }
85
// Destination of an early exit from the cleanup stack: unwinding, a
// return, or a break/continue out of the loop with the given node id
// (the `uint` is an EXIT_* index into that loop's exit-block array).
86 #[deriving(PartialEq, Show)]
87 pub enum EarlyExitLabel {
88     UnwindExit,
89     ReturnExit,
90     LoopExit(ast::NodeId, uint)
91 }
92
// A previously generated chain of cleanup blocks exiting via `label`;
// reused by `trans_cleanups_to_exit_scope` instead of regenerating the
// cleanups for the scopes below this one.
93 pub struct CachedEarlyExit {
94     label: EarlyExitLabel,
95     cleanup_block: BasicBlockRef,
96 }
97
// A single scheduled cleanup action (a drop, a free, a lifetime end).
98 pub trait Cleanup<'tcx> {
    // Whether this cleanup must run on unwind; at the schedule_* call
    // sites below this is set from `ty::type_needs_unwind_cleanup`.
99     fn must_unwind(&self) -> bool;
    // NOTE(review): presumably whether the cleanup applies on the unwind
    // path at all (vs. normal exit only) — impls are outside this file.
100     fn clean_on_unwind(&self) -> bool;
    // True for lifetime-end markers (see `schedule_lifetime_end`).
101     fn is_lifetime_end(&self) -> bool;
    // Emits the cleanup code into `bcx`, returning the block in which
    // codegen should continue (see `trans_scope_cleanups`).
102     fn trans<'blk>(&self,
103                    bcx: Block<'blk, 'tcx>,
104                    debug_loc: Option<NodeInfo>)
105                    -> Block<'blk, 'tcx>;
106 }
107
// Boxed, type-erased cleanup action owned by a scope's `cleanups` list.
108 pub type CleanupObj<'tcx> = Box<Cleanup<'tcx>+'tcx>;
109
// Either kind of scope a cleanup may be scheduled in; dispatched on by
// `schedule_clean`.
110 #[deriving(Show)]
111 pub enum ScopeId {
112     AstScope(ast::NodeId),
113     CustomScope(CustomScopeIndex)
114 }
115
// Public cleanup API on FunctionContext: pushing/popping cleanup scopes
// (AST, loop, custom) and scheduling cleanup actions (drops, frees,
// lifetime ends) into them. The scope stack lives in `self.scopes`.
116 impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
117     fn push_ast_cleanup_scope(&self, debug_loc: NodeInfo) {
118         /*!
119          * Invoked when we start to trans the code contained
120          * within a new cleanup scope.
121          */
122
123         debug!("push_ast_cleanup_scope({})",
124                self.ccx.tcx().map.node_to_string(debug_loc.id));
125
126         // FIXME(#2202) -- currently closure bodies have a parent
127         // region, which messes up the assertion below, since there
128         // are no cleanup scopes on the stack at the start of
129         // trans'ing a closure body.  I think though that this should
130         // eventually be fixed by closure bodies not having a parent
131         // region, though that's a touch unclear, and it might also be
132         // better just to narrow this assertion more (i.e., by
133         // excluding id's that correspond to closure bodies only). For
134         // now we just say that if there is already an AST scope on the stack,
135         // this new AST scope had better be its immediate child.
136         let top_scope = self.top_ast_scope();
137         if top_scope.is_some() {
138             assert_eq!(self.ccx
139                            .tcx()
140                            .region_maps
141                            .opt_encl_scope(region::CodeExtent::from_node_id(debug_loc.id))
142                            .map(|s|s.node_id()),
143                        top_scope);
144         }
145
146         self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
147                                           Some(debug_loc)));
148     }
149
150     fn push_loop_cleanup_scope(&self,
151                                id: ast::NodeId,
152                                exits: [Block<'blk, 'tcx>, ..EXIT_MAX]) {
153         debug!("push_loop_cleanup_scope({})",
154                self.ccx.tcx().map.node_to_string(id));
155         assert_eq!(Some(id), self.top_ast_scope());
156
157         // Just copy the debuginfo source location from the enclosing scope
158         let debug_loc = self.scopes
159                             .borrow()
160                             .last()
161                             .unwrap()
162                             .debug_loc;
163
164         self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc));
165     }
166
167     fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
        // Current stack length == the index the new scope will occupy
        // once pushed, so this is the returned CustomScopeIndex.
168         let index = self.scopes_len();
169         debug!("push_custom_cleanup_scope(): {}", index);
170
171         // Just copy the debuginfo source location from the enclosing scope
172         let debug_loc = self.scopes
173                             .borrow()
174                             .last()
175                             .map(|opt_scope| opt_scope.debug_loc)
176                             .unwrap_or(None);
177
178         self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc));
179         CustomScopeIndex { index: index }
180     }
181
182     fn push_custom_cleanup_scope_with_debug_loc(&self,
183                                                 debug_loc: NodeInfo)
184                                                 -> CustomScopeIndex {
185         let index = self.scopes_len();
186         debug!("push_custom_cleanup_scope(): {}", index);
187
188         self.push_scope(CleanupScope::new(CustomScopeKind, Some(debug_loc)));
189         CustomScopeIndex { index: index }
190     }
191
192     fn pop_and_trans_ast_cleanup_scope(&self,
193                                        bcx: Block<'blk, 'tcx>,
194                                        cleanup_scope: ast::NodeId)
195                                        -> Block<'blk, 'tcx> {
196         /*!
197          * Removes the cleanup scope for id `cleanup_scope`, which
198          * must be at the top of the cleanup stack, and generates the
199          * code to do its cleanups for normal exit.
200          */
201
202         debug!("pop_and_trans_ast_cleanup_scope({})",
203                self.ccx.tcx().map.node_to_string(cleanup_scope));
204
205         assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));
206
207         let scope = self.pop_scope();
208         self.trans_scope_cleanups(bcx, &scope)
209     }
210
211     fn pop_loop_cleanup_scope(&self,
212                               cleanup_scope: ast::NodeId) {
213         /*!
214          * Removes the loop cleanup scope for id `cleanup_scope`, which
215          * must be at the top of the cleanup stack. Does not generate
216          * any cleanup code, since loop scopes should exit by
217          * branching to a block generated by `normal_exit_block`.
218          */
219
220         debug!("pop_loop_cleanup_scope({})",
221                self.ccx.tcx().map.node_to_string(cleanup_scope));
222
223         assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));
224
225         let _ = self.pop_scope();
226     }
227
228     fn pop_custom_cleanup_scope(&self,
229                                 custom_scope: CustomScopeIndex) {
230         /*!
231          * Removes the top cleanup scope from the stack without
232          * executing its cleanups. The top cleanup scope must
233          * be the temporary scope `custom_scope`.
234          */
235
236         debug!("pop_custom_cleanup_scope({})", custom_scope.index);
237         assert!(self.is_valid_to_pop_custom_scope(custom_scope));
238         let _ = self.pop_scope();
239     }
240
241     fn pop_and_trans_custom_cleanup_scope(&self,
242                                           bcx: Block<'blk, 'tcx>,
243                                           custom_scope: CustomScopeIndex)
244                                           -> Block<'blk, 'tcx> {
245         /*!
246          * Removes the top cleanup scope from the stack, which must be
247          * a temporary scope, and generates the code to do its
248          * cleanups for normal exit.
249          */
250
251         debug!("pop_and_trans_custom_cleanup_scope({})", custom_scope);
252         assert!(self.is_valid_to_pop_custom_scope(custom_scope));
253
254         let scope = self.pop_scope();
255         self.trans_scope_cleanups(bcx, &scope)
256     }
257
258     fn top_loop_scope(&self) -> ast::NodeId {
259         /*!
260          * Returns the id of the top-most loop scope
261          */
262
263         for scope in self.scopes.borrow().iter().rev() {
264             match scope.kind {
265                 LoopScopeKind(id, _) => {
266                     return id;
267                 }
268                 _ => {}
269             }
270         }
271         self.ccx.sess().bug("no loop scope found");
272     }
273
274     fn normal_exit_block(&'blk self,
275                          cleanup_scope: ast::NodeId,
276                          exit: uint) -> BasicBlockRef {
277         /*!
278          * Returns a block to branch to which will perform all pending
279          * cleanups and then break/continue (depending on `exit`) out
280          * of the loop with id `cleanup_scope`
281          */
282
283         self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
284     }
285
286     fn return_exit_block(&'blk self) -> BasicBlockRef {
287         /*!
288          * Returns a block to branch to which will perform all pending
289          * cleanups and then return from this function
290          */
291
292         self.trans_cleanups_to_exit_scope(ReturnExit)
293     }
294
295     fn schedule_lifetime_end(&self,
296                              cleanup_scope: ScopeId,
297                              val: ValueRef) {
298         let drop = box LifetimeEnd {
299             ptr: val,
300         };
301
302         debug!("schedule_lifetime_end({}, val={})",
303                cleanup_scope,
304                self.ccx.tn().val_to_string(val));
305
306         self.schedule_clean(cleanup_scope, drop as CleanupObj);
307     }
308
309     fn schedule_drop_mem(&self,
310                          cleanup_scope: ScopeId,
311                          val: ValueRef,
312                          ty: Ty<'tcx>) {
313         /*!
314          * Schedules a (deep) drop of `val`, which is a pointer to an
315          * instance of `ty`
316          */
317
        // Types with no drop glue need no cleanup at all.
318         if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; }
319         let drop = box DropValue {
320             is_immediate: false,
321             must_unwind: ty::type_needs_unwind_cleanup(self.ccx.tcx(), ty),
322             val: val,
323             ty: ty,
324             zero: false
325         };
326
327         debug!("schedule_drop_mem({}, val={}, ty={})",
328                cleanup_scope,
329                self.ccx.tn().val_to_string(val),
330                ty.repr(self.ccx.tcx()));
331
332         self.schedule_clean(cleanup_scope, drop as CleanupObj);
333     }
334
335     fn schedule_drop_and_zero_mem(&self,
336                                   cleanup_scope: ScopeId,
337                                   val: ValueRef,
338                                   ty: Ty<'tcx>) {
339         /*!
340          * Schedules a (deep) drop and zero-ing of `val`, which is a pointer
341          * to an instance of `ty`
342          */
343
344         if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; }
345         let drop = box DropValue {
346             is_immediate: false,
347             must_unwind: ty::type_needs_unwind_cleanup(self.ccx.tcx(), ty),
348             val: val,
349             ty: ty,
350             zero: true
351         };
352
353         debug!("schedule_drop_and_zero_mem({}, val={}, ty={}, zero={})",
354                cleanup_scope,
355                self.ccx.tn().val_to_string(val),
356                ty.repr(self.ccx.tcx()),
357                true);
358
359         self.schedule_clean(cleanup_scope, drop as CleanupObj);
360     }
361
362     fn schedule_drop_immediate(&self,
363                                cleanup_scope: ScopeId,
364                                val: ValueRef,
365                                ty: Ty<'tcx>) {
366         /*!
367          * Schedules a (deep) drop of `val`, which is an instance of `ty`
368          */
369
370         if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; }
371         let drop = box DropValue {
372             is_immediate: true,
373             must_unwind: ty::type_needs_unwind_cleanup(self.ccx.tcx(), ty),
374             val: val,
375             ty: ty,
376             zero: false
377         };
378
379         debug!("schedule_drop_immediate({}, val={}, ty={})",
380                cleanup_scope,
381                self.ccx.tn().val_to_string(val),
382                ty.repr(self.ccx.tcx()));
383
384         self.schedule_clean(cleanup_scope, drop as CleanupObj);
385     }
386
387     fn schedule_free_value(&self,
388                            cleanup_scope: ScopeId,
389                            val: ValueRef,
390                            heap: Heap,
391                            content_ty: Ty<'tcx>) {
392         /*!
393          * Schedules a call to `free(val)`. Note that this is a shallow
394          * operation.
395          */
396
397         let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };
398
399         debug!("schedule_free_value({}, val={}, heap={})",
400                cleanup_scope,
401                self.ccx.tn().val_to_string(val),
402                heap);
403
404         self.schedule_clean(cleanup_scope, drop as CleanupObj);
405     }
406
407     fn schedule_free_slice(&self,
408                            cleanup_scope: ScopeId,
409                            val: ValueRef,
410                            size: ValueRef,
411                            align: ValueRef,
412                            heap: Heap) {
413         /*!
414          * Schedules a call to `free(val)`. Note that this is a shallow
415          * operation.
416          */
417
418         let drop = box FreeSlice { ptr: val, size: size, align: align, heap: heap };
419
420         debug!("schedule_free_slice({}, val={}, heap={})",
421                cleanup_scope,
422                self.ccx.tn().val_to_string(val),
423                heap);
424
425         self.schedule_clean(cleanup_scope, drop as CleanupObj);
426     }
427
428     fn schedule_clean(&self,
429                       cleanup_scope: ScopeId,
430                       cleanup: CleanupObj<'tcx>) {
431         match cleanup_scope {
432             AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
433             CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
434         }
435     }
436
437     fn schedule_clean_in_ast_scope(&self,
438                                    cleanup_scope: ast::NodeId,
439                                    cleanup: CleanupObj<'tcx>) {
440         /*!
441          * Schedules a cleanup to occur upon exit from `cleanup_scope`.
442          * If `cleanup_scope` is not provided, then the cleanup is scheduled
443          * in the topmost scope, which must be a temporary scope.
444          */
445
446         debug!("schedule_clean_in_ast_scope(cleanup_scope={})",
447                cleanup_scope);
448
        // Search inner-to-outer for the target AST scope. Every scope we
        // pass through (including the target) has its cached early exits
        // invalidated, since a new cleanup now intervenes on those paths.
449         for scope in self.scopes.borrow_mut().iter_mut().rev() {
450             if scope.kind.is_ast_with_id(cleanup_scope) {
451                 scope.cleanups.push(cleanup);
452                 scope.clear_cached_exits();
453                 return;
454             } else {
455                 // will be adding a cleanup to some enclosing scope
456                 scope.clear_cached_exits();
457             }
458         }
459
460         self.ccx.sess().bug(
461             format!("no cleanup scope {} found",
462                     self.ccx.tcx().map.node_to_string(cleanup_scope)).as_slice());
463     }
464
465     fn schedule_clean_in_custom_scope(&self,
466                                       custom_scope: CustomScopeIndex,
467                                       cleanup: CleanupObj<'tcx>) {
468         /*!
469          * Schedules a cleanup to occur in the top-most scope,
470          * which must be a temporary scope.
471          */
472
473         debug!("schedule_clean_in_custom_scope(custom_scope={})",
474                custom_scope.index);
475
476         assert!(self.is_valid_custom_scope(custom_scope));
477
478         let mut scopes = self.scopes.borrow_mut();
479         let scope = &mut (*scopes)[custom_scope.index];
480         scope.cleanups.push(cleanup);
481         scope.clear_cached_exits();
482     }
483
484     fn needs_invoke(&self) -> bool {
485         /*!
486          * Returns true if there are pending cleanups that should
487          * execute on panic.
488          */
489
490         self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
491     }
492
493     fn get_landing_pad(&'blk self) -> BasicBlockRef {
494         /*!
495          * Returns a basic block to branch to in the event of a panic.
496          * This block will run the panic cleanups and eventually
497          * invoke the LLVM `Resume` instruction.
498          */
499
500         let _icx = base::push_ctxt("get_landing_pad");
501
502         debug!("get_landing_pad");
503
504         let orig_scopes_len = self.scopes_len();
505         assert!(orig_scopes_len > 0);
506
507         // Remove any scopes that do not have cleanups on panic:
508         let mut popped_scopes = vec!();
509         while !self.top_scope(|s| s.needs_invoke()) {
510             debug!("top scope does not need invoke");
511             popped_scopes.push(self.pop_scope());
512         }
513
514         // Check for an existing landing pad in the new topmost scope:
515         let llbb = self.get_or_create_landing_pad();
516
        // Restore the stack exactly as we found it (asserted below).
517         // Push the scopes we removed back on:
518         loop {
519             match popped_scopes.pop() {
520                 Some(scope) => self.push_scope(scope),
521                 None => break
522             }
523         }
524
525         assert_eq!(self.scopes_len(), orig_scopes_len);
526
527         return llbb;
528     }
529 }
530
// Internal helpers backing the public CleanupMethods API: raw scope-stack
// manipulation and the code generation for exit paths and landing pads.
531 impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
532     fn top_ast_scope(&self) -> Option<ast::NodeId> {
533         /*!
534          * Returns the id of the current top-most AST scope, if any.
535          */
536         for scope in self.scopes.borrow().iter().rev() {
537             match scope.kind {
538                 CustomScopeKind | LoopScopeKind(..) => {}
539                 AstScopeKind(i) => {
540                     return Some(i);
541                 }
542             }
543         }
544         None
545     }
546
547     fn top_nonempty_cleanup_scope(&self) -> Option<uint> {
548         self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())
549     }
550
551     fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
552         self.is_valid_custom_scope(custom_scope) &&
553             custom_scope.index == self.scopes.borrow().len() - 1
554     }
555
556     fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
557         let scopes = self.scopes.borrow();
558         custom_scope.index < scopes.len() &&
559             (*scopes)[custom_scope.index].kind.is_temp()
560     }
561
562     fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
563                             bcx: Block<'blk, 'tcx>,
564                             scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> {
565         /*! Generates the cleanups for `scope` into `bcx` */
566
567         let mut bcx = bcx;
568         if !bcx.unreachable.get() {
            // Cleanups run in reverse order of scheduling (LIFO).
569             for cleanup in scope.cleanups.iter().rev() {
570                 bcx = cleanup.trans(bcx, scope.debug_loc);
571             }
572         }
573         bcx
574     }
575
576     fn scopes_len(&self) -> uint {
577         self.scopes.borrow().len()
578     }
579
580     fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
581         self.scopes.borrow_mut().push(scope)
582     }
583
584     fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
585         debug!("popping cleanup scope {}, {} scopes remaining",
586                self.top_scope(|s| s.block_name("")),
587                self.scopes_len() - 1);
588
589         self.scopes.borrow_mut().pop().unwrap()
590     }
591
592     fn top_scope<R>(&self, f: |&CleanupScope<'blk, 'tcx>| -> R) -> R {
593         f(self.scopes.borrow().last().unwrap())
594     }
595
596     fn trans_cleanups_to_exit_scope(&'blk self,
597                                     label: EarlyExitLabel)
598                                     -> BasicBlockRef {
599         /*!
600          * Used when the caller wishes to jump to an early exit, such
601          * as a return, break, continue, or unwind. This function will
602          * generate all cleanups between the top of the stack and the
603          * exit `label` and return a basic block that the caller can
604          * branch to.
605          *
606          * For example, if the current stack of cleanups were as follows:
607          *
608          *      AST 22
609          *      Custom 1
610          *      AST 23
611          *      Loop 23
612          *      Custom 2
613          *      AST 24
614          *
615          * and the `label` specifies a break from `Loop 23`, then this
616          * function would generate a series of basic blocks as follows:
617          *
618          *      Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
619          *
620          * where `break_blk` is the block specified in `Loop 23` as
621          * the target for breaks. The return value would be the first
622          * basic block in that sequence (`Cleanup(AST 24)`). The
623          * caller could then branch to `Cleanup(AST 24)` and it will
624          * perform all cleanups and finally branch to the `break_blk`.
625          */
626
627         debug!("trans_cleanups_to_exit_scope label={} scopes={}",
628                label, self.scopes_len());
629
630         let orig_scopes_len = self.scopes_len();
        // Deliberately uninitialized here: every `break` out of the loop
        // below assigns it before first use.
631         let mut prev_llbb;
632         let mut popped_scopes = vec!();
633
634         // First we pop off all the cleanup stacks that are
635         // traversed until the exit is reached, pushing them
636         // onto the side vector `popped_scopes`. No code is
637         // generated at this time.
638         //
639         // So, continuing the example from above, we would wind up
640         // with a `popped_scopes` vector of `[AST 24, Custom 2]`.
641         // (Presuming that there are no cached exits)
642         loop {
643             if self.scopes_len() == 0 {
644                 match label {
645                     UnwindExit => {
646                         // Generate a block that will `Resume`.
647                         let prev_bcx = self.new_block(true, "resume", None);
648                         let personality = self.personality.get().expect(
649                             "create_landing_pad() should have set this");
650                         build::Resume(prev_bcx,
651                                       build::Load(prev_bcx, personality));
652                         prev_llbb = prev_bcx.llbb;
653                         break;
654                     }
655
656                     ReturnExit => {
657                         prev_llbb = self.get_llreturn();
658                         break;
659                     }
660
661                     LoopExit(id, _) => {
662                         self.ccx.sess().bug(format!(
663                                 "cannot exit from scope {}, \
664                                 not in scope", id).as_slice());
665                     }
666                 }
667             }
668
669             // Check if we have already cached the unwinding of this
670             // scope for this label. If so, we can stop popping scopes
671             // and branch to the cached label, since it contains the
672             // cleanups for any subsequent scopes.
673             match self.top_scope(|s| s.cached_early_exit(label)) {
674                 Some(cleanup_block) => {
675                     prev_llbb = cleanup_block;
676                     break;
677                 }
678                 None => { }
679             }
680
681             // Pop off the scope, since we will be generating
682             // unwinding code for it. If we are searching for a loop exit,
683             // and this scope is that loop, then stop popping and set
684             // `prev_llbb` to the appropriate exit block from the loop.
685             popped_scopes.push(self.pop_scope());
686             let scope = popped_scopes.last().unwrap();
687             match label {
688                 UnwindExit | ReturnExit => { }
689                 LoopExit(id, exit) => {
690                     match scope.kind.early_exit_block(id, exit) {
691                         Some(exitllbb) => {
692                             prev_llbb = exitllbb;
693                             break;
694                         }
695
696                         None => { }
697                     }
698                 }
699             }
700         }
701
702         debug!("trans_cleanups_to_exit_scope: popped {} scopes",
703                popped_scopes.len());
704
705         // Now push the popped scopes back on. As we go,
706         // we track in `prev_llbb` the exit to which this scope
707         // should branch when it's done.
708         //
709         // So, continuing with our example, we will start out with
710         // `prev_llbb` being set to `break_blk` (or possibly a cached
711         // early exit). We will then pop the scopes from `popped_scopes`
712         // and generate a basic block for each one, prepending it in the
713         // series and updating `prev_llbb`. So we begin by popping `Custom 2`
714         // and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)`
715         // branch to `prev_llbb == break_blk`, giving us a sequence like:
716         //
717         //     Cleanup(Custom 2) -> prev_llbb
718         //
719         // We then pop `AST 24` and repeat the process, giving us the sequence:
720         //
721         //     Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb
722         //
723         // At this point, `popped_scopes` is empty, and so the final block
724         // that we return to the user is `Cleanup(AST 24)`.
725         while !popped_scopes.is_empty() {
726             let mut scope = popped_scopes.pop().unwrap();
727
728             if scope.cleanups.iter().any(|c| cleanup_is_suitable_for(&**c, label))
729             {
730                 let name = scope.block_name("clean");
731                 debug!("generating cleanups for {}", name);
732                 let bcx_in = self.new_block(label.is_unwind(),
733                                             name.as_slice(),
734                                             None);
735                 let mut bcx_out = bcx_in;
736                 for cleanup in scope.cleanups.iter().rev() {
737                     if cleanup_is_suitable_for(&**cleanup, label) {
738                         bcx_out = cleanup.trans(bcx_out,
739                                                 scope.debug_loc);
740                     }
741                 }
742                 build::Br(bcx_out, prev_llbb);
743                 prev_llbb = bcx_in.llbb;
744             } else {
745                 debug!("no suitable cleanups in {}",
746                        scope.block_name("clean"));
747             }
748
            // Remember the entry block for this (scope, label) pair so a
            // later exit through the same label can reuse it.
749             scope.add_cached_early_exit(label, prev_llbb);
750             self.push_scope(scope);
751         }
752
753         debug!("trans_cleanups_to_exit_scope: prev_llbb={}", prev_llbb);
754
755         assert_eq!(self.scopes_len(), orig_scopes_len);
756         prev_llbb
757     }
758
759     fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
760         /*!
761          * Creates a landing pad for the top scope, if one does not
762          * exist.  The landing pad will perform all cleanups necessary
763          * for an unwind and then `resume` to continue error
764          * propagation:
765          *
766          *     landing_pad -> ... cleanups ... -> [resume]
767          *
768          * (The cleanups and resume instruction are created by
769          * `trans_cleanups_to_exit_scope()`, not in this function
770          * itself.)
771          */
772
773         let pad_bcx;
774
775         debug!("get_or_create_landing_pad");
776
777         // Check if a landing pad block exists; if not, create one.
        // Inner block scopes the mutable borrow of `self.scopes` so it is
        // released before any code generation below.
778         {
779             let mut scopes = self.scopes.borrow_mut();
780             let last_scope = scopes.last_mut().unwrap();
781             match last_scope.cached_landing_pad {
782                 Some(llbb) => { return llbb; }
783                 None => {
784                     let name = last_scope.block_name("unwind");
785                     pad_bcx = self.new_block(true, name.as_slice(), None);
786                     last_scope.cached_landing_pad = Some(pad_bcx.llbb);
787                 }
788             }
789         }
790
791         // The landing pad return type (the type being propagated). Not sure what
792         // this represents but it's determined by the personality function and
793         // this is what the EH proposal example uses.
794         let llretty = Type::struct_(self.ccx,
795                                     &[Type::i8p(self.ccx), Type::i32(self.ccx)],
796                                     false);
797
798         // The exception handling personality function.
799         //
800         // If our compilation unit has the `eh_personality` lang item somewhere
801         // within it, then we just need to translate that. Otherwise, we're
802         // building an rlib which will depend on some upstream implementation of
803         // this function, so we just codegen a generic reference to it. We don't
804         // specify any of the types for the function, we just make it a symbol
805         // that LLVM can later use.
806         let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() {
807             Some(def_id) => callee::trans_fn_ref(pad_bcx, def_id, ExprId(0)),
808             None => {
809                 let mut personality = self.ccx.eh_personality().borrow_mut();
810                 match *personality {
811                     Some(llpersonality) => llpersonality,
812                     None => {
813                         let fty = Type::variadic_func(&[], &Type::i32(self.ccx));
814                         let f = base::decl_cdecl_fn(self.ccx,
815                                                     "rust_eh_personality",
816                                                     fty,
817                                                     ty::mk_i32());
818                         *personality = Some(f);
819                         f
820                     }
821                 }
822             }
823         };
824
825         // The only landing pad clause will be 'cleanup'
826         let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1u);
827
828         // The landing pad block is a cleanup
829         build::SetCleanup(pad_bcx, llretval);
830
831         // We store the retval in a function-central alloca, so that calls to
832         // Resume can find it.
833         match self.personality.get() {
834             Some(addr) => {
835                 build::Store(pad_bcx, llretval, addr);
836             }
837             None => {
838                 let addr = base::alloca(pad_bcx, common::val_ty(llretval), "");
839                 self.personality.set(Some(addr));
840                 build::Store(pad_bcx, llretval, addr);
841             }
842         }
843
844         // Generate the cleanup block and branch to it.
845         let cleanup_llbb = self.trans_cleanups_to_exit_scope(UnwindExit);
846         build::Br(pad_bcx, cleanup_llbb);
847
848         return pad_bcx.llbb;
849     }
850 }
851
852 impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
853     fn new(kind: CleanupScopeKind<'blk, 'tcx>,
854            debug_loc: Option<NodeInfo>)
855         -> CleanupScope<'blk, 'tcx> {
856         CleanupScope {
857             kind: kind,
858             debug_loc: debug_loc,
859             cleanups: vec!(),
860             cached_early_exits: vec!(),
861             cached_landing_pad: None,
862         }
863     }
864
865     fn clear_cached_exits(&mut self) {
866         self.cached_early_exits = vec!();
867         self.cached_landing_pad = None;
868     }
869
870     fn cached_early_exit(&self,
871                          label: EarlyExitLabel)
872                          -> Option<BasicBlockRef> {
873         self.cached_early_exits.iter().
874             find(|e| e.label == label).
875             map(|e| e.cleanup_block)
876     }
877
878     fn add_cached_early_exit(&mut self,
879                              label: EarlyExitLabel,
880                              blk: BasicBlockRef) {
881         self.cached_early_exits.push(
882             CachedEarlyExit { label: label,
883                               cleanup_block: blk });
884     }
885
886     fn needs_invoke(&self) -> bool {
887         /*! True if this scope has cleanups that need unwinding */
888
889         self.cached_landing_pad.is_some() ||
890             self.cleanups.iter().any(|c| c.must_unwind())
891     }
892
893     fn block_name(&self, prefix: &str) -> String {
894         /*!
895          * Returns a suitable name to use for the basic block that
896          * handles this cleanup scope
897          */
898
899         match self.kind {
900             CustomScopeKind => format!("{}_custom_", prefix),
901             AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
902             LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id),
903         }
904     }
905
906     pub fn drop_non_lifetime_clean(&mut self) {
907         self.cleanups.retain(|c| c.is_lifetime_end());
908     }
909 }
910
911 impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
912     fn is_temp(&self) -> bool {
913         match *self {
914             CustomScopeKind => true,
915             LoopScopeKind(..) | AstScopeKind(..) => false,
916         }
917     }
918
919     fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
920         match *self {
921             CustomScopeKind | LoopScopeKind(..) => false,
922             AstScopeKind(i) => i == id
923         }
924     }
925
926     fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
927         match *self {
928             CustomScopeKind | AstScopeKind(..) => false,
929             LoopScopeKind(i, _) => i == id
930         }
931     }
932
933     fn early_exit_block(&self,
934                         id: ast::NodeId,
935                         exit: uint) -> Option<BasicBlockRef> {
936         /*!
937          * If this is a loop scope with id `id`, return the early
938          * exit block `exit`, else `None`
939          */
940
941         match *self {
942             LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
943             _ => None,
944         }
945     }
946 }
947
impl EarlyExitLabel {
    /// True if this label is the unwind path (`UnwindExit`), as
    /// opposed to any other kind of early exit.
    fn is_unwind(&self) -> bool {
        match *self {
            UnwindExit => true,
            _ => false
        }
    }
}
956
957 ///////////////////////////////////////////////////////////////////////////
958 // Cleanup types
959
/// A scheduled cleanup that runs the drop glue for a value when its
/// scope is exited (see the `Cleanup` impl below).
pub struct DropValue<'tcx> {
    // If true, `val` is the value itself and is dropped via
    // `glue::drop_ty_immediate`; otherwise `val` is a pointer to the
    // value and `glue::drop_ty` is used.
    is_immediate: bool,
    // Returned from both `must_unwind()` and `clean_on_unwind()`:
    // whether this drop also runs on the unwind path.
    must_unwind: bool,
    val: ValueRef,
    ty: Ty<'tcx>,
    // If true, the memory is zeroed (base::zero_mem) after the drop.
    zero: bool
}
967
968 impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
969     fn must_unwind(&self) -> bool {
970         self.must_unwind
971     }
972
973     fn clean_on_unwind(&self) -> bool {
974         self.must_unwind
975     }
976
977     fn is_lifetime_end(&self) -> bool {
978         false
979     }
980
981     fn trans<'blk>(&self,
982                    bcx: Block<'blk, 'tcx>,
983                    debug_loc: Option<NodeInfo>)
984                    -> Block<'blk, 'tcx> {
985         let bcx = if self.is_immediate {
986             glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc)
987         } else {
988             glue::drop_ty(bcx, self.val, self.ty, debug_loc)
989         };
990         if self.zero {
991             base::zero_mem(bcx, self.val, self.ty);
992         }
993         bcx
994     }
995 }
996
// Identifies which heap a scheduled free refers to; only the exchange
// heap remains at this point.
#[deriving(Show)]
pub enum Heap {
    HeapExchange
}
1001
/// A scheduled cleanup that frees a single heap allocation
/// (via `glue::trans_exchange_free_ty` — see the `Cleanup` impl below).
pub struct FreeValue<'tcx> {
    // Pointer to the allocation to free.
    ptr: ValueRef,
    // Which heap `ptr` was allocated on.
    heap: Heap,
    // Type of the allocation's contents; passed to the free glue
    // (presumably to determine size/alignment — see trans_exchange_free_ty).
    content_ty: Ty<'tcx>
}
1007
1008 impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> {
1009     fn must_unwind(&self) -> bool {
1010         true
1011     }
1012
1013     fn clean_on_unwind(&self) -> bool {
1014         true
1015     }
1016
1017     fn is_lifetime_end(&self) -> bool {
1018         false
1019     }
1020
1021     fn trans<'blk>(&self,
1022                    bcx: Block<'blk, 'tcx>,
1023                    debug_loc: Option<NodeInfo>)
1024                    -> Block<'blk, 'tcx> {
1025         apply_debug_loc(bcx.fcx, debug_loc);
1026
1027         match self.heap {
1028             HeapExchange => {
1029                 glue::trans_exchange_free_ty(bcx, self.ptr, self.content_ty)
1030             }
1031         }
1032     }
1033 }
1034
/// A scheduled cleanup that frees a dynamically sized allocation
/// (via `glue::trans_exchange_free_dyn` — see the `Cleanup` impl below).
pub struct FreeSlice {
    // Pointer to the allocation to free.
    ptr: ValueRef,
    // Size of the allocation, in bytes (runtime value).
    size: ValueRef,
    // Alignment of the allocation (runtime value).
    align: ValueRef,
    // Which heap `ptr` was allocated on.
    heap: Heap,
}
1041
1042 impl<'tcx> Cleanup<'tcx> for FreeSlice {
1043     fn must_unwind(&self) -> bool {
1044         true
1045     }
1046
1047     fn clean_on_unwind(&self) -> bool {
1048         true
1049     }
1050
1051     fn is_lifetime_end(&self) -> bool {
1052         false
1053     }
1054
1055     fn trans<'blk, 'tcx>(&self,
1056                          bcx: Block<'blk, 'tcx>,
1057                          debug_loc: Option<NodeInfo>)
1058                       -> Block<'blk, 'tcx> {
1059         apply_debug_loc(bcx.fcx, debug_loc);
1060
1061         match self.heap {
1062             HeapExchange => {
1063                 glue::trans_exchange_free_dyn(bcx, self.ptr, self.size, self.align)
1064             }
1065         }
1066     }
1067 }
1068
/// A scheduled cleanup that emits an LLVM lifetime-end marker
/// (`base::call_lifetime_end`) for a stack slot; it frees nothing.
pub struct LifetimeEnd {
    // Pointer whose lifetime is being ended.
    ptr: ValueRef,
}
1072
1073 impl<'tcx> Cleanup<'tcx> for LifetimeEnd {
1074     fn must_unwind(&self) -> bool {
1075         false
1076     }
1077
1078     fn clean_on_unwind(&self) -> bool {
1079         true
1080     }
1081
1082     fn is_lifetime_end(&self) -> bool {
1083         true
1084     }
1085
1086     fn trans<'blk, 'tcx>(&self,
1087                          bcx: Block<'blk, 'tcx>,
1088                          debug_loc: Option<NodeInfo>)
1089                       -> Block<'blk, 'tcx> {
1090         apply_debug_loc(bcx.fcx, debug_loc);
1091         base::call_lifetime_end(bcx, self.ptr);
1092         bcx
1093     }
1094 }
1095
1096 pub fn temporary_scope(tcx: &ty::ctxt,
1097                        id: ast::NodeId)
1098                        -> ScopeId {
1099     match tcx.region_maps.temporary_scope(id) {
1100         Some(scope) => {
1101             let r = AstScope(scope.node_id());
1102             debug!("temporary_scope({}) = {}", id, r);
1103             r
1104         }
1105         None => {
1106             tcx.sess.bug(format!("no temporary scope available for expr {}",
1107                                  id).as_slice())
1108         }
1109     }
1110 }
1111
1112 pub fn var_scope(tcx: &ty::ctxt,
1113                  id: ast::NodeId)
1114                  -> ScopeId {
1115     let r = AstScope(tcx.region_maps.var_scope(id).node_id());
1116     debug!("var_scope({}) = {}", id, r);
1117     r
1118 }
1119
1120 fn cleanup_is_suitable_for(c: &Cleanup,
1121                            label: EarlyExitLabel) -> bool {
1122     !label.is_unwind() || c.clean_on_unwind()
1123 }
1124
1125 fn apply_debug_loc(fcx: &FunctionContext, debug_loc: Option<NodeInfo>) {
1126     match debug_loc {
1127         Some(ref src_loc) => {
1128             debuginfo::set_source_location(fcx, src_loc.id, src_loc.span);
1129         }
1130         None => {
1131             debuginfo::clear_source_location(fcx);
1132         }
1133     }
1134 }
1135
1136 ///////////////////////////////////////////////////////////////////////////
1137 // These traits just exist to put the methods into this file.
1138
/// Public interface for managing cleanup scopes during translation:
/// pushing/popping scopes, scheduling cleanups within them, and
/// obtaining exit blocks. Implemented on `FunctionContext` elsewhere
/// in this file (the impl is outside this excerpt — confirm there).
pub trait CleanupMethods<'blk, 'tcx> {
    // --- Scope push operations ---
    fn push_ast_cleanup_scope(&self, id: NodeInfo);
    fn push_loop_cleanup_scope(&self,
                               id: ast::NodeId,
                               exits: [Block<'blk, 'tcx>, ..EXIT_MAX]);
    fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
    fn push_custom_cleanup_scope_with_debug_loc(&self,
                                                debug_loc: NodeInfo)
                                                -> CustomScopeIndex;
    // --- Scope pop operations; the `trans` variants also emit the
    // scope's cleanups into `bcx` and return the resulting block ---
    fn pop_and_trans_ast_cleanup_scope(&self,
                                              bcx: Block<'blk, 'tcx>,
                                              cleanup_scope: ast::NodeId)
                                              -> Block<'blk, 'tcx>;
    fn pop_loop_cleanup_scope(&self,
                              cleanup_scope: ast::NodeId);
    fn pop_custom_cleanup_scope(&self,
                                custom_scope: CustomScopeIndex);
    fn pop_and_trans_custom_cleanup_scope(&self,
                                          bcx: Block<'blk, 'tcx>,
                                          custom_scope: CustomScopeIndex)
                                          -> Block<'blk, 'tcx>;
    fn top_loop_scope(&self) -> ast::NodeId;
    // --- Exit blocks: basic blocks that run the necessary cleanups
    // before leaving via a normal exit or a return ---
    fn normal_exit_block(&'blk self,
                         cleanup_scope: ast::NodeId,
                         exit: uint) -> BasicBlockRef;
    fn return_exit_block(&'blk self) -> BasicBlockRef;
    // --- Scheduling cleanups in a scope; these correspond to the
    // `LifetimeEnd`, `DropValue`, `FreeValue` and `FreeSlice`
    // cleanup types defined above ---
    fn schedule_lifetime_end(&self,
                         cleanup_scope: ScopeId,
                         val: ValueRef);
    fn schedule_drop_mem(&self,
                         cleanup_scope: ScopeId,
                         val: ValueRef,
                         ty: Ty<'tcx>);
    fn schedule_drop_and_zero_mem(&self,
                                  cleanup_scope: ScopeId,
                                  val: ValueRef,
                                  ty: Ty<'tcx>);
    fn schedule_drop_immediate(&self,
                               cleanup_scope: ScopeId,
                               val: ValueRef,
                               ty: Ty<'tcx>);
    fn schedule_free_value(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           heap: Heap,
                           content_ty: Ty<'tcx>);
    fn schedule_free_slice(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           size: ValueRef,
                           align: ValueRef,
                           heap: Heap);
    // --- Scheduling an arbitrary boxed cleanup object ---
    fn schedule_clean(&self,
                      cleanup_scope: ScopeId,
                      cleanup: CleanupObj<'tcx>);
    fn schedule_clean_in_ast_scope(&self,
                                   cleanup_scope: ast::NodeId,
                                   cleanup: CleanupObj<'tcx>);
    fn schedule_clean_in_custom_scope(&self,
                                    custom_scope: CustomScopeIndex,
                                    cleanup: CleanupObj<'tcx>);
    // --- Queries ---
    fn needs_invoke(&self) -> bool;
    fn get_landing_pad(&'blk self) -> BasicBlockRef;
}
1203
/// Private helper interface used by the `CleanupMethods` impl:
/// scope-stack inspection and the machinery that actually emits
/// cleanup blocks and the landing pad. The impl is outside this
/// excerpt — confirm there.
trait CleanupHelperMethods<'blk, 'tcx> {
    fn top_ast_scope(&self) -> Option<ast::NodeId>;
    fn top_nonempty_cleanup_scope(&self) -> Option<uint>;
    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    // Emits the cleanups of a single scope into `bcx`.
    fn trans_scope_cleanups(&self,
                            bcx: Block<'blk, 'tcx>,
                            scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
    // Builds (or reuses) the chain of cleanup blocks leading out of
    // the scope stack toward the exit identified by `label`.
    fn trans_cleanups_to_exit_scope(&'blk self,
                                    label: EarlyExitLabel)
                                    -> BasicBlockRef;
    // See `get_or_create_landing_pad` earlier in this file.
    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
    // --- Raw scope-stack manipulation ---
    fn scopes_len(&self) -> uint;
    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
    fn top_scope<R>(&self, f: |&CleanupScope<'blk, 'tcx>| -> R) -> R;
}