// Archived copy of rust.git: src/librustc/middle/trans/cleanup.rs
// (snapshot taken at commit "std: Remove format_strbuf!()")
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/*!
 * Code pertaining to cleanup of temporaries as well as execution of
 * drop glue. See discussion in `doc.rs` for a high-level summary.
 */

use lib::llvm::{BasicBlockRef, ValueRef};
use middle::trans::base;
use middle::trans::build;
use middle::trans::callee;
use middle::trans::common;
use middle::trans::common::{Block, FunctionContext, ExprId};
use middle::trans::glue;
use middle::trans::type_::Type;
use middle::ty;
use syntax::ast;
use util::ppaux::Repr;
27
28
29 pub struct CleanupScope<'a> {
30     // The id of this cleanup scope. If the id is None,
31     // this is a *temporary scope* that is pushed during trans to
32     // cleanup miscellaneous garbage that trans may generate whose
33     // lifetime is a subset of some expression.  See module doc for
34     // more details.
35     kind: CleanupScopeKind<'a>,
36
37     // Cleanups to run upon scope exit.
38     cleanups: Vec<Box<Cleanup>>,
39
40     cached_early_exits: Vec<CachedEarlyExit>,
41     cached_landing_pad: Option<BasicBlockRef>,
42 }
43
44 pub struct CustomScopeIndex {
45     index: uint
46 }
47
48 pub static EXIT_BREAK: uint = 0;
49 pub static EXIT_LOOP: uint = 1;
50 pub static EXIT_MAX: uint = 2;
51
52 pub enum CleanupScopeKind<'a> {
53     CustomScopeKind,
54     AstScopeKind(ast::NodeId),
55     LoopScopeKind(ast::NodeId, [&'a Block<'a>, ..EXIT_MAX])
56 }
57
58 #[deriving(Eq)]
59 pub enum EarlyExitLabel {
60     UnwindExit,
61     ReturnExit,
62     LoopExit(ast::NodeId, uint)
63 }
64
65 pub struct CachedEarlyExit {
66     label: EarlyExitLabel,
67     cleanup_block: BasicBlockRef,
68 }
69
70 pub trait Cleanup {
71     fn clean_on_unwind(&self) -> bool;
72     fn trans<'a>(&self, bcx: &'a Block<'a>) -> &'a Block<'a>;
73 }
74
75 pub enum ScopeId {
76     AstScope(ast::NodeId),
77     CustomScope(CustomScopeIndex)
78 }
79
80 impl<'a> CleanupMethods<'a> for FunctionContext<'a> {
81     fn push_ast_cleanup_scope(&self, id: ast::NodeId) {
82         /*!
83          * Invoked when we start to trans the code contained
84          * within a new cleanup scope.
85          */
86
87         debug!("push_ast_cleanup_scope({})",
88                self.ccx.tcx.map.node_to_str(id));
89
90         // FIXME(#2202) -- currently closure bodies have a parent
91         // region, which messes up the assertion below, since there
92         // are no cleanup scopes on the stack at the start of
93         // trans'ing a closure body.  I think though that this should
94         // eventually be fixed by closure bodies not having a parent
95         // region, though that's a touch unclear, and it might also be
96         // better just to narrow this assertion more (i.e., by
97         // excluding id's that correspond to closure bodies only). For
98         // now we just say that if there is already an AST scope on the stack,
99         // this new AST scope had better be its immediate child.
100         let top_scope = self.top_ast_scope();
101         if top_scope.is_some() {
102             assert_eq!(self.ccx.tcx.region_maps.opt_encl_scope(id), top_scope);
103         }
104
105         self.push_scope(CleanupScope::new(AstScopeKind(id)));
106     }
107
108     fn push_loop_cleanup_scope(&self,
109                                id: ast::NodeId,
110                                exits: [&'a Block<'a>, ..EXIT_MAX]) {
111         debug!("push_loop_cleanup_scope({})",
112                self.ccx.tcx.map.node_to_str(id));
113         assert_eq!(Some(id), self.top_ast_scope());
114
115         self.push_scope(CleanupScope::new(LoopScopeKind(id, exits)));
116     }
117
118     fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
119         let index = self.scopes_len();
120         debug!("push_custom_cleanup_scope(): {}", index);
121         self.push_scope(CleanupScope::new(CustomScopeKind));
122         CustomScopeIndex { index: index }
123     }
124
125     fn pop_and_trans_ast_cleanup_scope(&self,
126                                        bcx: &'a Block<'a>,
127                                        cleanup_scope: ast::NodeId)
128                                        -> &'a Block<'a> {
129         /*!
130          * Removes the cleanup scope for id `cleanup_scope`, which
131          * must be at the top of the cleanup stack, and generates the
132          * code to do its cleanups for normal exit.
133          */
134
135         debug!("pop_and_trans_ast_cleanup_scope({})",
136                self.ccx.tcx.map.node_to_str(cleanup_scope));
137
138         assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));
139
140         let scope = self.pop_scope();
141         self.trans_scope_cleanups(bcx, &scope)
142
143     }
144
145     fn pop_loop_cleanup_scope(&self,
146                               cleanup_scope: ast::NodeId) {
147         /*!
148          * Removes the loop cleanup scope for id `cleanup_scope`, which
149          * must be at the top of the cleanup stack. Does not generate
150          * any cleanup code, since loop scopes should exit by
151          * branching to a block generated by `normal_exit_block`.
152          */
153
154         debug!("pop_loop_cleanup_scope({})",
155                self.ccx.tcx.map.node_to_str(cleanup_scope));
156
157         assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));
158
159         let _ = self.pop_scope();
160     }
161
162     fn pop_custom_cleanup_scope(&self,
163                                 custom_scope: CustomScopeIndex) {
164         /*!
165          * Removes the top cleanup scope from the stack without
166          * executing its cleanups. The top cleanup scope must
167          * be the temporary scope `custom_scope`.
168          */
169
170         debug!("pop_custom_cleanup_scope({})", custom_scope.index);
171         assert!(self.is_valid_to_pop_custom_scope(custom_scope));
172         let _ = self.pop_scope();
173     }
174
175     fn pop_and_trans_custom_cleanup_scope(&self,
176                                         bcx: &'a Block<'a>,
177                                         custom_scope: CustomScopeIndex)
178                                         -> &'a Block<'a> {
179         /*!
180          * Removes the top cleanup scope from the stack, which must be
181          * a temporary scope, and generates the code to do its
182          * cleanups for normal exit.
183          */
184
185         debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope);
186         assert!(self.is_valid_to_pop_custom_scope(custom_scope));
187
188         let scope = self.pop_scope();
189         self.trans_scope_cleanups(bcx, &scope)
190     }
191
192     fn top_loop_scope(&self) -> ast::NodeId {
193         /*!
194          * Returns the id of the top-most loop scope
195          */
196
197         for scope in self.scopes.borrow().iter().rev() {
198             match scope.kind {
199                 LoopScopeKind(id, _) => {
200                     return id;
201                 }
202                 _ => {}
203             }
204         }
205         self.ccx.sess().bug("no loop scope found");
206     }
207
208     fn normal_exit_block(&'a self,
209                          cleanup_scope: ast::NodeId,
210                          exit: uint) -> BasicBlockRef {
211         /*!
212          * Returns a block to branch to which will perform all pending
213          * cleanups and then break/continue (depending on `exit`) out
214          * of the loop with id `cleanup_scope`
215          */
216
217         self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
218     }
219
220     fn return_exit_block(&'a self) -> BasicBlockRef {
221         /*!
222          * Returns a block to branch to which will perform all pending
223          * cleanups and then return from this function
224          */
225
226         self.trans_cleanups_to_exit_scope(ReturnExit)
227     }
228
229     fn schedule_drop_mem(&self,
230                          cleanup_scope: ScopeId,
231                          val: ValueRef,
232                          ty: ty::t) {
233         /*!
234          * Schedules a (deep) drop of `val`, which is a pointer to an
235          * instance of `ty`
236          */
237
238         if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; }
239         let drop = box DropValue {
240             is_immediate: false,
241             on_unwind: ty::type_needs_unwind_cleanup(self.ccx.tcx(), ty),
242             val: val,
243             ty: ty
244         };
245
246         debug!("schedule_drop_mem({:?}, val={}, ty={})",
247                cleanup_scope,
248                self.ccx.tn.val_to_str(val),
249                ty.repr(self.ccx.tcx()));
250
251         self.schedule_clean(cleanup_scope, drop as Box<Cleanup>);
252     }
253
254     fn schedule_drop_immediate(&self,
255                                cleanup_scope: ScopeId,
256                                val: ValueRef,
257                                ty: ty::t) {
258         /*!
259          * Schedules a (deep) drop of `val`, which is an instance of `ty`
260          */
261
262         if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; }
263         let drop = box DropValue {
264             is_immediate: true,
265             on_unwind: ty::type_needs_unwind_cleanup(self.ccx.tcx(), ty),
266             val: val,
267             ty: ty
268         };
269
270         debug!("schedule_drop_immediate({:?}, val={}, ty={})",
271                cleanup_scope,
272                self.ccx.tn.val_to_str(val),
273                ty.repr(self.ccx.tcx()));
274
275         self.schedule_clean(cleanup_scope, drop as Box<Cleanup>);
276     }
277
278     fn schedule_free_value(&self,
279                            cleanup_scope: ScopeId,
280                            val: ValueRef,
281                            heap: Heap,
282                            content_ty: ty::t) {
283         /*!
284          * Schedules a call to `free(val)`. Note that this is a shallow
285          * operation.
286          */
287
288         let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };
289
290         debug!("schedule_free_value({:?}, val={}, heap={:?})",
291                cleanup_scope,
292                self.ccx.tn.val_to_str(val),
293                heap);
294
295         self.schedule_clean(cleanup_scope, drop as Box<Cleanup>);
296     }
297
298     fn schedule_clean(&self,
299                       cleanup_scope: ScopeId,
300                       cleanup: Box<Cleanup>) {
301         match cleanup_scope {
302             AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
303             CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
304         }
305     }
306
307     fn schedule_clean_in_ast_scope(&self,
308                                    cleanup_scope: ast::NodeId,
309                                    cleanup: Box<Cleanup>) {
310         /*!
311          * Schedules a cleanup to occur upon exit from `cleanup_scope`.
312          * If `cleanup_scope` is not provided, then the cleanup is scheduled
313          * in the topmost scope, which must be a temporary scope.
314          */
315
316         debug!("schedule_clean_in_ast_scope(cleanup_scope={:?})",
317                cleanup_scope);
318
319         for scope in self.scopes.borrow_mut().mut_iter().rev() {
320             if scope.kind.is_ast_with_id(cleanup_scope) {
321                 scope.cleanups.push(cleanup);
322                 scope.clear_cached_exits();
323                 return;
324             } else {
325                 // will be adding a cleanup to some enclosing scope
326                 scope.clear_cached_exits();
327             }
328         }
329
330         self.ccx.sess().bug(
331             format!("no cleanup scope {} found",
332                     self.ccx.tcx.map.node_to_str(cleanup_scope)).as_slice());
333     }
334
335     fn schedule_clean_in_custom_scope(&self,
336                                       custom_scope: CustomScopeIndex,
337                                       cleanup: Box<Cleanup>) {
338         /*!
339          * Schedules a cleanup to occur in the top-most scope,
340          * which must be a temporary scope.
341          */
342
343         debug!("schedule_clean_in_custom_scope(custom_scope={})",
344                custom_scope.index);
345
346         assert!(self.is_valid_custom_scope(custom_scope));
347
348         let mut scopes = self.scopes.borrow_mut();
349         let scope = scopes.get_mut(custom_scope.index);
350         scope.cleanups.push(cleanup);
351         scope.clear_cached_exits();
352     }
353
354     fn needs_invoke(&self) -> bool {
355         /*!
356          * Returns true if there are pending cleanups that should
357          * execute on failure.
358          */
359
360         self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
361     }
362
363     fn get_landing_pad(&'a self) -> BasicBlockRef {
364         /*!
365          * Returns a basic block to branch to in the event of a failure.
366          * This block will run the failure cleanups and eventually
367          * invoke the LLVM `Resume` instruction.
368          */
369
370         let _icx = base::push_ctxt("get_landing_pad");
371
372         debug!("get_landing_pad");
373
374         let orig_scopes_len = self.scopes_len();
375         assert!(orig_scopes_len > 0);
376
377         // Remove any scopes that do not have cleanups on failure:
378         let mut popped_scopes = vec!();
379         while !self.top_scope(|s| s.needs_invoke()) {
380             debug!("top scope does not need invoke");
381             popped_scopes.push(self.pop_scope());
382         }
383
384         // Check for an existing landing pad in the new topmost scope:
385         let llbb = self.get_or_create_landing_pad();
386
387         // Push the scopes we removed back on:
388         loop {
389             match popped_scopes.pop() {
390                 Some(scope) => self.push_scope(scope),
391                 None => break
392             }
393         }
394
395         assert_eq!(self.scopes_len(), orig_scopes_len);
396
397         return llbb;
398     }
399 }
400
401 impl<'a> CleanupHelperMethods<'a> for FunctionContext<'a> {
402     fn top_ast_scope(&self) -> Option<ast::NodeId> {
403         /*!
404          * Returns the id of the current top-most AST scope, if any.
405          */
406         for scope in self.scopes.borrow().iter().rev() {
407             match scope.kind {
408                 CustomScopeKind | LoopScopeKind(..) => {}
409                 AstScopeKind(i) => {
410                     return Some(i);
411                 }
412             }
413         }
414         None
415     }
416
417     fn top_nonempty_cleanup_scope(&self) -> Option<uint> {
418         self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())
419     }
420
421     fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
422         self.is_valid_custom_scope(custom_scope) &&
423             custom_scope.index == self.scopes.borrow().len() - 1
424     }
425
426     fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
427         let scopes = self.scopes.borrow();
428         custom_scope.index < scopes.len() &&
429             scopes.get(custom_scope.index).kind.is_temp()
430     }
431
432     fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
433                             bcx: &'a Block<'a>,
434                             scope: &CleanupScope) -> &'a Block<'a> {
435         /*! Generates the cleanups for `scope` into `bcx` */
436
437         let mut bcx = bcx;
438         if !bcx.unreachable.get() {
439             for cleanup in scope.cleanups.iter().rev() {
440                 bcx = cleanup.trans(bcx);
441             }
442         }
443         bcx
444     }
445
446     fn scopes_len(&self) -> uint {
447         self.scopes.borrow().len()
448     }
449
450     fn push_scope(&self, scope: CleanupScope<'a>) {
451         self.scopes.borrow_mut().push(scope)
452     }
453
454     fn pop_scope(&self) -> CleanupScope<'a> {
455         debug!("popping cleanup scope {}, {} scopes remaining",
456                self.top_scope(|s| s.block_name("")),
457                self.scopes_len() - 1);
458
459         self.scopes.borrow_mut().pop().unwrap()
460     }
461
462     fn top_scope<R>(&self, f: |&CleanupScope<'a>| -> R) -> R {
463         f(self.scopes.borrow().last().unwrap())
464     }
465
466     fn trans_cleanups_to_exit_scope(&'a self,
467                                     label: EarlyExitLabel)
468                                     -> BasicBlockRef {
469         /*!
470          * Used when the caller wishes to jump to an early exit, such
471          * as a return, break, continue, or unwind. This function will
472          * generate all cleanups between the top of the stack and the
473          * exit `label` and return a basic block that the caller can
474          * branch to.
475          *
476          * For example, if the current stack of cleanups were as follows:
477          *
478          *      AST 22
479          *      Custom 1
480          *      AST 23
481          *      Loop 23
482          *      Custom 2
483          *      AST 24
484          *
485          * and the `label` specifies a break from `Loop 23`, then this
486          * function would generate a series of basic blocks as follows:
487          *
488          *      Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
489          *
490          * where `break_blk` is the block specified in `Loop 23` as
491          * the target for breaks. The return value would be the first
492          * basic block in that sequence (`Cleanup(AST 24)`). The
493          * caller could then branch to `Cleanup(AST 24)` and it will
494          * perform all cleanups and finally branch to the `break_blk`.
495          */
496
497         debug!("trans_cleanups_to_exit_scope label={:?} scopes={}",
498                label, self.scopes_len());
499
500         let orig_scopes_len = self.scopes_len();
501         let mut prev_llbb;
502         let mut popped_scopes = vec!();
503
504         // First we pop off all the cleanup stacks that are
505         // traversed until the exit is reached, pushing them
506         // onto the side vector `popped_scopes`. No code is
507         // generated at this time.
508         //
509         // So, continuing the example from above, we would wind up
510         // with a `popped_scopes` vector of `[AST 24, Custom 2]`.
511         // (Presuming that there are no cached exits)
512         loop {
513             if self.scopes_len() == 0 {
514                 match label {
515                     UnwindExit => {
516                         // Generate a block that will `Resume`.
517                         let prev_bcx = self.new_block(true, "resume", None);
518                         let personality = self.personality.get().expect(
519                             "create_landing_pad() should have set this");
520                         build::Resume(prev_bcx,
521                                       build::Load(prev_bcx, personality));
522                         prev_llbb = prev_bcx.llbb;
523                         break;
524                     }
525
526                     ReturnExit => {
527                         prev_llbb = self.get_llreturn();
528                         break;
529                     }
530
531                     LoopExit(id, _) => {
532                         self.ccx.sess().bug(format!(
533                                 "cannot exit from scope {:?}, \
534                                 not in scope", id).as_slice());
535                     }
536                 }
537             }
538
539             // Check if we have already cached the unwinding of this
540             // scope for this label. If so, we can stop popping scopes
541             // and branch to the cached label, since it contains the
542             // cleanups for any subsequent scopes.
543             match self.top_scope(|s| s.cached_early_exit(label)) {
544                 Some(cleanup_block) => {
545                     prev_llbb = cleanup_block;
546                     break;
547                 }
548                 None => { }
549             }
550
551             // Pop off the scope, since we will be generating
552             // unwinding code for it. If we are searching for a loop exit,
553             // and this scope is that loop, then stop popping and set
554             // `prev_llbb` to the appropriate exit block from the loop.
555             popped_scopes.push(self.pop_scope());
556             let scope = popped_scopes.last().unwrap();
557             match label {
558                 UnwindExit | ReturnExit => { }
559                 LoopExit(id, exit) => {
560                     match scope.kind.early_exit_block(id, exit) {
561                         Some(exitllbb) => {
562                             prev_llbb = exitllbb;
563                             break;
564                         }
565
566                         None => { }
567                     }
568                 }
569             }
570         }
571
572         debug!("trans_cleanups_to_exit_scope: popped {} scopes",
573                popped_scopes.len());
574
575         // Now push the popped scopes back on. As we go,
576         // we track in `prev_llbb` the exit to which this scope
577         // should branch when it's done.
578         //
579         // So, continuing with our example, we will start out with
580         // `prev_llbb` being set to `break_blk` (or possibly a cached
581         // early exit). We will then pop the scopes from `popped_scopes`
582         // and generate a basic block for each one, prepending it in the
583         // series and updating `prev_llbb`. So we begin by popping `Custom 2`
584         // and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)`
585         // branch to `prev_llbb == break_blk`, giving us a sequence like:
586         //
587         //     Cleanup(Custom 2) -> prev_llbb
588         //
589         // We then pop `AST 24` and repeat the process, giving us the sequence:
590         //
591         //     Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb
592         //
593         // At this point, `popped_scopes` is empty, and so the final block
594         // that we return to the user is `Cleanup(AST 24)`.
595         while !popped_scopes.is_empty() {
596             let mut scope = popped_scopes.pop().unwrap();
597
598             if scope.cleanups.iter().any(|c| cleanup_is_suitable_for(*c, label))
599             {
600                 let name = scope.block_name("clean");
601                 debug!("generating cleanups for {}", name);
602                 let bcx_in = self.new_block(label.is_unwind(),
603                                             name.as_slice(),
604                                             None);
605                 let mut bcx_out = bcx_in;
606                 for cleanup in scope.cleanups.iter().rev() {
607                     if cleanup_is_suitable_for(*cleanup, label) {
608                         bcx_out = cleanup.trans(bcx_out);
609                     }
610                 }
611                 build::Br(bcx_out, prev_llbb);
612                 prev_llbb = bcx_in.llbb;
613             } else {
614                 debug!("no suitable cleanups in {}",
615                        scope.block_name("clean"));
616             }
617
618             scope.add_cached_early_exit(label, prev_llbb);
619             self.push_scope(scope);
620         }
621
622         debug!("trans_cleanups_to_exit_scope: prev_llbb={}", prev_llbb);
623
624         assert_eq!(self.scopes_len(), orig_scopes_len);
625         prev_llbb
626     }
627
628     fn get_or_create_landing_pad(&'a self) -> BasicBlockRef {
629         /*!
630          * Creates a landing pad for the top scope, if one does not
631          * exist.  The landing pad will perform all cleanups necessary
632          * for an unwind and then `resume` to continue error
633          * propagation:
634          *
635          *     landing_pad -> ... cleanups ... -> [resume]
636          *
637          * (The cleanups and resume instruction are created by
638          * `trans_cleanups_to_exit_scope()`, not in this function
639          * itself.)
640          */
641
642         let pad_bcx;
643
644         debug!("get_or_create_landing_pad");
645
646         // Check if a landing pad block exists; if not, create one.
647         {
648             let mut scopes = self.scopes.borrow_mut();
649             let last_scope = scopes.mut_last().unwrap();
650             match last_scope.cached_landing_pad {
651                 Some(llbb) => { return llbb; }
652                 None => {
653                     let name = last_scope.block_name("unwind");
654                     pad_bcx = self.new_block(true, name.as_slice(), None);
655                     last_scope.cached_landing_pad = Some(pad_bcx.llbb);
656                 }
657             }
658         }
659
660         // The landing pad return type (the type being propagated). Not sure what
661         // this represents but it's determined by the personality function and
662         // this is what the EH proposal example uses.
663         let llretty = Type::struct_(self.ccx,
664                                     [Type::i8p(self.ccx), Type::i32(self.ccx)],
665                                     false);
666
667         // The exception handling personality function.
668         //
669         // If our compilation unit has the `eh_personality` lang item somewhere
670         // within it, then we just need to translate that. Otherwise, we're
671         // building an rlib which will depend on some upstream implementation of
672         // this function, so we just codegen a generic reference to it. We don't
673         // specify any of the types for the function, we just make it a symbol
674         // that LLVM can later use.
675         let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() {
676             Some(def_id) => callee::trans_fn_ref(pad_bcx, def_id, ExprId(0)),
677             None => {
678                 let mut personality = self.ccx.eh_personality.borrow_mut();
679                 match *personality {
680                     Some(llpersonality) => llpersonality,
681                     None => {
682                         let fty = Type::variadic_func(&[], &Type::i32(self.ccx));
683                         let f = base::decl_cdecl_fn(self.ccx.llmod,
684                                                     "rust_eh_personality",
685                                                     fty,
686                                                     ty::mk_i32());
687                         *personality = Some(f);
688                         f
689                     }
690                 }
691             }
692         };
693
694         // The only landing pad clause will be 'cleanup'
695         let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1u);
696
697         // The landing pad block is a cleanup
698         build::SetCleanup(pad_bcx, llretval);
699
700         // We store the retval in a function-central alloca, so that calls to
701         // Resume can find it.
702         match self.personality.get() {
703             Some(addr) => {
704                 build::Store(pad_bcx, llretval, addr);
705             }
706             None => {
707                 let addr = base::alloca(pad_bcx, common::val_ty(llretval), "");
708                 self.personality.set(Some(addr));
709                 build::Store(pad_bcx, llretval, addr);
710             }
711         }
712
713         // Generate the cleanup block and branch to it.
714         let cleanup_llbb = self.trans_cleanups_to_exit_scope(UnwindExit);
715         build::Br(pad_bcx, cleanup_llbb);
716
717         return pad_bcx.llbb;
718     }
719 }
720
721 impl<'a> CleanupScope<'a> {
722     fn new(kind: CleanupScopeKind<'a>) -> CleanupScope<'a> {
723         CleanupScope {
724             kind: kind,
725             cleanups: vec!(),
726             cached_early_exits: vec!(),
727             cached_landing_pad: None,
728         }
729     }
730
731     fn clear_cached_exits(&mut self) {
732         self.cached_early_exits = vec!();
733         self.cached_landing_pad = None;
734     }
735
736     fn cached_early_exit(&self,
737                          label: EarlyExitLabel)
738                          -> Option<BasicBlockRef> {
739         self.cached_early_exits.iter().
740             find(|e| e.label == label).
741             map(|e| e.cleanup_block)
742     }
743
744     fn add_cached_early_exit(&mut self,
745                              label: EarlyExitLabel,
746                              blk: BasicBlockRef) {
747         self.cached_early_exits.push(
748             CachedEarlyExit { label: label,
749                               cleanup_block: blk });
750     }
751
752     fn needs_invoke(&self) -> bool {
753         /*! True if this scope has cleanups for use during unwinding */
754
755         self.cached_landing_pad.is_some() ||
756             self.cleanups.iter().any(|c| c.clean_on_unwind())
757     }
758
759     fn block_name(&self, prefix: &str) -> String {
760         /*!
761          * Returns a suitable name to use for the basic block that
762          * handles this cleanup scope
763          */
764
765         match self.kind {
766             CustomScopeKind => format!("{}_custom_", prefix),
767             AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
768             LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id),
769         }
770     }
771 }
772
773 impl<'a> CleanupScopeKind<'a> {
774     fn is_temp(&self) -> bool {
775         match *self {
776             CustomScopeKind => true,
777             LoopScopeKind(..) | AstScopeKind(..) => false,
778         }
779     }
780
781     fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
782         match *self {
783             CustomScopeKind | LoopScopeKind(..) => false,
784             AstScopeKind(i) => i == id
785         }
786     }
787
788     fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
789         match *self {
790             CustomScopeKind | AstScopeKind(..) => false,
791             LoopScopeKind(i, _) => i == id
792         }
793     }
794
795     fn early_exit_block(&self,
796                         id: ast::NodeId,
797                         exit: uint) -> Option<BasicBlockRef> {
798         /*!
799          * If this is a loop scope with id `id`, return the early
800          * exit block `exit`, else `None`
801          */
802
803         match *self {
804             LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
805             _ => None,
806         }
807     }
808 }
809
810 impl EarlyExitLabel {
811     fn is_unwind(&self) -> bool {
812         match *self {
813             UnwindExit => true,
814             _ => false
815         }
816     }
817 }
818
///////////////////////////////////////////////////////////////////////////
// Cleanup types

// A scheduled cleanup that runs the drop glue for a value.
pub struct DropValue {
    // If true, `val` is dropped with `glue::drop_ty_immediate`;
    // otherwise with `glue::drop_ty` (see the `Cleanup` impl below).
    is_immediate: bool,
    // Whether this drop must also run on the unwind path
    // (returned from `clean_on_unwind`).
    on_unwind: bool,
    // The LLVM value to be dropped.
    val: ValueRef,
    // The type of the value; selects the drop glue to invoke.
    ty: ty::t,
}
828
829 impl Cleanup for DropValue {
830     fn clean_on_unwind(&self) -> bool {
831         self.on_unwind
832     }
833
834     fn trans<'a>(&self, bcx: &'a Block<'a>) -> &'a Block<'a> {
835         if self.is_immediate {
836             glue::drop_ty_immediate(bcx, self.val, self.ty)
837         } else {
838             glue::drop_ty(bcx, self.val, self.ty)
839         }
840     }
841 }
842
// Identifies which heap a `FreeValue` pointer belongs to, which in
// turn determines the free routine used (see `Cleanup for FreeValue`).
pub enum Heap {
    // Freed via `glue::trans_free`.
    HeapManaged,
    // Freed via `glue::trans_exchange_free_ty`, which needs the
    // content type to compute the allocation's size.
    HeapExchange
}
847
// A scheduled cleanup that frees a heap allocation.
pub struct FreeValue {
    // Pointer to the allocation to be freed.
    ptr: ValueRef,
    // Which heap `ptr` came from; selects the free routine.
    heap: Heap,
    // Type of the allocation's contents; consulted only when freeing
    // from the exchange heap (see the `Cleanup` impl below).
    content_ty: ty::t
}
853
854 impl Cleanup for FreeValue {
855     fn clean_on_unwind(&self) -> bool {
856         true
857     }
858
859     fn trans<'a>(&self, bcx: &'a Block<'a>) -> &'a Block<'a> {
860         match self.heap {
861             HeapManaged => {
862                 glue::trans_free(bcx, self.ptr)
863             }
864             HeapExchange => {
865                 glue::trans_exchange_free_ty(bcx, self.ptr, self.content_ty)
866             }
867         }
868     }
869 }
870
871 pub fn temporary_scope(tcx: &ty::ctxt,
872                        id: ast::NodeId)
873                        -> ScopeId {
874     match tcx.region_maps.temporary_scope(id) {
875         Some(scope) => {
876             let r = AstScope(scope);
877             debug!("temporary_scope({}) = {:?}", id, r);
878             r
879         }
880         None => {
881             tcx.sess.bug(format!("no temporary scope available for expr {}",
882                                  id).as_slice())
883         }
884     }
885 }
886
887 pub fn var_scope(tcx: &ty::ctxt,
888                  id: ast::NodeId)
889                  -> ScopeId {
890     let r = AstScope(tcx.region_maps.var_scope(id));
891     debug!("var_scope({}) = {:?}", id, r);
892     r
893 }
894
895 fn cleanup_is_suitable_for(c: &Cleanup,
896                            label: EarlyExitLabel) -> bool {
897     !label.is_unwind() || c.clean_on_unwind()
898 }
899
900 ///////////////////////////////////////////////////////////////////////////
901 // These traits just exist to put the methods into this file.
902
// Public interface for managing the cleanup-scope stack on a
// FunctionContext: pushing/popping scopes, creating exit blocks, and
// scheduling cleanups to run when a scope is exited.
pub trait CleanupMethods<'a> {
    // --- Pushing scopes ---

    // Push a cleanup scope tied to the AST node `id`.
    fn push_ast_cleanup_scope(&self, id: ast::NodeId);
    // Push a loop scope for node `id` with its early-exit target
    // blocks (indexed by EXIT_BREAK/EXIT_LOOP).
    fn push_loop_cleanup_scope(&self,
                                   id: ast::NodeId,
                                   exits: [&'a Block<'a>, ..EXIT_MAX]);
    // Push a temporary (custom) scope; the returned index is used to
    // pop it or schedule cleanups into it.
    fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;

    // --- Popping scopes ---

    // Pop the AST scope `cleanup_scope`, emitting its cleanups into
    // `bcx`; returns the block in which translation continues.
    fn pop_and_trans_ast_cleanup_scope(&self,
                                              bcx: &'a Block<'a>,
                                              cleanup_scope: ast::NodeId)
                                              -> &'a Block<'a>;
    // Pop the loop scope for `cleanup_scope` without emitting cleanups.
    fn pop_loop_cleanup_scope(&self,
                              cleanup_scope: ast::NodeId);
    // Pop the custom scope `custom_scope` without emitting cleanups.
    fn pop_custom_cleanup_scope(&self,
                                custom_scope: CustomScopeIndex);
    // Pop the custom scope `custom_scope`, emitting its cleanups into
    // `bcx`; returns the block in which translation continues.
    fn pop_and_trans_custom_cleanup_scope(&self,
                                          bcx: &'a Block<'a>,
                                          custom_scope: CustomScopeIndex)
                                          -> &'a Block<'a>;

    // --- Exit blocks ---

    // Id of the innermost enclosing loop scope.
    fn top_loop_scope(&self) -> ast::NodeId;
    // Basic block to branch to for a normal early exit (e.g. break)
    // out of `cleanup_scope`; runs intervening cleanups first.
    fn normal_exit_block(&'a self,
                         cleanup_scope: ast::NodeId,
                         exit: uint) -> BasicBlockRef;
    // Basic block to branch to when returning from the function.
    fn return_exit_block(&'a self) -> BasicBlockRef;

    // --- Scheduling cleanups ---

    // Schedule dropping the memory that `val` points at when
    // `cleanup_scope` is exited.
    fn schedule_drop_mem(&self,
                         cleanup_scope: ScopeId,
                         val: ValueRef,
                         ty: ty::t);
    // Schedule dropping the immediate value `val` when
    // `cleanup_scope` is exited.
    fn schedule_drop_immediate(&self,
                               cleanup_scope: ScopeId,
                               val: ValueRef,
                               ty: ty::t);
    // Schedule freeing the heap allocation `val` when
    // `cleanup_scope` is exited.
    fn schedule_free_value(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           heap: Heap,
                           content_ty: ty::t);
    // Schedule an arbitrary cleanup in the given scope (dispatches on
    // the ScopeId variant).
    fn schedule_clean(&self,
                      cleanup_scope: ScopeId,
                      cleanup: Box<Cleanup>);
    // Schedule an arbitrary cleanup in the AST scope `cleanup_scope`.
    fn schedule_clean_in_ast_scope(&self,
                                   cleanup_scope: ast::NodeId,
                                   cleanup: Box<Cleanup>);
    // Schedule an arbitrary cleanup in the custom scope `custom_scope`.
    fn schedule_clean_in_custom_scope(&self,
                                    custom_scope: CustomScopeIndex,
                                    cleanup: Box<Cleanup>);

    // --- Unwinding ---

    // True if any pending cleanup requires calls to be translated as
    // invokes (so unwinding can reach a landing pad).
    fn needs_invoke(&self) -> bool;
    // Landing pad to use for unwinding out of the current scopes.
    fn get_landing_pad(&'a self) -> BasicBlockRef;
}
951
// Private helper interface backing CleanupMethods; these exist as a
// trait only so the methods can live in this file.
trait CleanupHelperMethods<'a> {
    // Id of the innermost enclosing AST scope, if any.
    fn top_ast_scope(&self) -> Option<ast::NodeId>;
    // Stack index of the innermost scope with pending cleanups, if any.
    fn top_nonempty_cleanup_scope(&self) -> Option<uint>;
    // Sanity checks for custom-scope indices before popping / use.
    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    // Emit `scope`'s cleanups into `bcx`; returns the block in which
    // translation continues.
    fn trans_scope_cleanups(&self,
                            bcx: &'a Block<'a>,
                            scope: &CleanupScope<'a>) -> &'a Block<'a>;
    // Build (or reuse cached) blocks that run cleanups out to the
    // target of the given early-exit label; returns the entry block.
    fn trans_cleanups_to_exit_scope(&'a self,
                                    label: EarlyExitLabel)
                                    -> BasicBlockRef;
    // Landing pad for the innermost scope, created on first use.
    fn get_or_create_landing_pad(&'a self) -> BasicBlockRef;
    // --- Scope-stack primitives ---
    fn scopes_len(&self) -> uint;
    fn push_scope(&self, scope: CleanupScope<'a>);
    fn pop_scope(&self) -> CleanupScope<'a>;
    // Apply `f` to the innermost scope without removing it.
    fn top_scope<R>(&self, f: |&CleanupScope<'a>| -> R) -> R;
}