]> git.lizzy.rs Git - rust.git/blob - src/librustc_trans/trans/cleanup.rs
auto merge of #19628 : jbranchaud/rust/add-string-as-string-doctest, r=steveklabnik
[rust.git] / src / librustc_trans / trans / cleanup.rs
1 // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 //! Code pertaining to cleanup of temporaries as well as execution of
12 //! drop glue. See discussion in `doc.rs` for a high-level summary.
13
14 pub use self::ScopeId::*;
15 pub use self::CleanupScopeKind::*;
16 pub use self::EarlyExitLabel::*;
17 pub use self::Heap::*;
18
19 use llvm::{BasicBlockRef, ValueRef};
20 use trans::base;
21 use trans::build;
22 use trans::callee;
23 use trans::common;
24 use trans::common::{Block, FunctionContext, ExprId, NodeInfo};
25 use trans::debuginfo;
26 use trans::glue;
27 use middle::region;
28 use trans::type_::Type;
29 use middle::ty::{mod, Ty};
30 use std::fmt;
31 use syntax::ast;
32 use util::ppaux::Repr;
33
pub struct CleanupScope<'blk, 'tcx: 'blk> {
    // What kind of scope this is: a scope tied to an AST node, a loop
    // scope carrying its break/continue exit blocks, or a *custom
    // (temporary) scope* that is pushed during trans to clean up
    // miscellaneous garbage that trans may generate whose lifetime is
    // a subset of some expression. See module doc for more details.
    kind: CleanupScopeKind<'blk, 'tcx>,

    // Cleanups to run upon scope exit.
    cleanups: Vec<CleanupObj<'tcx>>,

    // The debug location any drop calls generated for this scope will be
    // associated with.
    debug_loc: Option<NodeInfo>,

    // Already-generated cleanup chains for particular early exits
    // (break/continue/return/unwind). Invalidated whenever a new
    // cleanup is scheduled, since the cached blocks would miss it.
    cached_early_exits: Vec<CachedEarlyExit>,
    // Cached landing pad for unwinding out of this scope, if one has
    // been generated; invalidated together with the early exits.
    cached_landing_pad: Option<BasicBlockRef>,
}
52
// Identifies a custom (temporary) cleanup scope by its position in the
// `FunctionContext` scope stack. Only meaningful while that scope is
// still on the stack (see `is_valid_custom_scope`).
#[deriving(Show)]
pub struct CustomScopeIndex {
    // Index into `FunctionContext::scopes`.
    index: uint
}

impl Copy for CustomScopeIndex {}
// Indices into a loop scope's array of exit blocks: `EXIT_BREAK` is the
// target of `break`, `EXIT_LOOP` the target of `continue`, and
// `EXIT_MAX` is the length of the array.
pub const EXIT_BREAK: uint = 0;
pub const EXIT_LOOP: uint = 1;
pub const EXIT_MAX: uint = 2;
63
pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
    // A temporary scope pushed/popped explicitly during trans.
    CustomScopeKind,
    // A scope corresponding to an AST node.
    AstScopeKind(ast::NodeId),
    // A loop scope for the loop with the given id; the array holds the
    // exit blocks, indexed by `EXIT_BREAK` / `EXIT_LOOP`.
    LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>, ..EXIT_MAX])
}
69
impl<'blk, 'tcx: 'blk> fmt::Show for CleanupScopeKind<'blk, 'tcx> {
    // Debug formatting. Loop exit blocks are printed by pointer
    // (`{:p}`) since `Block` itself is not printable here; note the
    // output keeps a trailing ", " before the closing bracket.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            CustomScopeKind => write!(f, "CustomScopeKind"),
            AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid),
            LoopScopeKind(nid, ref blks) => {
                try!(write!(f, "LoopScopeKind({}, [", nid));
                for blk in blks.iter() {
                    try!(write!(f, "{:p}, ", blk));
                }
                write!(f, "])")
            }
        }
    }
}
85
// Labels the kind of early exit a generated cleanup chain leads to;
// used as the key for caching those chains per scope.
#[deriving(PartialEq, Show)]
pub enum EarlyExitLabel {
    // Exit taken when unwinding from a panic.
    UnwindExit,
    // Exit taken by `return` (branches to the function's return block).
    ReturnExit,
    // `break`/`continue` out of the loop with the given id; the `uint`
    // selects the exit block (`EXIT_BREAK` or `EXIT_LOOP`).
    LoopExit(ast::NodeId, uint)
}

impl Copy for EarlyExitLabel {}
94
// A previously generated cleanup block for a particular early-exit
// label: branching to `cleanup_block` performs this scope's cleanups
// and continues toward the exit identified by `label`.
pub struct CachedEarlyExit {
    label: EarlyExitLabel,
    cleanup_block: BasicBlockRef,
}

impl Copy for CachedEarlyExit {}
101
// A single scheduled cleanup action (e.g. a drop, a free, or a
// lifetime-end marker — see the `schedule_*` methods below).
pub trait Cleanup<'tcx> {
    /// Whether this cleanup must run during unwinding — presumably set
    /// from `ty::type_needs_unwind_cleanup` (see `schedule_drop_mem`);
    /// exact consumer not visible in this file.
    fn must_unwind(&self) -> bool;
    /// Whether this cleanup should be emitted on the unwind path at
    /// all. NOTE(review): the distinction from `must_unwind` is not
    /// established in this file — confirm against the impls.
    fn clean_on_unwind(&self) -> bool;
    /// True if this cleanup is only a lifetime-end marker rather than
    /// real drop/free code.
    fn is_lifetime_end(&self) -> bool;
    /// Emits the cleanup code into `bcx`, attributing any generated
    /// calls to `debug_loc`; returns the block where translation
    /// should continue.
    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: Option<NodeInfo>)
                   -> Block<'blk, 'tcx>;
}
111
// An owned, boxed cleanup action scheduled on some scope.
pub type CleanupObj<'tcx> = Box<Cleanup<'tcx>+'tcx>;
113
// Identifies a cleanup scope either by the AST node it belongs to or
// by its index in the scope stack (for temporary scopes).
#[deriving(Show)]
pub enum ScopeId {
    AstScope(ast::NodeId),
    CustomScope(CustomScopeIndex)
}

impl Copy for ScopeId {}
121
122 impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
123     /// Invoked when we start to trans the code contained within a new cleanup scope.
124     fn push_ast_cleanup_scope(&self, debug_loc: NodeInfo) {
125         debug!("push_ast_cleanup_scope({})",
126                self.ccx.tcx().map.node_to_string(debug_loc.id));
127
128         // FIXME(#2202) -- currently closure bodies have a parent
129         // region, which messes up the assertion below, since there
130         // are no cleanup scopes on the stack at the start of
131         // trans'ing a closure body.  I think though that this should
132         // eventually be fixed by closure bodies not having a parent
133         // region, though that's a touch unclear, and it might also be
134         // better just to narrow this assertion more (i.e., by
135         // excluding id's that correspond to closure bodies only). For
136         // now we just say that if there is already an AST scope on the stack,
137         // this new AST scope had better be its immediate child.
138         let top_scope = self.top_ast_scope();
139         if top_scope.is_some() {
140             assert_eq!(self.ccx
141                            .tcx()
142                            .region_maps
143                            .opt_encl_scope(region::CodeExtent::from_node_id(debug_loc.id))
144                            .map(|s|s.node_id()),
145                        top_scope);
146         }
147
148         self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
149                                           Some(debug_loc)));
150     }
151
152     fn push_loop_cleanup_scope(&self,
153                                id: ast::NodeId,
154                                exits: [Block<'blk, 'tcx>, ..EXIT_MAX]) {
155         debug!("push_loop_cleanup_scope({})",
156                self.ccx.tcx().map.node_to_string(id));
157         assert_eq!(Some(id), self.top_ast_scope());
158
159         // Just copy the debuginfo source location from the enclosing scope
160         let debug_loc = self.scopes
161                             .borrow()
162                             .last()
163                             .unwrap()
164                             .debug_loc;
165
166         self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc));
167     }
168
169     fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
170         let index = self.scopes_len();
171         debug!("push_custom_cleanup_scope(): {}", index);
172
173         // Just copy the debuginfo source location from the enclosing scope
174         let debug_loc = self.scopes
175                             .borrow()
176                             .last()
177                             .map(|opt_scope| opt_scope.debug_loc)
178                             .unwrap_or(None);
179
180         self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc));
181         CustomScopeIndex { index: index }
182     }
183
184     fn push_custom_cleanup_scope_with_debug_loc(&self,
185                                                 debug_loc: NodeInfo)
186                                                 -> CustomScopeIndex {
187         let index = self.scopes_len();
188         debug!("push_custom_cleanup_scope(): {}", index);
189
190         self.push_scope(CleanupScope::new(CustomScopeKind, Some(debug_loc)));
191         CustomScopeIndex { index: index }
192     }
193
194     /// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup
195     /// stack, and generates the code to do its cleanups for normal exit.
196     fn pop_and_trans_ast_cleanup_scope(&self,
197                                        bcx: Block<'blk, 'tcx>,
198                                        cleanup_scope: ast::NodeId)
199                                        -> Block<'blk, 'tcx> {
200         debug!("pop_and_trans_ast_cleanup_scope({})",
201                self.ccx.tcx().map.node_to_string(cleanup_scope));
202
203         assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));
204
205         let scope = self.pop_scope();
206         self.trans_scope_cleanups(bcx, &scope)
207     }
208
209     /// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the
210     /// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by
211     /// branching to a block generated by `normal_exit_block`.
212     fn pop_loop_cleanup_scope(&self,
213                               cleanup_scope: ast::NodeId) {
214         debug!("pop_loop_cleanup_scope({})",
215                self.ccx.tcx().map.node_to_string(cleanup_scope));
216
217         assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));
218
219         let _ = self.pop_scope();
220     }
221
222     /// Removes the top cleanup scope from the stack without executing its cleanups. The top
223     /// cleanup scope must be the temporary scope `custom_scope`.
224     fn pop_custom_cleanup_scope(&self,
225                                 custom_scope: CustomScopeIndex) {
226         debug!("pop_custom_cleanup_scope({})", custom_scope.index);
227         assert!(self.is_valid_to_pop_custom_scope(custom_scope));
228         let _ = self.pop_scope();
229     }
230
231     /// Removes the top cleanup scope from the stack, which must be a temporary scope, and
232     /// generates the code to do its cleanups for normal exit.
233     fn pop_and_trans_custom_cleanup_scope(&self,
234                                           bcx: Block<'blk, 'tcx>,
235                                           custom_scope: CustomScopeIndex)
236                                           -> Block<'blk, 'tcx> {
237         debug!("pop_and_trans_custom_cleanup_scope({})", custom_scope);
238         assert!(self.is_valid_to_pop_custom_scope(custom_scope));
239
240         let scope = self.pop_scope();
241         self.trans_scope_cleanups(bcx, &scope)
242     }
243
244     /// Returns the id of the top-most loop scope
245     fn top_loop_scope(&self) -> ast::NodeId {
246         for scope in self.scopes.borrow().iter().rev() {
247             if let LoopScopeKind(id, _) = scope.kind {
248                 return id;
249             }
250         }
251         self.ccx.sess().bug("no loop scope found");
252     }
253
254     /// Returns a block to branch to which will perform all pending cleanups and then
255     /// break/continue (depending on `exit`) out of the loop with id `cleanup_scope`
256     fn normal_exit_block(&'blk self,
257                          cleanup_scope: ast::NodeId,
258                          exit: uint) -> BasicBlockRef {
259         self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
260     }
261
262     /// Returns a block to branch to which will perform all pending cleanups and then return from
263     /// this function
264     fn return_exit_block(&'blk self) -> BasicBlockRef {
265         self.trans_cleanups_to_exit_scope(ReturnExit)
266     }
267
268     fn schedule_lifetime_end(&self,
269                              cleanup_scope: ScopeId,
270                              val: ValueRef) {
271         let drop = box LifetimeEnd {
272             ptr: val,
273         };
274
275         debug!("schedule_lifetime_end({}, val={})",
276                cleanup_scope,
277                self.ccx.tn().val_to_string(val));
278
279         self.schedule_clean(cleanup_scope, drop as CleanupObj);
280     }
281
282     /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
283     fn schedule_drop_mem(&self,
284                          cleanup_scope: ScopeId,
285                          val: ValueRef,
286                          ty: Ty<'tcx>) {
287         if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; }
288         let drop = box DropValue {
289             is_immediate: false,
290             must_unwind: ty::type_needs_unwind_cleanup(self.ccx.tcx(), ty),
291             val: val,
292             ty: ty,
293             zero: false
294         };
295
296         debug!("schedule_drop_mem({}, val={}, ty={})",
297                cleanup_scope,
298                self.ccx.tn().val_to_string(val),
299                ty.repr(self.ccx.tcx()));
300
301         self.schedule_clean(cleanup_scope, drop as CleanupObj);
302     }
303
304     /// Schedules a (deep) drop and zero-ing of `val`, which is a pointer to an instance of `ty`
305     fn schedule_drop_and_zero_mem(&self,
306                                   cleanup_scope: ScopeId,
307                                   val: ValueRef,
308                                   ty: Ty<'tcx>) {
309         if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; }
310         let drop = box DropValue {
311             is_immediate: false,
312             must_unwind: ty::type_needs_unwind_cleanup(self.ccx.tcx(), ty),
313             val: val,
314             ty: ty,
315             zero: true
316         };
317
318         debug!("schedule_drop_and_zero_mem({}, val={}, ty={}, zero={})",
319                cleanup_scope,
320                self.ccx.tn().val_to_string(val),
321                ty.repr(self.ccx.tcx()),
322                true);
323
324         self.schedule_clean(cleanup_scope, drop as CleanupObj);
325     }
326
327     /// Schedules a (deep) drop of `val`, which is an instance of `ty`
328     fn schedule_drop_immediate(&self,
329                                cleanup_scope: ScopeId,
330                                val: ValueRef,
331                                ty: Ty<'tcx>) {
332
333         if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; }
334         let drop = box DropValue {
335             is_immediate: true,
336             must_unwind: ty::type_needs_unwind_cleanup(self.ccx.tcx(), ty),
337             val: val,
338             ty: ty,
339             zero: false
340         };
341
342         debug!("schedule_drop_immediate({}, val={}, ty={})",
343                cleanup_scope,
344                self.ccx.tn().val_to_string(val),
345                ty.repr(self.ccx.tcx()));
346
347         self.schedule_clean(cleanup_scope, drop as CleanupObj);
348     }
349
350     /// Schedules a call to `free(val)`. Note that this is a shallow operation.
351     fn schedule_free_value(&self,
352                            cleanup_scope: ScopeId,
353                            val: ValueRef,
354                            heap: Heap,
355                            content_ty: Ty<'tcx>) {
356         let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };
357
358         debug!("schedule_free_value({}, val={}, heap={})",
359                cleanup_scope,
360                self.ccx.tn().val_to_string(val),
361                heap);
362
363         self.schedule_clean(cleanup_scope, drop as CleanupObj);
364     }
365
366     /// Schedules a call to `free(val)`. Note that this is a shallow operation.
367     fn schedule_free_slice(&self,
368                            cleanup_scope: ScopeId,
369                            val: ValueRef,
370                            size: ValueRef,
371                            align: ValueRef,
372                            heap: Heap) {
373         let drop = box FreeSlice { ptr: val, size: size, align: align, heap: heap };
374
375         debug!("schedule_free_slice({}, val={}, heap={})",
376                cleanup_scope,
377                self.ccx.tn().val_to_string(val),
378                heap);
379
380         self.schedule_clean(cleanup_scope, drop as CleanupObj);
381     }
382
383     fn schedule_clean(&self,
384                       cleanup_scope: ScopeId,
385                       cleanup: CleanupObj<'tcx>) {
386         match cleanup_scope {
387             AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
388             CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
389         }
390     }
391
392     /// Schedules a cleanup to occur upon exit from `cleanup_scope`. If `cleanup_scope` is not
393     /// provided, then the cleanup is scheduled in the topmost scope, which must be a temporary
394     /// scope.
395     fn schedule_clean_in_ast_scope(&self,
396                                    cleanup_scope: ast::NodeId,
397                                    cleanup: CleanupObj<'tcx>) {
398         debug!("schedule_clean_in_ast_scope(cleanup_scope={})",
399                cleanup_scope);
400
401         for scope in self.scopes.borrow_mut().iter_mut().rev() {
402             if scope.kind.is_ast_with_id(cleanup_scope) {
403                 scope.cleanups.push(cleanup);
404                 scope.clear_cached_exits();
405                 return;
406             } else {
407                 // will be adding a cleanup to some enclosing scope
408                 scope.clear_cached_exits();
409             }
410         }
411
412         self.ccx.sess().bug(
413             format!("no cleanup scope {} found",
414                     self.ccx.tcx().map.node_to_string(cleanup_scope)).as_slice());
415     }
416
417     /// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope.
418     fn schedule_clean_in_custom_scope(&self,
419                                       custom_scope: CustomScopeIndex,
420                                       cleanup: CleanupObj<'tcx>) {
421         debug!("schedule_clean_in_custom_scope(custom_scope={})",
422                custom_scope.index);
423
424         assert!(self.is_valid_custom_scope(custom_scope));
425
426         let mut scopes = self.scopes.borrow_mut();
427         let scope = &mut (*scopes)[custom_scope.index];
428         scope.cleanups.push(cleanup);
429         scope.clear_cached_exits();
430     }
431
432     /// Returns true if there are pending cleanups that should execute on panic.
433     fn needs_invoke(&self) -> bool {
434         self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
435     }
436
437     /// Returns a basic block to branch to in the event of a panic. This block will run the panic
438     /// cleanups and eventually invoke the LLVM `Resume` instruction.
439     fn get_landing_pad(&'blk self) -> BasicBlockRef {
440         let _icx = base::push_ctxt("get_landing_pad");
441
442         debug!("get_landing_pad");
443
444         let orig_scopes_len = self.scopes_len();
445         assert!(orig_scopes_len > 0);
446
447         // Remove any scopes that do not have cleanups on panic:
448         let mut popped_scopes = vec!();
449         while !self.top_scope(|s| s.needs_invoke()) {
450             debug!("top scope does not need invoke");
451             popped_scopes.push(self.pop_scope());
452         }
453
454         // Check for an existing landing pad in the new topmost scope:
455         let llbb = self.get_or_create_landing_pad();
456
457         // Push the scopes we removed back on:
458         loop {
459             match popped_scopes.pop() {
460                 Some(scope) => self.push_scope(scope),
461                 None => break
462             }
463         }
464
465         assert_eq!(self.scopes_len(), orig_scopes_len);
466
467         return llbb;
468     }
469 }
470
impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
    /// Returns the id of the current top-most AST scope, if any.
    fn top_ast_scope(&self) -> Option<ast::NodeId> {
        for scope in self.scopes.borrow().iter().rev() {
            match scope.kind {
                CustomScopeKind | LoopScopeKind(..) => {}
                AstScopeKind(i) => {
                    return Some(i);
                }
            }
        }
        None
    }

    /// Distance from the top of the stack to the innermost scope with
    /// pending cleanups (0 = topmost scope), or `None` if no scope has
    /// any cleanups.
    fn top_nonempty_cleanup_scope(&self) -> Option<uint> {
        self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())
    }

    /// True if `custom_scope` is a live temporary scope AND is the
    /// topmost scope on the stack — the only position from which a
    /// custom scope may be popped.
    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
        self.is_valid_custom_scope(custom_scope) &&
            custom_scope.index == self.scopes.borrow().len() - 1
    }

    /// True if `custom_scope` refers to a temporary scope currently on
    /// the stack.
    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
        let scopes = self.scopes.borrow();
        custom_scope.index < scopes.len() &&
            (*scopes)[custom_scope.index].kind.is_temp()
    }

    /// Generates the cleanups for `scope` into `bcx`
    fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
                            bcx: Block<'blk, 'tcx>,
                            scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> {

        let mut bcx = bcx;
        if !bcx.unreachable.get() {
            // Cleanups run in reverse order of scheduling (LIFO).
            for cleanup in scope.cleanups.iter().rev() {
                bcx = cleanup.trans(bcx, scope.debug_loc);
            }
        }
        bcx
    }

    /// Number of cleanup scopes currently on the stack.
    fn scopes_len(&self) -> uint {
        self.scopes.borrow().len()
    }

    /// Pushes `scope` onto the top of the cleanup-scope stack.
    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
        self.scopes.borrow_mut().push(scope)
    }

    /// Pops and returns the topmost scope; panics if the stack is empty.
    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
        debug!("popping cleanup scope {}, {} scopes remaining",
               self.top_scope(|s| s.block_name("")),
               self.scopes_len() - 1);

        self.scopes.borrow_mut().pop().unwrap()
    }

    /// Applies `f` to the topmost scope while the stack is borrowed;
    /// `f` must not re-borrow the stack. Panics if the stack is empty.
    fn top_scope<R>(&self, f: |&CleanupScope<'blk, 'tcx>| -> R) -> R {
        f(self.scopes.borrow().last().unwrap())
    }

    /// Used when the caller wishes to jump to an early exit, such as a return, break, continue, or
    /// unwind. This function will generate all cleanups between the top of the stack and the exit
    /// `label` and return a basic block that the caller can branch to.
    ///
    /// For example, if the current stack of cleanups were as follows:
    ///
    ///      AST 22
    ///      Custom 1
    ///      AST 23
    ///      Loop 23
    ///      Custom 2
    ///      AST 24
    ///
    /// and the `label` specifies a break from `Loop 23`, then this function would generate a
    /// series of basic blocks as follows:
    ///
    ///      Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
    ///
    /// where `break_blk` is the block specified in `Loop 23` as the target for breaks. The return
    /// value would be the first basic block in that sequence (`Cleanup(AST 24)`). The caller could
    /// then branch to `Cleanup(AST 24)` and it will perform all cleanups and finally branch to the
    /// `break_blk`.
    fn trans_cleanups_to_exit_scope(&'blk self,
                                    label: EarlyExitLabel)
                                    -> BasicBlockRef {
        debug!("trans_cleanups_to_exit_scope label={} scopes={}",
               label, self.scopes_len());

        let orig_scopes_len = self.scopes_len();
        let mut prev_llbb;
        let mut popped_scopes = vec!();

        // First we pop off all the cleanup stacks that are
        // traversed until the exit is reached, pushing them
        // onto the side vector `popped_scopes`. No code is
        // generated at this time.
        //
        // So, continuing the example from above, we would wind up
        // with a `popped_scopes` vector of `[AST 24, Custom 2]`.
        // (Presuming that there are no cached exits)
        loop {
            if self.scopes_len() == 0 {
                // Ran off the bottom of the stack: only unwind and
                // return exits are legal here.
                match label {
                    UnwindExit => {
                        // Generate a block that will `Resume`.
                        let prev_bcx = self.new_block(true, "resume", None);
                        let personality = self.personality.get().expect(
                            "create_landing_pad() should have set this");
                        build::Resume(prev_bcx,
                                      build::Load(prev_bcx, personality));
                        prev_llbb = prev_bcx.llbb;
                        break;
                    }

                    ReturnExit => {
                        prev_llbb = self.get_llreturn();
                        break;
                    }

                    LoopExit(id, _) => {
                        self.ccx.sess().bug(format!(
                                "cannot exit from scope {}, \
                                not in scope", id).as_slice());
                    }
                }
            }

            // Check if we have already cached the unwinding of this
            // scope for this label. If so, we can stop popping scopes
            // and branch to the cached label, since it contains the
            // cleanups for any subsequent scopes.
            match self.top_scope(|s| s.cached_early_exit(label)) {
                Some(cleanup_block) => {
                    prev_llbb = cleanup_block;
                    break;
                }
                None => { }
            }

            // Pop off the scope, since we will be generating
            // unwinding code for it. If we are searching for a loop exit,
            // and this scope is that loop, then stop popping and set
            // `prev_llbb` to the appropriate exit block from the loop.
            popped_scopes.push(self.pop_scope());
            let scope = popped_scopes.last().unwrap();
            match label {
                UnwindExit | ReturnExit => { }
                LoopExit(id, exit) => {
                    match scope.kind.early_exit_block(id, exit) {
                        Some(exitllbb) => {
                            prev_llbb = exitllbb;
                            break;
                        }

                        None => { }
                    }
                }
            }
        }

        debug!("trans_cleanups_to_exit_scope: popped {} scopes",
               popped_scopes.len());

        // Now push the popped scopes back on. As we go,
        // we track in `prev_llbb` the exit to which this scope
        // should branch when it's done.
        //
        // So, continuing with our example, we will start out with
        // `prev_llbb` being set to `break_blk` (or possibly a cached
        // early exit). We will then pop the scopes from `popped_scopes`
        // and generate a basic block for each one, prepending it in the
        // series and updating `prev_llbb`. So we begin by popping `Custom 2`
        // and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)`
        // branch to `prev_llbb == break_blk`, giving us a sequence like:
        //
        //     Cleanup(Custom 2) -> prev_llbb
        //
        // We then pop `AST 24` and repeat the process, giving us the sequence:
        //
        //     Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb
        //
        // At this point, `popped_scopes` is empty, and so the final block
        // that we return to the user is `Cleanup(AST 24)`.
        while !popped_scopes.is_empty() {
            let mut scope = popped_scopes.pop().unwrap();

            if scope.cleanups.iter().any(|c| cleanup_is_suitable_for(&**c, label))
            {
                let name = scope.block_name("clean");
                debug!("generating cleanups for {}", name);
                let bcx_in = self.new_block(label.is_unwind(),
                                            name.as_slice(),
                                            None);
                let mut bcx_out = bcx_in;
                // Emit only the cleanups relevant to this exit kind,
                // in reverse (LIFO) order.
                for cleanup in scope.cleanups.iter().rev() {
                    if cleanup_is_suitable_for(&**cleanup, label) {
                        bcx_out = cleanup.trans(bcx_out,
                                                scope.debug_loc);
                    }
                }
                build::Br(bcx_out, prev_llbb);
                prev_llbb = bcx_in.llbb;
            } else {
                debug!("no suitable cleanups in {}",
                       scope.block_name("clean"));
            }

            // Cache the chain head (even when this scope emitted no
            // blocks) so later exits through this scope can reuse it.
            scope.add_cached_early_exit(label, prev_llbb);
            self.push_scope(scope);
        }

        debug!("trans_cleanups_to_exit_scope: prev_llbb={}", prev_llbb);

        assert_eq!(self.scopes_len(), orig_scopes_len);
        prev_llbb
    }

    /// Creates a landing pad for the top scope, if one does not exist.  The landing pad will
    /// perform all cleanups necessary for an unwind and then `resume` to continue error
    /// propagation:
    ///
    ///     landing_pad -> ... cleanups ... -> [resume]
    ///
    /// (The cleanups and resume instruction are created by `trans_cleanups_to_exit_scope()`, not
    /// in this function itself.)
    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
        let pad_bcx;

        debug!("get_or_create_landing_pad");

        // Check if a landing pad block exists; if not, create one.
        // (Scoped so the mutable borrow of `scopes` ends before the
        // calls below, which re-borrow the stack.)
        {
            let mut scopes = self.scopes.borrow_mut();
            let last_scope = scopes.last_mut().unwrap();
            match last_scope.cached_landing_pad {
                Some(llbb) => { return llbb; }
                None => {
                    let name = last_scope.block_name("unwind");
                    pad_bcx = self.new_block(true, name.as_slice(), None);
                    last_scope.cached_landing_pad = Some(pad_bcx.llbb);
                }
            }
        }

        // The landing pad return type (the type being propagated). Not sure what
        // this represents but it's determined by the personality function and
        // this is what the EH proposal example uses.
        let llretty = Type::struct_(self.ccx,
                                    &[Type::i8p(self.ccx), Type::i32(self.ccx)],
                                    false);

        // The exception handling personality function.
        //
        // If our compilation unit has the `eh_personality` lang item somewhere
        // within it, then we just need to translate that. Otherwise, we're
        // building an rlib which will depend on some upstream implementation of
        // this function, so we just codegen a generic reference to it. We don't
        // specify any of the types for the function, we just make it a symbol
        // that LLVM can later use.
        let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() {
            Some(def_id) => callee::trans_fn_ref(pad_bcx, def_id, ExprId(0)),
            None => {
                let mut personality = self.ccx.eh_personality().borrow_mut();
                match *personality {
                    Some(llpersonality) => llpersonality,
                    None => {
                        // Declare (once per codegen unit) an external
                        // variadic personality symbol for LLVM to bind.
                        let fty = Type::variadic_func(&[], &Type::i32(self.ccx));
                        let f = base::decl_cdecl_fn(self.ccx,
                                                    "rust_eh_personality",
                                                    fty,
                                                    ty::mk_i32());
                        *personality = Some(f);
                        f
                    }
                }
            }
        };

        // The only landing pad clause will be 'cleanup'
        let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1u);

        // The landing pad block is a cleanup
        build::SetCleanup(pad_bcx, llretval);

        // We store the retval in a function-central alloca, so that calls to
        // Resume can find it.
        match self.personality.get() {
            Some(addr) => {
                build::Store(pad_bcx, llretval, addr);
            }
            None => {
                let addr = base::alloca(pad_bcx, common::val_ty(llretval), "");
                self.personality.set(Some(addr));
                build::Store(pad_bcx, llretval, addr);
            }
        }

        // Generate the cleanup block and branch to it.
        let cleanup_llbb = self.trans_cleanups_to_exit_scope(UnwindExit);
        build::Br(pad_bcx, cleanup_llbb);

        return pad_bcx.llbb;
    }
}
778
779 impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
780     fn new(kind: CleanupScopeKind<'blk, 'tcx>,
781            debug_loc: Option<NodeInfo>)
782         -> CleanupScope<'blk, 'tcx> {
783         CleanupScope {
784             kind: kind,
785             debug_loc: debug_loc,
786             cleanups: vec!(),
787             cached_early_exits: vec!(),
788             cached_landing_pad: None,
789         }
790     }
791
792     fn clear_cached_exits(&mut self) {
793         self.cached_early_exits = vec!();
794         self.cached_landing_pad = None;
795     }
796
797     fn cached_early_exit(&self,
798                          label: EarlyExitLabel)
799                          -> Option<BasicBlockRef> {
800         self.cached_early_exits.iter().
801             find(|e| e.label == label).
802             map(|e| e.cleanup_block)
803     }
804
805     fn add_cached_early_exit(&mut self,
806                              label: EarlyExitLabel,
807                              blk: BasicBlockRef) {
808         self.cached_early_exits.push(
809             CachedEarlyExit { label: label,
810                               cleanup_block: blk });
811     }
812
813     /// True if this scope has cleanups that need unwinding
814     fn needs_invoke(&self) -> bool {
815
816         self.cached_landing_pad.is_some() ||
817             self.cleanups.iter().any(|c| c.must_unwind())
818     }
819
820     /// Returns a suitable name to use for the basic block that handles this cleanup scope
821     fn block_name(&self, prefix: &str) -> String {
822         match self.kind {
823             CustomScopeKind => format!("{}_custom_", prefix),
824             AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
825             LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id),
826         }
827     }
828
829     pub fn drop_non_lifetime_clean(&mut self) {
830         self.cleanups.retain(|c| c.is_lifetime_end());
831     }
832 }
833
834 impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
835     fn is_temp(&self) -> bool {
836         match *self {
837             CustomScopeKind => true,
838             LoopScopeKind(..) | AstScopeKind(..) => false,
839         }
840     }
841
842     fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
843         match *self {
844             CustomScopeKind | LoopScopeKind(..) => false,
845             AstScopeKind(i) => i == id
846         }
847     }
848
849     fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
850         match *self {
851             CustomScopeKind | AstScopeKind(..) => false,
852             LoopScopeKind(i, _) => i == id
853         }
854     }
855
856     /// If this is a loop scope with id `id`, return the early exit block `exit`, else `None`
857     fn early_exit_block(&self,
858                         id: ast::NodeId,
859                         exit: uint) -> Option<BasicBlockRef> {
860         match *self {
861             LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
862             _ => None,
863         }
864     }
865 }
866
impl EarlyExitLabel {
    /// True iff this label is the unwind exit path (as opposed to a
    /// normal early exit such as `break` or `return`).
    fn is_unwind(&self) -> bool {
        match *self {
            UnwindExit => true,
            _ => false
        }
    }
}
875
876 ///////////////////////////////////////////////////////////////////////////
877 // Cleanup types
878
/// A scheduled cleanup that invokes drop glue for `val` (of type `ty`)
/// when its cleanup scope is exited.
pub struct DropValue<'tcx> {
    // If true, `val` is the value itself; `trans` then uses
    // `drop_ty_immediate` rather than the by-reference `drop_ty`.
    is_immediate: bool,
    // Whether this cleanup must also run on the unwind path (returned
    // from both `must_unwind` and `clean_on_unwind`).
    must_unwind: bool,
    val: ValueRef,
    ty: Ty<'tcx>,
    // If true, the memory at `val` is zeroed after the drop call.
    zero: bool
}

impl<'tcx> Copy for DropValue<'tcx> {}
888
889 impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
890     fn must_unwind(&self) -> bool {
891         self.must_unwind
892     }
893
894     fn clean_on_unwind(&self) -> bool {
895         self.must_unwind
896     }
897
898     fn is_lifetime_end(&self) -> bool {
899         false
900     }
901
902     fn trans<'blk>(&self,
903                    bcx: Block<'blk, 'tcx>,
904                    debug_loc: Option<NodeInfo>)
905                    -> Block<'blk, 'tcx> {
906         let bcx = if self.is_immediate {
907             glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc)
908         } else {
909             glue::drop_ty(bcx, self.val, self.ty, debug_loc)
910         };
911         if self.zero {
912             base::zero_mem(bcx, self.val, self.ty);
913         }
914         bcx
915     }
916 }
917
/// Identifies the heap a scheduled free belongs to; `FreeValue` and
/// `FreeSlice` dispatch on this in their `trans` methods.
#[deriving(Show)]
pub enum Heap {
    HeapExchange
}

impl Copy for Heap {}
924
/// A scheduled cleanup that frees the heap allocation at `ptr`, whose
/// pointee has type `content_ty`.
pub struct FreeValue<'tcx> {
    ptr: ValueRef,
    heap: Heap,
    content_ty: Ty<'tcx>
}

impl<'tcx> Copy for FreeValue<'tcx> {}
932
933 impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> {
934     fn must_unwind(&self) -> bool {
935         true
936     }
937
938     fn clean_on_unwind(&self) -> bool {
939         true
940     }
941
942     fn is_lifetime_end(&self) -> bool {
943         false
944     }
945
946     fn trans<'blk>(&self,
947                    bcx: Block<'blk, 'tcx>,
948                    debug_loc: Option<NodeInfo>)
949                    -> Block<'blk, 'tcx> {
950         apply_debug_loc(bcx.fcx, debug_loc);
951
952         match self.heap {
953             HeapExchange => {
954                 glue::trans_exchange_free_ty(bcx, self.ptr, self.content_ty)
955             }
956         }
957     }
958 }
959
/// A scheduled cleanup that frees a dynamically sized allocation at
/// `ptr`, given its run-time `size` and `align` values.
pub struct FreeSlice {
    ptr: ValueRef,
    size: ValueRef,
    align: ValueRef,
    heap: Heap,
}

impl Copy for FreeSlice {}
968
969 impl<'tcx> Cleanup<'tcx> for FreeSlice {
970     fn must_unwind(&self) -> bool {
971         true
972     }
973
974     fn clean_on_unwind(&self) -> bool {
975         true
976     }
977
978     fn is_lifetime_end(&self) -> bool {
979         false
980     }
981
982     fn trans<'blk, 'tcx>(&self,
983                          bcx: Block<'blk, 'tcx>,
984                          debug_loc: Option<NodeInfo>)
985                       -> Block<'blk, 'tcx> {
986         apply_debug_loc(bcx.fcx, debug_loc);
987
988         match self.heap {
989             HeapExchange => {
990                 glue::trans_exchange_free_dyn(bcx, self.ptr, self.size, self.align)
991             }
992         }
993     }
994 }
995
/// A scheduled cleanup that emits a lifetime-end marker for `ptr` via
/// `base::call_lifetime_end`; it frees nothing and never unwinds.
pub struct LifetimeEnd {
    ptr: ValueRef,
}

impl Copy for LifetimeEnd {}
1001
1002 impl<'tcx> Cleanup<'tcx> for LifetimeEnd {
1003     fn must_unwind(&self) -> bool {
1004         false
1005     }
1006
1007     fn clean_on_unwind(&self) -> bool {
1008         true
1009     }
1010
1011     fn is_lifetime_end(&self) -> bool {
1012         true
1013     }
1014
1015     fn trans<'blk, 'tcx>(&self,
1016                          bcx: Block<'blk, 'tcx>,
1017                          debug_loc: Option<NodeInfo>)
1018                       -> Block<'blk, 'tcx> {
1019         apply_debug_loc(bcx.fcx, debug_loc);
1020         base::call_lifetime_end(bcx, self.ptr);
1021         bcx
1022     }
1023 }
1024
1025 pub fn temporary_scope(tcx: &ty::ctxt,
1026                        id: ast::NodeId)
1027                        -> ScopeId {
1028     match tcx.region_maps.temporary_scope(id) {
1029         Some(scope) => {
1030             let r = AstScope(scope.node_id());
1031             debug!("temporary_scope({}) = {}", id, r);
1032             r
1033         }
1034         None => {
1035             tcx.sess.bug(format!("no temporary scope available for expr {}",
1036                                  id).as_slice())
1037         }
1038     }
1039 }
1040
1041 pub fn var_scope(tcx: &ty::ctxt,
1042                  id: ast::NodeId)
1043                  -> ScopeId {
1044     let r = AstScope(tcx.region_maps.var_scope(id).node_id());
1045     debug!("var_scope({}) = {}", id, r);
1046     r
1047 }
1048
1049 fn cleanup_is_suitable_for(c: &Cleanup,
1050                            label: EarlyExitLabel) -> bool {
1051     !label.is_unwind() || c.clean_on_unwind()
1052 }
1053
1054 fn apply_debug_loc(fcx: &FunctionContext, debug_loc: Option<NodeInfo>) {
1055     match debug_loc {
1056         Some(ref src_loc) => {
1057             debuginfo::set_source_location(fcx, src_loc.id, src_loc.span);
1058         }
1059         None => {
1060             debuginfo::clear_source_location(fcx);
1061         }
1062     }
1063 }
1064
1065 ///////////////////////////////////////////////////////////////////////////
1066 // These traits just exist to put the methods into this file.
1067
/// Public interface for managing the stack of cleanup scopes and for
/// scheduling the cleanup objects defined above (drops, frees,
/// lifetime-end markers).
pub trait CleanupMethods<'blk, 'tcx> {
    /// Opens a cleanup scope tied to the AST node described by `id`.
    fn push_ast_cleanup_scope(&self, id: NodeInfo);
    /// Opens a cleanup scope for a loop; `exits` are the blocks to
    /// branch to on early exit (indexed up to `EXIT_MAX`).
    fn push_loop_cleanup_scope(&self,
                               id: ast::NodeId,
                               exits: [Block<'blk, 'tcx>, ..EXIT_MAX]);
    /// Opens a temporary (non-AST) scope and returns its index.
    fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
    /// As `push_custom_cleanup_scope`, but drop calls generated for the
    /// scope will be associated with `debug_loc`.
    fn push_custom_cleanup_scope_with_debug_loc(&self,
                                                debug_loc: NodeInfo)
                                                -> CustomScopeIndex;
    /// Pops the AST scope `cleanup_scope`, emitting its cleanups into
    /// `bcx`; returns the block in which to continue translation.
    fn pop_and_trans_ast_cleanup_scope(&self,
                                              bcx: Block<'blk, 'tcx>,
                                              cleanup_scope: ast::NodeId)
                                              -> Block<'blk, 'tcx>;
    /// Pops the loop scope `cleanup_scope` without emitting cleanups.
    fn pop_loop_cleanup_scope(&self,
                              cleanup_scope: ast::NodeId);
    /// Pops the custom scope `custom_scope` without emitting cleanups.
    fn pop_custom_cleanup_scope(&self,
                                custom_scope: CustomScopeIndex);
    /// Pops the custom scope `custom_scope`, emitting its cleanups into
    /// `bcx`; returns the block in which to continue translation.
    fn pop_and_trans_custom_cleanup_scope(&self,
                                          bcx: Block<'blk, 'tcx>,
                                          custom_scope: CustomScopeIndex)
                                          -> Block<'blk, 'tcx>;
    /// Returns the id of the innermost enclosing loop scope.
    fn top_loop_scope(&self) -> ast::NodeId;
    /// Returns a block to branch to which will run pending cleanups and
    /// then take loop exit `exit` of scope `cleanup_scope`.
    fn normal_exit_block(&'blk self,
                         cleanup_scope: ast::NodeId,
                         exit: uint) -> BasicBlockRef;
    /// Returns a block to branch to which will run pending cleanups and
    /// then return from the function.
    fn return_exit_block(&'blk self) -> BasicBlockRef;
    /// Schedules a lifetime-end marker for `val` (see `LifetimeEnd`).
    fn schedule_lifetime_end(&self,
                         cleanup_scope: ScopeId,
                         val: ValueRef);
    /// Schedules drop glue for the memory at `val` (see `DropValue`).
    fn schedule_drop_mem(&self,
                         cleanup_scope: ScopeId,
                         val: ValueRef,
                         ty: Ty<'tcx>);
    /// As `schedule_drop_mem`, but the memory is zeroed after the drop.
    fn schedule_drop_and_zero_mem(&self,
                                  cleanup_scope: ScopeId,
                                  val: ValueRef,
                                  ty: Ty<'tcx>);
    /// Schedules drop glue for the immediate value `val`.
    fn schedule_drop_immediate(&self,
                               cleanup_scope: ScopeId,
                               val: ValueRef,
                               ty: Ty<'tcx>);
    /// Schedules a free of the allocation `val` (see `FreeValue`).
    fn schedule_free_value(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           heap: Heap,
                           content_ty: Ty<'tcx>);
    /// Schedules a free of the slice allocation `val` with dynamic
    /// `size` and `align` (see `FreeSlice`).
    fn schedule_free_slice(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           size: ValueRef,
                           align: ValueRef,
                           heap: Heap);
    /// Schedules an arbitrary cleanup object in the given scope.
    fn schedule_clean(&self,
                      cleanup_scope: ScopeId,
                      cleanup: CleanupObj<'tcx>);
    /// Schedules `cleanup` in the AST scope `cleanup_scope`.
    fn schedule_clean_in_ast_scope(&self,
                                   cleanup_scope: ast::NodeId,
                                   cleanup: CleanupObj<'tcx>);
    /// Schedules `cleanup` in the custom scope `custom_scope`.
    fn schedule_clean_in_custom_scope(&self,
                                    custom_scope: CustomScopeIndex,
                                    cleanup: CleanupObj<'tcx>);
    /// True if some pending cleanup must run on unwinding, so calls
    /// need to be emitted as invokes with a landing pad.
    fn needs_invoke(&self) -> bool;
    /// Returns the landing-pad block to unwind into (created lazily;
    /// see `get_or_create_landing_pad`).
    fn get_landing_pad(&'blk self) -> BasicBlockRef;
}
1132
/// Internal helpers backing `CleanupMethods`; this trait exists only to
/// put the methods into this file (see the comment above) and is not
/// exported.
trait CleanupHelperMethods<'blk, 'tcx> {
    /// Returns the id of the innermost enclosing AST scope, if any.
    fn top_ast_scope(&self) -> Option<ast::NodeId>;
    /// Returns the index of the innermost scope with pending cleanups.
    fn top_nonempty_cleanup_scope(&self) -> Option<uint>;
    /// Checks that `custom_scope` is the scope currently on top of the
    /// stack, i.e. that popping it now would be legal.
    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    /// Checks that `custom_scope` refers to a live custom scope.
    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    /// Emits `scope`'s cleanups into `bcx`; returns the block in which
    /// to continue translation.
    fn trans_scope_cleanups(&self,
                            bcx: Block<'blk, 'tcx>,
                            scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
    /// Generates (or reuses cached) blocks running all cleanups needed
    /// to exit via `label`; returns the block to branch to.
    fn trans_cleanups_to_exit_scope(&'blk self,
                                    label: EarlyExitLabel)
                                    -> BasicBlockRef;
    /// Returns the function's landing pad, building it on first use.
    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
    /// Number of cleanup scopes currently on the stack.
    fn scopes_len(&self) -> uint;
    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
    /// Invokes `f` with a borrow of the topmost scope.
    fn top_scope<R>(&self, f: |&CleanupScope<'blk, 'tcx>| -> R) -> R;
}