]> git.lizzy.rs Git - rust.git/blob - src/librustc_trans/trans/cleanup.rs
92a96cd02b501c67f64af211a42772724a82e2ea
[rust.git] / src / librustc_trans / trans / cleanup.rs
1 // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 //! Code pertaining to cleanup of temporaries as well as execution of
12 //! drop glue. See discussion in `doc.rs` for a high-level summary.
13
14 pub use self::ScopeId::*;
15 pub use self::CleanupScopeKind::*;
16 pub use self::EarlyExitLabel::*;
17 pub use self::Heap::*;
18
19 use llvm::{BasicBlockRef, ValueRef};
20 use trans::base;
21 use trans::build;
22 use trans::callee;
23 use trans::common;
24 use trans::common::{Block, FunctionContext, ExprId, NodeInfo};
25 use trans::debuginfo;
26 use trans::glue;
27 use middle::region;
28 use trans::type_::Type;
29 use middle::ty::{self, Ty};
30 use std::fmt;
31 use syntax::ast;
32 use util::ppaux::Repr;
33
/// One entry in the function's stack of cleanup scopes. A scope is pushed
/// when trans enters an AST node, a loop, or a temporary ("custom") region,
/// and popped when trans leaves it; scheduled cleanups run at that point.
/// See module doc for more details.
pub struct CleanupScope<'blk, 'tcx: 'blk> {
    // The kind of this cleanup scope. A `CustomScopeKind` is a
    // *temporary scope* that is pushed during trans to cleanup
    // miscellaneous garbage that trans may generate whose
    // lifetime is a subset of some expression.  See module doc for
    // more details.
    kind: CleanupScopeKind<'blk, 'tcx>,

    // Cleanups to run upon scope exit.
    cleanups: Vec<CleanupObj<'tcx>>,

    // The debug location any drop calls generated for this scope will be
    // associated with.
    debug_loc: Option<NodeInfo>,

    // Cleanup blocks already generated for early exits through this scope;
    // invalidated whenever a new cleanup is scheduled here or in an
    // enclosing scope (see `clear_cached_exits`).
    cached_early_exits: Vec<CachedEarlyExit>,
    // Cached landing pad for unwinding out of this scope, if one has
    // already been built (see `get_or_create_landing_pad`).
    cached_landing_pad: Option<BasicBlockRef>,
}
52
/// Index of a temporary ("custom") scope within the function's scope stack.
/// Handed out by `push_custom_cleanup_scope` and validated against the stack
/// (`is_valid_custom_scope`) before use.
#[derive(Copy, Show)]
pub struct CustomScopeIndex {
    // Position of the scope in `FunctionContext::scopes`.
    index: uint
}
57
// Slots in a loop scope's fixed-size array of exit blocks (see
// `LoopScopeKind` and `normal_exit_block`). EXIT_BREAK indexes the `break`
// target and EXIT_LOOP presumably the `continue` target — confirm against
// the loop-translation callers; EXIT_MAX is the array length.
pub const EXIT_BREAK: uint = 0;
pub const EXIT_LOOP: uint = 1;
pub const EXIT_MAX: uint = 2;
61
/// Discriminates the three kinds of cleanup scope.
pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
    /// A temporary scope pushed during trans for miscellaneous garbage.
    CustomScopeKind,
    /// A scope corresponding to the AST node with the given id.
    AstScopeKind(ast::NodeId),
    /// A loop scope: the loop's node id plus the blocks that early exits
    /// from the loop branch to, indexed by `EXIT_BREAK`/`EXIT_LOOP`.
    LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>; EXIT_MAX])
}
67
68 impl<'blk, 'tcx: 'blk> fmt::Show for CleanupScopeKind<'blk, 'tcx> {
69     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
70         match *self {
71             CustomScopeKind => write!(f, "CustomScopeKind"),
72             AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid),
73             LoopScopeKind(nid, ref blks) => {
74                 try!(write!(f, "LoopScopeKind({}, [", nid));
75                 for blk in blks.iter() {
76                     try!(write!(f, "{:p}, ", blk));
77                 }
78                 write!(f, "])")
79             }
80         }
81     }
82 }
83
/// Identifies a kind of early exit from the scope stack; used as the key
/// when generating (and caching) the chain of cleanup blocks leading to
/// that exit (see `trans_cleanups_to_exit_scope`).
#[derive(Copy, PartialEq, Show)]
pub enum EarlyExitLabel {
    /// Exit by unwinding (panic propagation via `Resume`).
    UnwindExit,
    /// Exit by returning from the function.
    ReturnExit,
    /// Exit from the loop with the given node id, through the exit-block
    /// slot named by the second field (`EXIT_BREAK`/`EXIT_LOOP`).
    LoopExit(ast::NodeId, uint)
}
90
/// A previously generated cleanup block for a particular early exit,
/// cached on a scope so that repeated exits can reuse it.
#[derive(Copy)]
pub struct CachedEarlyExit {
    // The exit this block was generated for.
    label: EarlyExitLabel,
    // First block of the cleanup chain to branch to for that exit.
    cleanup_block: BasicBlockRef,
}
96
/// A deferred action (drop, free, lifetime end, ...) scheduled to run when
/// a cleanup scope is exited.
pub trait Cleanup<'tcx> {
    /// Whether this cleanup must run even on unwind paths — consulted
    /// together with `clean_on_unwind` by `cleanup_is_suitable_for`
    /// (defined elsewhere in this module; exact semantics there).
    fn must_unwind(&self) -> bool;
    /// Whether this cleanup should run when exiting via unwinding.
    fn clean_on_unwind(&self) -> bool;
    /// Whether this cleanup merely marks the end of a value's lifetime —
    /// presumably true for `LifetimeEnd` (see `schedule_lifetime_end`);
    /// confirm against the trait impls, which are not visible here.
    fn is_lifetime_end(&self) -> bool;
    /// Emits the cleanup code into `bcx`, returning the block in which
    /// translation should continue.
    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: Option<NodeInfo>)
                   -> Block<'blk, 'tcx>;
}
106
/// A boxed, type-erased cleanup action stored in a scope's cleanup list.
pub type CleanupObj<'tcx> = Box<Cleanup<'tcx>+'tcx>;
108
/// Identifies the scope a cleanup should be scheduled in: either an AST
/// scope by node id, or a temporary scope by its stack index.
#[derive(Copy, Show)]
pub enum ScopeId {
    AstScope(ast::NodeId),
    CustomScope(CustomScopeIndex)
}
114
115 impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
116     /// Invoked when we start to trans the code contained within a new cleanup scope.
117     fn push_ast_cleanup_scope(&self, debug_loc: NodeInfo) {
118         debug!("push_ast_cleanup_scope({})",
119                self.ccx.tcx().map.node_to_string(debug_loc.id));
120
121         // FIXME(#2202) -- currently closure bodies have a parent
122         // region, which messes up the assertion below, since there
123         // are no cleanup scopes on the stack at the start of
124         // trans'ing a closure body.  I think though that this should
125         // eventually be fixed by closure bodies not having a parent
126         // region, though that's a touch unclear, and it might also be
127         // better just to narrow this assertion more (i.e., by
128         // excluding id's that correspond to closure bodies only). For
129         // now we just say that if there is already an AST scope on the stack,
130         // this new AST scope had better be its immediate child.
131         let top_scope = self.top_ast_scope();
132         if top_scope.is_some() {
133             assert_eq!(self.ccx
134                            .tcx()
135                            .region_maps
136                            .opt_encl_scope(region::CodeExtent::from_node_id(debug_loc.id))
137                            .map(|s|s.node_id()),
138                        top_scope);
139         }
140
141         self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
142                                           Some(debug_loc)));
143     }
144
145     fn push_loop_cleanup_scope(&self,
146                                id: ast::NodeId,
147                                exits: [Block<'blk, 'tcx>; EXIT_MAX]) {
148         debug!("push_loop_cleanup_scope({})",
149                self.ccx.tcx().map.node_to_string(id));
150         assert_eq!(Some(id), self.top_ast_scope());
151
152         // Just copy the debuginfo source location from the enclosing scope
153         let debug_loc = self.scopes
154                             .borrow()
155                             .last()
156                             .unwrap()
157                             .debug_loc;
158
159         self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc));
160     }
161
162     fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
163         let index = self.scopes_len();
164         debug!("push_custom_cleanup_scope(): {}", index);
165
166         // Just copy the debuginfo source location from the enclosing scope
167         let debug_loc = self.scopes
168                             .borrow()
169                             .last()
170                             .map(|opt_scope| opt_scope.debug_loc)
171                             .unwrap_or(None);
172
173         self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc));
174         CustomScopeIndex { index: index }
175     }
176
177     fn push_custom_cleanup_scope_with_debug_loc(&self,
178                                                 debug_loc: NodeInfo)
179                                                 -> CustomScopeIndex {
180         let index = self.scopes_len();
181         debug!("push_custom_cleanup_scope(): {}", index);
182
183         self.push_scope(CleanupScope::new(CustomScopeKind, Some(debug_loc)));
184         CustomScopeIndex { index: index }
185     }
186
187     /// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup
188     /// stack, and generates the code to do its cleanups for normal exit.
189     fn pop_and_trans_ast_cleanup_scope(&self,
190                                        bcx: Block<'blk, 'tcx>,
191                                        cleanup_scope: ast::NodeId)
192                                        -> Block<'blk, 'tcx> {
193         debug!("pop_and_trans_ast_cleanup_scope({})",
194                self.ccx.tcx().map.node_to_string(cleanup_scope));
195
196         assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));
197
198         let scope = self.pop_scope();
199         self.trans_scope_cleanups(bcx, &scope)
200     }
201
202     /// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the
203     /// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by
204     /// branching to a block generated by `normal_exit_block`.
205     fn pop_loop_cleanup_scope(&self,
206                               cleanup_scope: ast::NodeId) {
207         debug!("pop_loop_cleanup_scope({})",
208                self.ccx.tcx().map.node_to_string(cleanup_scope));
209
210         assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));
211
212         let _ = self.pop_scope();
213     }
214
215     /// Removes the top cleanup scope from the stack without executing its cleanups. The top
216     /// cleanup scope must be the temporary scope `custom_scope`.
217     fn pop_custom_cleanup_scope(&self,
218                                 custom_scope: CustomScopeIndex) {
219         debug!("pop_custom_cleanup_scope({})", custom_scope.index);
220         assert!(self.is_valid_to_pop_custom_scope(custom_scope));
221         let _ = self.pop_scope();
222     }
223
224     /// Removes the top cleanup scope from the stack, which must be a temporary scope, and
225     /// generates the code to do its cleanups for normal exit.
226     fn pop_and_trans_custom_cleanup_scope(&self,
227                                           bcx: Block<'blk, 'tcx>,
228                                           custom_scope: CustomScopeIndex)
229                                           -> Block<'blk, 'tcx> {
230         debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope);
231         assert!(self.is_valid_to_pop_custom_scope(custom_scope));
232
233         let scope = self.pop_scope();
234         self.trans_scope_cleanups(bcx, &scope)
235     }
236
237     /// Returns the id of the top-most loop scope
238     fn top_loop_scope(&self) -> ast::NodeId {
239         for scope in self.scopes.borrow().iter().rev() {
240             if let LoopScopeKind(id, _) = scope.kind {
241                 return id;
242             }
243         }
244         self.ccx.sess().bug("no loop scope found");
245     }
246
247     /// Returns a block to branch to which will perform all pending cleanups and then
248     /// break/continue (depending on `exit`) out of the loop with id `cleanup_scope`
249     fn normal_exit_block(&'blk self,
250                          cleanup_scope: ast::NodeId,
251                          exit: uint) -> BasicBlockRef {
252         self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
253     }
254
255     /// Returns a block to branch to which will perform all pending cleanups and then return from
256     /// this function
257     fn return_exit_block(&'blk self) -> BasicBlockRef {
258         self.trans_cleanups_to_exit_scope(ReturnExit)
259     }
260
261     fn schedule_lifetime_end(&self,
262                              cleanup_scope: ScopeId,
263                              val: ValueRef) {
264         let drop = box LifetimeEnd {
265             ptr: val,
266         };
267
268         debug!("schedule_lifetime_end({:?}, val={})",
269                cleanup_scope,
270                self.ccx.tn().val_to_string(val));
271
272         self.schedule_clean(cleanup_scope, drop as CleanupObj);
273     }
274
275     /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
276     fn schedule_drop_mem(&self,
277                          cleanup_scope: ScopeId,
278                          val: ValueRef,
279                          ty: Ty<'tcx>) {
280         if !common::type_needs_drop(self.ccx.tcx(), ty) { return; }
281         let drop = box DropValue {
282             is_immediate: false,
283             must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
284             val: val,
285             ty: ty,
286             zero: false
287         };
288
289         debug!("schedule_drop_mem({:?}, val={}, ty={})",
290                cleanup_scope,
291                self.ccx.tn().val_to_string(val),
292                ty.repr(self.ccx.tcx()));
293
294         self.schedule_clean(cleanup_scope, drop as CleanupObj);
295     }
296
297     /// Schedules a (deep) drop and zero-ing of `val`, which is a pointer to an instance of `ty`
298     fn schedule_drop_and_zero_mem(&self,
299                                   cleanup_scope: ScopeId,
300                                   val: ValueRef,
301                                   ty: Ty<'tcx>) {
302         if !common::type_needs_drop(self.ccx.tcx(), ty) { return; }
303         let drop = box DropValue {
304             is_immediate: false,
305             must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
306             val: val,
307             ty: ty,
308             zero: true
309         };
310
311         debug!("schedule_drop_and_zero_mem({:?}, val={}, ty={}, zero={})",
312                cleanup_scope,
313                self.ccx.tn().val_to_string(val),
314                ty.repr(self.ccx.tcx()),
315                true);
316
317         self.schedule_clean(cleanup_scope, drop as CleanupObj);
318     }
319
320     /// Schedules a (deep) drop of `val`, which is an instance of `ty`
321     fn schedule_drop_immediate(&self,
322                                cleanup_scope: ScopeId,
323                                val: ValueRef,
324                                ty: Ty<'tcx>) {
325
326         if !common::type_needs_drop(self.ccx.tcx(), ty) { return; }
327         let drop = box DropValue {
328             is_immediate: true,
329             must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
330             val: val,
331             ty: ty,
332             zero: false
333         };
334
335         debug!("schedule_drop_immediate({:?}, val={}, ty={:?})",
336                cleanup_scope,
337                self.ccx.tn().val_to_string(val),
338                ty.repr(self.ccx.tcx()));
339
340         self.schedule_clean(cleanup_scope, drop as CleanupObj);
341     }
342
343     /// Schedules a call to `free(val)`. Note that this is a shallow operation.
344     fn schedule_free_value(&self,
345                            cleanup_scope: ScopeId,
346                            val: ValueRef,
347                            heap: Heap,
348                            content_ty: Ty<'tcx>) {
349         let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };
350
351         debug!("schedule_free_value({:?}, val={}, heap={:?})",
352                cleanup_scope,
353                self.ccx.tn().val_to_string(val),
354                heap);
355
356         self.schedule_clean(cleanup_scope, drop as CleanupObj);
357     }
358
359     /// Schedules a call to `free(val)`. Note that this is a shallow operation.
360     fn schedule_free_slice(&self,
361                            cleanup_scope: ScopeId,
362                            val: ValueRef,
363                            size: ValueRef,
364                            align: ValueRef,
365                            heap: Heap) {
366         let drop = box FreeSlice { ptr: val, size: size, align: align, heap: heap };
367
368         debug!("schedule_free_slice({:?}, val={}, heap={:?})",
369                cleanup_scope,
370                self.ccx.tn().val_to_string(val),
371                heap);
372
373         self.schedule_clean(cleanup_scope, drop as CleanupObj);
374     }
375
376     fn schedule_clean(&self,
377                       cleanup_scope: ScopeId,
378                       cleanup: CleanupObj<'tcx>) {
379         match cleanup_scope {
380             AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
381             CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
382         }
383     }
384
385     /// Schedules a cleanup to occur upon exit from `cleanup_scope`. If `cleanup_scope` is not
386     /// provided, then the cleanup is scheduled in the topmost scope, which must be a temporary
387     /// scope.
388     fn schedule_clean_in_ast_scope(&self,
389                                    cleanup_scope: ast::NodeId,
390                                    cleanup: CleanupObj<'tcx>) {
391         debug!("schedule_clean_in_ast_scope(cleanup_scope={})",
392                cleanup_scope);
393
394         for scope in self.scopes.borrow_mut().iter_mut().rev() {
395             if scope.kind.is_ast_with_id(cleanup_scope) {
396                 scope.cleanups.push(cleanup);
397                 scope.clear_cached_exits();
398                 return;
399             } else {
400                 // will be adding a cleanup to some enclosing scope
401                 scope.clear_cached_exits();
402             }
403         }
404
405         self.ccx.sess().bug(
406             format!("no cleanup scope {} found",
407                     self.ccx.tcx().map.node_to_string(cleanup_scope)).index(&FullRange));
408     }
409
410     /// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope.
411     fn schedule_clean_in_custom_scope(&self,
412                                       custom_scope: CustomScopeIndex,
413                                       cleanup: CleanupObj<'tcx>) {
414         debug!("schedule_clean_in_custom_scope(custom_scope={})",
415                custom_scope.index);
416
417         assert!(self.is_valid_custom_scope(custom_scope));
418
419         let mut scopes = self.scopes.borrow_mut();
420         let scope = &mut (*scopes)[custom_scope.index];
421         scope.cleanups.push(cleanup);
422         scope.clear_cached_exits();
423     }
424
425     /// Returns true if there are pending cleanups that should execute on panic.
426     fn needs_invoke(&self) -> bool {
427         self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
428     }
429
430     /// Returns a basic block to branch to in the event of a panic. This block will run the panic
431     /// cleanups and eventually invoke the LLVM `Resume` instruction.
432     fn get_landing_pad(&'blk self) -> BasicBlockRef {
433         let _icx = base::push_ctxt("get_landing_pad");
434
435         debug!("get_landing_pad");
436
437         let orig_scopes_len = self.scopes_len();
438         assert!(orig_scopes_len > 0);
439
440         // Remove any scopes that do not have cleanups on panic:
441         let mut popped_scopes = vec!();
442         while !self.top_scope(|s| s.needs_invoke()) {
443             debug!("top scope does not need invoke");
444             popped_scopes.push(self.pop_scope());
445         }
446
447         // Check for an existing landing pad in the new topmost scope:
448         let llbb = self.get_or_create_landing_pad();
449
450         // Push the scopes we removed back on:
451         loop {
452             match popped_scopes.pop() {
453                 Some(scope) => self.push_scope(scope),
454                 None => break
455             }
456         }
457
458         assert_eq!(self.scopes_len(), orig_scopes_len);
459
460         return llbb;
461     }
462 }
463
impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
    /// Returns the id of the current top-most AST scope, if any.
    fn top_ast_scope(&self) -> Option<ast::NodeId> {
        for scope in self.scopes.borrow().iter().rev() {
            match scope.kind {
                CustomScopeKind | LoopScopeKind(..) => {}
                AstScopeKind(i) => {
                    return Some(i);
                }
            }
        }
        None
    }

    /// Returns the distance from the top of the stack of the nearest scope
    /// that has at least one scheduled cleanup, if any.
    fn top_nonempty_cleanup_scope(&self) -> Option<uint> {
        self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())
    }

    /// True if `custom_scope` is a valid temporary scope AND is the top of
    /// the stack — only the top scope may be popped.
    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
        self.is_valid_custom_scope(custom_scope) &&
            custom_scope.index == self.scopes.borrow().len() - 1
    }

    /// True if `custom_scope` is in bounds and refers to a temporary scope.
    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
        let scopes = self.scopes.borrow();
        custom_scope.index < scopes.len() &&
            (*scopes)[custom_scope.index].kind.is_temp()
    }

    /// Generates the cleanups for `scope` into `bcx`
    fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
                            bcx: Block<'blk, 'tcx>,
                            scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> {

        let mut bcx = bcx;
        if !bcx.unreachable.get() {
            // Cleanups run in reverse order of scheduling (LIFO).
            for cleanup in scope.cleanups.iter().rev() {
                bcx = cleanup.trans(bcx, scope.debug_loc);
            }
        }
        bcx
    }

    /// Number of scopes currently on the cleanup stack.
    fn scopes_len(&self) -> uint {
        self.scopes.borrow().len()
    }

    /// Pushes `scope` onto the cleanup stack.
    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
        self.scopes.borrow_mut().push(scope)
    }

    /// Pops and returns the top scope; panics if the stack is empty.
    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
        debug!("popping cleanup scope {}, {} scopes remaining",
               self.top_scope(|s| s.block_name("")),
               self.scopes_len() - 1);

        self.scopes.borrow_mut().pop().unwrap()
    }

    /// Applies `f` to the top scope while the stack is borrowed.
    fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R {
        f(self.scopes.borrow().last().unwrap())
    }

    /// Used when the caller wishes to jump to an early exit, such as a return, break, continue, or
    /// unwind. This function will generate all cleanups between the top of the stack and the exit
    /// `label` and return a basic block that the caller can branch to.
    ///
    /// For example, if the current stack of cleanups were as follows:
    ///
    ///      AST 22
    ///      Custom 1
    ///      AST 23
    ///      Loop 23
    ///      Custom 2
    ///      AST 24
    ///
    /// and the `label` specifies a break from `Loop 23`, then this function would generate a
    /// series of basic blocks as follows:
    ///
    ///      Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
    ///
    /// where `break_blk` is the block specified in `Loop 23` as the target for breaks. The return
    /// value would be the first basic block in that sequence (`Cleanup(AST 24)`). The caller could
    /// then branch to `Cleanup(AST 24)` and it will perform all cleanups and finally branch to the
    /// `break_blk`.
    fn trans_cleanups_to_exit_scope(&'blk self,
                                    label: EarlyExitLabel)
                                    -> BasicBlockRef {
        debug!("trans_cleanups_to_exit_scope label={:?} scopes={}",
               label, self.scopes_len());

        let orig_scopes_len = self.scopes_len();
        let mut prev_llbb;
        let mut popped_scopes = vec!();

        // First we pop off all the cleanup stacks that are
        // traversed until the exit is reached, pushing them
        // onto the side vector `popped_scopes`. No code is
        // generated at this time.
        //
        // So, continuing the example from above, we would wind up
        // with a `popped_scopes` vector of `[AST 24, Custom 2]`.
        // (Presuming that there are no cached exits)
        loop {
            if self.scopes_len() == 0 {
                // Ran off the bottom of the stack: the exit target is
                // outside all scopes. Only unwind/return exits are legal
                // here; a loop exit at this point is a compiler bug.
                match label {
                    UnwindExit => {
                        // Generate a block that will `Resume`.
                        let prev_bcx = self.new_block(true, "resume", None);
                        let personality = self.personality.get().expect(
                            "create_landing_pad() should have set this");
                        build::Resume(prev_bcx,
                                      build::Load(prev_bcx, personality));
                        prev_llbb = prev_bcx.llbb;
                        break;
                    }

                    ReturnExit => {
                        prev_llbb = self.get_llreturn();
                        break;
                    }

                    LoopExit(id, _) => {
                        self.ccx.sess().bug(format!(
                                "cannot exit from scope {}, \
                                not in scope", id).index(&FullRange));
                    }
                }
            }

            // Check if we have already cached the unwinding of this
            // scope for this label. If so, we can stop popping scopes
            // and branch to the cached label, since it contains the
            // cleanups for any subsequent scopes.
            match self.top_scope(|s| s.cached_early_exit(label)) {
                Some(cleanup_block) => {
                    prev_llbb = cleanup_block;
                    break;
                }
                None => { }
            }

            // Pop off the scope, since we will be generating
            // unwinding code for it. If we are searching for a loop exit,
            // and this scope is that loop, then stop popping and set
            // `prev_llbb` to the appropriate exit block from the loop.
            popped_scopes.push(self.pop_scope());
            let scope = popped_scopes.last().unwrap();
            match label {
                UnwindExit | ReturnExit => { }
                LoopExit(id, exit) => {
                    match scope.kind.early_exit_block(id, exit) {
                        Some(exitllbb) => {
                            prev_llbb = exitllbb;
                            break;
                        }

                        None => { }
                    }
                }
            }
        }

        debug!("trans_cleanups_to_exit_scope: popped {} scopes",
               popped_scopes.len());

        // Now push the popped scopes back on. As we go,
        // we track in `prev_llbb` the exit to which this scope
        // should branch when it's done.
        //
        // So, continuing with our example, we will start out with
        // `prev_llbb` being set to `break_blk` (or possibly a cached
        // early exit). We will then pop the scopes from `popped_scopes`
        // and generate a basic block for each one, prepending it in the
        // series and updating `prev_llbb`. So we begin by popping `Custom 2`
        // and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)`
        // branch to `prev_llbb == break_blk`, giving us a sequence like:
        //
        //     Cleanup(Custom 2) -> prev_llbb
        //
        // We then pop `AST 24` and repeat the process, giving us the sequence:
        //
        //     Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb
        //
        // At this point, `popped_scopes` is empty, and so the final block
        // that we return to the user is `Cleanup(AST 24)`.
        while !popped_scopes.is_empty() {
            let mut scope = popped_scopes.pop().unwrap();

            if scope.cleanups.iter().any(|c| cleanup_is_suitable_for(&**c, label))
            {
                let name = scope.block_name("clean");
                debug!("generating cleanups for {}", name);
                let bcx_in = self.new_block(label.is_unwind(),
                                            name.index(&FullRange),
                                            None);
                let mut bcx_out = bcx_in;
                // Run this scope's suitable cleanups in LIFO order, then
                // branch on to the exit chain built so far.
                for cleanup in scope.cleanups.iter().rev() {
                    if cleanup_is_suitable_for(&**cleanup, label) {
                        bcx_out = cleanup.trans(bcx_out,
                                                scope.debug_loc);
                    }
                }
                build::Br(bcx_out, prev_llbb);
                prev_llbb = bcx_in.llbb;
            } else {
                debug!("no suitable cleanups in {}",
                       scope.block_name("clean"));
            }

            // Cache the (possibly unchanged) exit block on the scope so a
            // later exit with the same label can reuse it.
            scope.add_cached_early_exit(label, prev_llbb);
            self.push_scope(scope);
        }

        debug!("trans_cleanups_to_exit_scope: prev_llbb={:?}", prev_llbb);

        assert_eq!(self.scopes_len(), orig_scopes_len);
        prev_llbb
    }

    /// Creates a landing pad for the top scope, if one does not exist.  The landing pad will
    /// perform all cleanups necessary for an unwind and then `resume` to continue error
    /// propagation:
    ///
    ///     landing_pad -> ... cleanups ... -> [resume]
    ///
    /// (The cleanups and resume instruction are created by `trans_cleanups_to_exit_scope()`, not
    /// in this function itself.)
    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
        let pad_bcx;

        debug!("get_or_create_landing_pad");

        // Check if a landing pad block exists; if not, create one.
        // (Scoped so the mutable borrow of `scopes` ends before we
        // generate code below, which may re-borrow the stack.)
        {
            let mut scopes = self.scopes.borrow_mut();
            let last_scope = scopes.last_mut().unwrap();
            match last_scope.cached_landing_pad {
                Some(llbb) => { return llbb; }
                None => {
                    let name = last_scope.block_name("unwind");
                    pad_bcx = self.new_block(true, name.index(&FullRange), None);
                    last_scope.cached_landing_pad = Some(pad_bcx.llbb);
                }
            }
        }

        // The landing pad return type (the type being propagated). Not sure what
        // this represents but it's determined by the personality function and
        // this is what the EH proposal example uses.
        let llretty = Type::struct_(self.ccx,
                                    &[Type::i8p(self.ccx), Type::i32(self.ccx)],
                                    false);

        // The exception handling personality function.
        //
        // If our compilation unit has the `eh_personality` lang item somewhere
        // within it, then we just need to translate that. Otherwise, we're
        // building an rlib which will depend on some upstream implementation of
        // this function, so we just codegen a generic reference to it. We don't
        // specify any of the types for the function, we just make it a symbol
        // that LLVM can later use.
        let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() {
            Some(def_id) => {
                callee::trans_fn_ref(pad_bcx.ccx(), def_id, ExprId(0),
                                     pad_bcx.fcx.param_substs).val
            }
            None => {
                let mut personality = self.ccx.eh_personality().borrow_mut();
                match *personality {
                    Some(llpersonality) => llpersonality,
                    None => {
                        // First use in this crate: declare the external
                        // symbol once and cache it on the crate context.
                        let fty = Type::variadic_func(&[], &Type::i32(self.ccx));
                        let f = base::decl_cdecl_fn(self.ccx,
                                                    "rust_eh_personality",
                                                    fty,
                                                    self.ccx.tcx().types.i32);
                        *personality = Some(f);
                        f
                    }
                }
            }
        };

        // The only landing pad clause will be 'cleanup'
        let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1u);

        // The landing pad block is a cleanup
        build::SetCleanup(pad_bcx, llretval);

        // We store the retval in a function-central alloca, so that calls to
        // Resume can find it.
        match self.personality.get() {
            Some(addr) => {
                build::Store(pad_bcx, llretval, addr);
            }
            None => {
                let addr = base::alloca(pad_bcx, common::val_ty(llretval), "");
                self.personality.set(Some(addr));
                build::Store(pad_bcx, llretval, addr);
            }
        }

        // Generate the cleanup block and branch to it.
        let cleanup_llbb = self.trans_cleanups_to_exit_scope(UnwindExit);
        build::Br(pad_bcx, cleanup_llbb);

        return pad_bcx.llbb;
    }
}
774
775 impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
776     fn new(kind: CleanupScopeKind<'blk, 'tcx>,
777            debug_loc: Option<NodeInfo>)
778         -> CleanupScope<'blk, 'tcx> {
779         CleanupScope {
780             kind: kind,
781             debug_loc: debug_loc,
782             cleanups: vec!(),
783             cached_early_exits: vec!(),
784             cached_landing_pad: None,
785         }
786     }
787
788     fn clear_cached_exits(&mut self) {
789         self.cached_early_exits = vec!();
790         self.cached_landing_pad = None;
791     }
792
793     fn cached_early_exit(&self,
794                          label: EarlyExitLabel)
795                          -> Option<BasicBlockRef> {
796         self.cached_early_exits.iter().
797             find(|e| e.label == label).
798             map(|e| e.cleanup_block)
799     }
800
801     fn add_cached_early_exit(&mut self,
802                              label: EarlyExitLabel,
803                              blk: BasicBlockRef) {
804         self.cached_early_exits.push(
805             CachedEarlyExit { label: label,
806                               cleanup_block: blk });
807     }
808
809     /// True if this scope has cleanups that need unwinding
810     fn needs_invoke(&self) -> bool {
811
812         self.cached_landing_pad.is_some() ||
813             self.cleanups.iter().any(|c| c.must_unwind())
814     }
815
816     /// Returns a suitable name to use for the basic block that handles this cleanup scope
817     fn block_name(&self, prefix: &str) -> String {
818         match self.kind {
819             CustomScopeKind => format!("{}_custom_", prefix),
820             AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
821             LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id),
822         }
823     }
824
825     pub fn drop_non_lifetime_clean(&mut self) {
826         self.cleanups.retain(|c| c.is_lifetime_end());
827     }
828 }
829
830 impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
831     fn is_temp(&self) -> bool {
832         match *self {
833             CustomScopeKind => true,
834             LoopScopeKind(..) | AstScopeKind(..) => false,
835         }
836     }
837
838     fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
839         match *self {
840             CustomScopeKind | LoopScopeKind(..) => false,
841             AstScopeKind(i) => i == id
842         }
843     }
844
845     fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
846         match *self {
847             CustomScopeKind | AstScopeKind(..) => false,
848             LoopScopeKind(i, _) => i == id
849         }
850     }
851
852     /// If this is a loop scope with id `id`, return the early exit block `exit`, else `None`
853     fn early_exit_block(&self,
854                         id: ast::NodeId,
855                         exit: uint) -> Option<BasicBlockRef> {
856         match *self {
857             LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
858             _ => None,
859         }
860     }
861 }
862
863 impl EarlyExitLabel {
864     fn is_unwind(&self) -> bool {
865         match *self {
866             UnwindExit => true,
867             _ => false
868         }
869     }
870 }
871
872 ///////////////////////////////////////////////////////////////////////////
873 // Cleanup types
874
// A scheduled cleanup that runs the drop glue for a value when its
// scope exits (see the `Cleanup` impl below).
#[derive(Copy)]
pub struct DropValue<'tcx> {
    // If true, `val` is the value itself; otherwise `val` is a pointer
    // to the memory holding the value. `trans` selects between
    // `drop_ty_immediate` and `drop_ty` based on this flag.
    is_immediate: bool,
    // Whether this drop must also run on the unwind path (returned from
    // both `must_unwind` and `clean_on_unwind`).
    must_unwind: bool,
    // The LLVM value to be dropped.
    val: ValueRef,
    // The Rust type of the value being dropped.
    ty: Ty<'tcx>,
    // If true, the memory at `val` is zeroed after the drop glue runs.
    zero: bool
}
883
884 impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
885     fn must_unwind(&self) -> bool {
886         self.must_unwind
887     }
888
889     fn clean_on_unwind(&self) -> bool {
890         self.must_unwind
891     }
892
893     fn is_lifetime_end(&self) -> bool {
894         false
895     }
896
897     fn trans<'blk>(&self,
898                    bcx: Block<'blk, 'tcx>,
899                    debug_loc: Option<NodeInfo>)
900                    -> Block<'blk, 'tcx> {
901         let bcx = if self.is_immediate {
902             glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc)
903         } else {
904             glue::drop_ty(bcx, self.val, self.ty, debug_loc)
905         };
906         if self.zero {
907             base::zero_mem(bcx, self.val, self.ty);
908         }
909         bcx
910     }
911 }
912
// Identifies which heap owns an allocation that a cleanup must free.
#[derive(Copy, Show)]
pub enum Heap {
    // The exchange heap: freed via the exchange-free glue (see the
    // `FreeValue` / `FreeSlice` cleanups below).
    HeapExchange
}
917
// A scheduled cleanup that frees a heap allocation holding a single
// value of statically known type.
#[derive(Copy)]
pub struct FreeValue<'tcx> {
    // Pointer to the allocation to free.
    ptr: ValueRef,
    // Which heap the allocation came from.
    heap: Heap,
    // Type of the pointed-to contents, passed to the free glue.
    content_ty: Ty<'tcx>
}
924
925 impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> {
926     fn must_unwind(&self) -> bool {
927         true
928     }
929
930     fn clean_on_unwind(&self) -> bool {
931         true
932     }
933
934     fn is_lifetime_end(&self) -> bool {
935         false
936     }
937
938     fn trans<'blk>(&self,
939                    bcx: Block<'blk, 'tcx>,
940                    debug_loc: Option<NodeInfo>)
941                    -> Block<'blk, 'tcx> {
942         apply_debug_loc(bcx.fcx, debug_loc);
943
944         match self.heap {
945             HeapExchange => {
946                 glue::trans_exchange_free_ty(bcx, self.ptr, self.content_ty)
947             }
948         }
949     }
950 }
951
// A scheduled cleanup that frees a heap allocation whose size and
// alignment are only known at runtime (e.g. a slice).
#[derive(Copy)]
pub struct FreeSlice {
    // Pointer to the allocation to free.
    ptr: ValueRef,
    // Dynamic size of the allocation, passed to the free glue.
    size: ValueRef,
    // Dynamic alignment of the allocation, passed to the free glue.
    align: ValueRef,
    // Which heap the allocation came from.
    heap: Heap,
}
959
960 impl<'tcx> Cleanup<'tcx> for FreeSlice {
961     fn must_unwind(&self) -> bool {
962         true
963     }
964
965     fn clean_on_unwind(&self) -> bool {
966         true
967     }
968
969     fn is_lifetime_end(&self) -> bool {
970         false
971     }
972
973     fn trans<'blk>(&self,
974                    bcx: Block<'blk, 'tcx>,
975                    debug_loc: Option<NodeInfo>)
976                    -> Block<'blk, 'tcx> {
977         apply_debug_loc(bcx.fcx, debug_loc);
978
979         match self.heap {
980             HeapExchange => {
981                 glue::trans_exchange_free_dyn(bcx, self.ptr, self.size, self.align)
982             }
983         }
984     }
985 }
986
// A scheduled cleanup that emits a lifetime-end marker (via
// `base::call_lifetime_end`) for a stack slot when its scope exits.
#[derive(Copy)]
pub struct LifetimeEnd {
    // Pointer whose lifetime is being ended.
    ptr: ValueRef,
}
991
992 impl<'tcx> Cleanup<'tcx> for LifetimeEnd {
993     fn must_unwind(&self) -> bool {
994         false
995     }
996
997     fn clean_on_unwind(&self) -> bool {
998         true
999     }
1000
1001     fn is_lifetime_end(&self) -> bool {
1002         true
1003     }
1004
1005     fn trans<'blk>(&self,
1006                    bcx: Block<'blk, 'tcx>,
1007                    debug_loc: Option<NodeInfo>)
1008                    -> Block<'blk, 'tcx> {
1009         apply_debug_loc(bcx.fcx, debug_loc);
1010         base::call_lifetime_end(bcx, self.ptr);
1011         bcx
1012     }
1013 }
1014
1015 pub fn temporary_scope(tcx: &ty::ctxt,
1016                        id: ast::NodeId)
1017                        -> ScopeId {
1018     match tcx.region_maps.temporary_scope(id) {
1019         Some(scope) => {
1020             let r = AstScope(scope.node_id());
1021             debug!("temporary_scope({}) = {:?}", id, r);
1022             r
1023         }
1024         None => {
1025             tcx.sess.bug(format!("no temporary scope available for expr {}",
1026                                  id).index(&FullRange))
1027         }
1028     }
1029 }
1030
1031 pub fn var_scope(tcx: &ty::ctxt,
1032                  id: ast::NodeId)
1033                  -> ScopeId {
1034     let r = AstScope(tcx.region_maps.var_scope(id).node_id());
1035     debug!("var_scope({}) = {:?}", id, r);
1036     r
1037 }
1038
1039 fn cleanup_is_suitable_for(c: &Cleanup,
1040                            label: EarlyExitLabel) -> bool {
1041     !label.is_unwind() || c.clean_on_unwind()
1042 }
1043
1044 fn apply_debug_loc(fcx: &FunctionContext, debug_loc: Option<NodeInfo>) {
1045     match debug_loc {
1046         Some(ref src_loc) => {
1047             debuginfo::set_source_location(fcx, src_loc.id, src_loc.span);
1048         }
1049         None => {
1050             debuginfo::clear_source_location(fcx);
1051         }
1052     }
1053 }
1054
1055 ///////////////////////////////////////////////////////////////////////////
1056 // These traits just exist to put the methods into this file.
1057
/// Interface for managing the stack of cleanup scopes and for scheduling
/// cleanups to run when those scopes exit.
pub trait CleanupMethods<'blk, 'tcx> {
    /// Pushes a cleanup scope tied to the AST node described by `id`.
    fn push_ast_cleanup_scope(&self, id: NodeInfo);
    /// Pushes a cleanup scope for the loop `id`, recording the blocks to
    /// branch to for each of the `EXIT_MAX` kinds of early exit.
    fn push_loop_cleanup_scope(&self,
                               id: ast::NodeId,
                               exits: [Block<'blk, 'tcx>; EXIT_MAX]);
    /// Pushes a temporary ("custom") scope not tied to any AST node;
    /// returns an index used to refer to it in later calls.
    fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
    /// Like `push_custom_cleanup_scope`, but drop calls generated for
    /// the scope will be associated with `debug_loc`.
    fn push_custom_cleanup_scope_with_debug_loc(&self,
                                                debug_loc: NodeInfo)
                                                -> CustomScopeIndex;
    /// Pops the AST scope `cleanup_scope` and emits its cleanups into
    /// `bcx`, returning the block in which translation continues.
    fn pop_and_trans_ast_cleanup_scope(&self,
                                              bcx: Block<'blk, 'tcx>,
                                              cleanup_scope: ast::NodeId)
                                              -> Block<'blk, 'tcx>;
    /// Pops the loop scope for `cleanup_scope` without emitting its
    /// cleanups.
    fn pop_loop_cleanup_scope(&self,
                              cleanup_scope: ast::NodeId);
    /// Pops the custom scope `custom_scope` without emitting its
    /// cleanups.
    fn pop_custom_cleanup_scope(&self,
                                custom_scope: CustomScopeIndex);
    /// Pops the custom scope `custom_scope` and emits its cleanups into
    /// `bcx`, returning the block in which translation continues.
    fn pop_and_trans_custom_cleanup_scope(&self,
                                          bcx: Block<'blk, 'tcx>,
                                          custom_scope: CustomScopeIndex)
                                          -> Block<'blk, 'tcx>;
    /// Returns the id of the innermost enclosing loop scope.
    fn top_loop_scope(&self) -> ast::NodeId;
    /// Returns a block that performs the cleanups needed to reach early
    /// exit `exit` of the loop scope `cleanup_scope`.
    fn normal_exit_block(&'blk self,
                         cleanup_scope: ast::NodeId,
                         exit: uint) -> BasicBlockRef;
    /// Returns a block that performs the cleanups needed to return from
    /// the function.
    fn return_exit_block(&'blk self) -> BasicBlockRef;
    /// Schedules a lifetime-end marker for `val` when `cleanup_scope`
    /// exits.
    fn schedule_lifetime_end(&self,
                         cleanup_scope: ScopeId,
                         val: ValueRef);
    /// Schedules dropping of the memory pointed to by `val` (of type
    /// `ty`) when `cleanup_scope` exits.
    fn schedule_drop_mem(&self,
                         cleanup_scope: ScopeId,
                         val: ValueRef,
                         ty: Ty<'tcx>);
    /// Like `schedule_drop_mem`, but the memory is also zeroed after the
    /// drop.
    fn schedule_drop_and_zero_mem(&self,
                                  cleanup_scope: ScopeId,
                                  val: ValueRef,
                                  ty: Ty<'tcx>);
    /// Schedules dropping of the immediate value `val` when
    /// `cleanup_scope` exits.
    fn schedule_drop_immediate(&self,
                               cleanup_scope: ScopeId,
                               val: ValueRef,
                               ty: Ty<'tcx>);
    /// Schedules freeing of the heap allocation `val` (contents of type
    /// `content_ty`, owned by `heap`) when `cleanup_scope` exits.
    fn schedule_free_value(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           heap: Heap,
                           content_ty: Ty<'tcx>);
    /// Schedules freeing of a dynamically sized allocation `val` with
    /// runtime `size` and `align` when `cleanup_scope` exits.
    fn schedule_free_slice(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           size: ValueRef,
                           align: ValueRef,
                           heap: Heap);
    /// Schedules an arbitrary cleanup object in the scope
    /// `cleanup_scope`.
    fn schedule_clean(&self,
                      cleanup_scope: ScopeId,
                      cleanup: CleanupObj<'tcx>);
    /// Schedules an arbitrary cleanup object in the AST scope
    /// `cleanup_scope`.
    fn schedule_clean_in_ast_scope(&self,
                                   cleanup_scope: ast::NodeId,
                                   cleanup: CleanupObj<'tcx>);
    /// Schedules an arbitrary cleanup object in the custom scope
    /// `custom_scope`.
    fn schedule_clean_in_custom_scope(&self,
                                    custom_scope: CustomScopeIndex,
                                    cleanup: CleanupObj<'tcx>);
    /// True if there are pending cleanups that must run when unwinding
    /// (and hence calls must be emitted as invokes with a landing pad).
    fn needs_invoke(&self) -> bool;
    /// Returns the landing-pad block that runs cleanups on unwind.
    fn get_landing_pad(&'blk self) -> BasicBlockRef;
}
1122
/// Internal helper methods used to implement `CleanupMethods`.
trait CleanupHelperMethods<'blk, 'tcx> {
    /// Returns the id of the innermost enclosing AST scope, if any.
    fn top_ast_scope(&self) -> Option<ast::NodeId>;
    /// Returns the index of the innermost scope that has pending
    /// cleanups, if any.
    fn top_nonempty_cleanup_scope(&self) -> Option<uint>;
    /// True if it is currently valid to pop `custom_scope`.
    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    /// True if `custom_scope` names an existing custom scope.
    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    /// Emits the cleanups of `scope` into `bcx`, returning the block in
    /// which translation continues.
    fn trans_scope_cleanups(&self,
                            bcx: Block<'blk, 'tcx>,
                            scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
    /// Generates (or reuses cached) blocks running the cleanups needed
    /// to exit via `label`, returning the block to branch to.
    fn trans_cleanups_to_exit_scope(&'blk self,
                                    label: EarlyExitLabel)
                                    -> BasicBlockRef;
    /// Returns the landing pad used for unwinding, creating it (along
    /// with the personality-function machinery) on first use.
    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
    /// Number of scopes currently on the stack.
    fn scopes_len(&self) -> uint;
    /// Pushes `scope` onto the scope stack.
    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
    /// Pops and returns the topmost scope.
    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
    /// Invokes `f` with a borrow of the topmost scope.
    fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R;
}