]> git.lizzy.rs Git - rust.git/blob - src/librustc_trans/trans/cleanup.rs
doc: remove incomplete sentence
[rust.git] / src / librustc_trans / trans / cleanup.rs
1 // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 //! Code pertaining to cleanup of temporaries as well as execution of
12 //! drop glue. See discussion in `doc.rs` for a high-level summary.
13
14 pub use self::ScopeId::*;
15 pub use self::CleanupScopeKind::*;
16 pub use self::EarlyExitLabel::*;
17 pub use self::Heap::*;
18
19 use llvm::{BasicBlockRef, ValueRef};
20 use trans::base;
21 use trans::build;
22 use trans::callee;
23 use trans::common;
24 use trans::common::{Block, FunctionContext, ExprId, NodeInfo};
25 use trans::debuginfo;
26 use trans::glue;
27 // Temporary due to slicing syntax hacks (KILLME)
28 //use middle::region;
29 use trans::type_::Type;
30 use middle::ty::{mod, Ty};
31 use std::fmt;
32 use syntax::ast;
33 use util::ppaux::Repr;
34
pub struct CleanupScope<'blk, 'tcx: 'blk> {
    // The kind of this cleanup scope. A `CustomScopeKind` is a
    // *temporary scope* that is pushed during trans to cleanup
    // miscellaneous garbage that trans may generate whose lifetime is
    // a subset of some expression.  See module doc for more details.
    kind: CleanupScopeKind<'blk, 'tcx>,

    // Cleanups to run upon scope exit.
    cleanups: Vec<CleanupObj<'tcx>>,

    // The debug location any drop calls generated for this scope will be
    // associated with.
    debug_loc: Option<NodeInfo>,

    // Blocks already generated that perform this scope's cleanups and
    // branch to a given early exit; reused by `trans_cleanups_to_exit_scope`
    // and invalidated by `clear_cached_exits` when cleanups change.
    cached_early_exits: Vec<CachedEarlyExit>,
    // Cached landing-pad block for unwinding through this scope; see
    // `get_or_create_landing_pad`.
    cached_landing_pad: Option<BasicBlockRef>,
}

// Index of a custom (temporary) scope within the function's scope stack.
#[deriving(Copy, Show)]
pub struct CustomScopeIndex {
    index: uint
}

// Indices into a loop scope's early-exit block array (see
// `LoopScopeKind` and `normal_exit_block`); `EXIT_MAX` is the array length.
pub const EXIT_BREAK: uint = 0;
pub const EXIT_LOOP: uint = 1;
pub const EXIT_MAX: uint = 2;

pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
    // A temporary scope with no corresponding AST node.
    CustomScopeKind,
    // A scope corresponding to the AST node with the given id.
    AstScopeKind(ast::NodeId),
    // A scope for a loop; the array holds the blocks targeted by early
    // exits out of the loop, indexed by `EXIT_BREAK`/`EXIT_LOOP`.
    LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>; EXIT_MAX])
}
68
69 impl<'blk, 'tcx: 'blk> fmt::Show for CleanupScopeKind<'blk, 'tcx> {
70     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
71         match *self {
72             CustomScopeKind => write!(f, "CustomScopeKind"),
73             AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid),
74             LoopScopeKind(nid, ref blks) => {
75                 try!(write!(f, "LoopScopeKind({}, [", nid));
76                 for blk in blks.iter() {
77                     try!(write!(f, "{:p}, ", blk));
78                 }
79                 write!(f, "])")
80             }
81         }
82     }
83 }
84
// Identifies one kind of early exit from the cleanup-scope stack.
#[deriving(Copy, PartialEq, Show)]
pub enum EarlyExitLabel {
    // Exit taken while unwinding from a panic.
    UnwindExit,
    // Exit taken when returning from the function.
    ReturnExit,
    // Exit from the loop with the given node id; the `uint` indexes the
    // loop's exit-block array (`EXIT_BREAK`/`EXIT_LOOP`).
    LoopExit(ast::NodeId, uint)
}

// A previously generated block that performs a scope's cleanups and then
// branches to the exit identified by `label`.
#[deriving(Copy)]
pub struct CachedEarlyExit {
    label: EarlyExitLabel,
    cleanup_block: BasicBlockRef,
}

// A scheduled cleanup action (drop, free, lifetime-end, ...).
// NOTE(review): the precise semantics of the three predicates live in the
// concrete impls (`DropValue`, `FreeValue`, ...) which are outside this
// view — confirm against those before relying on the summaries below.
pub trait Cleanup<'tcx> {
    // Presumably: a panic during this cleanup must still propagate.
    fn must_unwind(&self) -> bool;
    // Presumably: this cleanup should also run on the unwind path.
    fn clean_on_unwind(&self) -> bool;
    // True if this cleanup is a lifetime-end marker rather than a drop/free.
    fn is_lifetime_end(&self) -> bool;
    // Emits the cleanup code into `bcx`, returning the block in which
    // translation should continue.
    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: Option<NodeInfo>)
                   -> Block<'blk, 'tcx>;
}

// An owned, boxed cleanup action.
pub type CleanupObj<'tcx> = Box<Cleanup<'tcx>+'tcx>;

// Names a cleanup scope either by its AST node id or by its index in the
// custom-scope stack.
#[deriving(Copy, Show)]
pub enum ScopeId {
    AstScope(ast::NodeId),
    CustomScope(CustomScopeIndex)
}
115
impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
    /// Invoked when we start to trans the code contained within a new cleanup scope.
    fn push_ast_cleanup_scope(&self, debug_loc: NodeInfo) {
        debug!("push_ast_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(debug_loc.id));

        // FIXME(#2202) -- currently closure bodies have a parent
        // region, which messes up the assertion below, since there
        // are no cleanup scopes on the stack at the start of
        // trans'ing a closure body.  I think though that this should
        // eventually be fixed by closure bodies not having a parent
        // region, though that's a touch unclear, and it might also be
        // better just to narrow this assertion more (i.e., by
        // excluding id's that correspond to closure bodies only). For
        // now we just say that if there is already an AST scope on the stack,
        // this new AST scope had better be its immediate child.
        // Temporarily removed due to slicing syntax hacks (KILLME).
        /*let top_scope = self.top_ast_scope();
        if top_scope.is_some() {
            assert_eq!(self.ccx
                           .tcx()
                           .region_maps
                           .opt_encl_scope(region::CodeExtent::from_node_id(debug_loc.id))
                           .map(|s|s.node_id()),
                       top_scope);
        }*/

        self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
                                          Some(debug_loc)));
    }

    /// Pushes a loop cleanup scope for the loop with id `id`. `exits` holds
    /// the blocks targeted by early exits out of the loop, indexed by
    /// `EXIT_BREAK`/`EXIT_LOOP`. The loop's own AST scope must already be on
    /// top of the stack (asserted below).
    fn push_loop_cleanup_scope(&self,
                               id: ast::NodeId,
                               exits: [Block<'blk, 'tcx>; EXIT_MAX]) {
        debug!("push_loop_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(id));
        assert_eq!(Some(id), self.top_ast_scope());

        // Just copy the debuginfo source location from the enclosing scope
        let debug_loc = self.scopes
                            .borrow()
                            .last()
                            .unwrap()
                            .debug_loc;

        self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc));
    }

    /// Pushes a new custom (temporary) scope and returns its stack index.
    fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
        let index = self.scopes_len();
        debug!("push_custom_cleanup_scope(): {}", index);

        // Just copy the debuginfo source location from the enclosing scope
        // (None when this is the first scope on the stack).
        let debug_loc = self.scopes
                            .borrow()
                            .last()
                            .map(|opt_scope| opt_scope.debug_loc)
                            .unwrap_or(None);

        self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc));
        CustomScopeIndex { index: index }
    }
178
179     fn push_custom_cleanup_scope_with_debug_loc(&self,
180                                                 debug_loc: NodeInfo)
181                                                 -> CustomScopeIndex {
182         let index = self.scopes_len();
183         debug!("push_custom_cleanup_scope(): {}", index);
184
185         self.push_scope(CleanupScope::new(CustomScopeKind, Some(debug_loc)));
186         CustomScopeIndex { index: index }
187     }
188
    /// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup
    /// stack, and generates the code to do its cleanups for normal exit.
    fn pop_and_trans_ast_cleanup_scope(&self,
                                       bcx: Block<'blk, 'tcx>,
                                       cleanup_scope: ast::NodeId)
                                       -> Block<'blk, 'tcx> {
        debug!("pop_and_trans_ast_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(cleanup_scope));

        assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));

        let scope = self.pop_scope();
        // Returns the block in which translation continues after the cleanups.
        self.trans_scope_cleanups(bcx, &scope)
    }

    /// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the
    /// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by
    /// branching to a block generated by `normal_exit_block`.
    fn pop_loop_cleanup_scope(&self,
                              cleanup_scope: ast::NodeId) {
        debug!("pop_loop_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(cleanup_scope));

        assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));

        // Scope is discarded; its cleanups were reached via the exit blocks.
        let _ = self.pop_scope();
    }

    /// Removes the top cleanup scope from the stack without executing its cleanups. The top
    /// cleanup scope must be the temporary scope `custom_scope`.
    fn pop_custom_cleanup_scope(&self,
                                custom_scope: CustomScopeIndex) {
        debug!("pop_custom_cleanup_scope({})", custom_scope.index);
        assert!(self.is_valid_to_pop_custom_scope(custom_scope));
        let _ = self.pop_scope();
    }

    /// Removes the top cleanup scope from the stack, which must be a temporary scope, and
    /// generates the code to do its cleanups for normal exit.
    fn pop_and_trans_custom_cleanup_scope(&self,
                                          bcx: Block<'blk, 'tcx>,
                                          custom_scope: CustomScopeIndex)
                                          -> Block<'blk, 'tcx> {
        debug!("pop_and_trans_custom_cleanup_scope({})", custom_scope);
        assert!(self.is_valid_to_pop_custom_scope(custom_scope));

        let scope = self.pop_scope();
        self.trans_scope_cleanups(bcx, &scope)
    }

    /// Returns the id of the top-most loop scope
    fn top_loop_scope(&self) -> ast::NodeId {
        // Scan from the innermost scope outwards.
        for scope in self.scopes.borrow().iter().rev() {
            if let LoopScopeKind(id, _) = scope.kind {
                return id;
            }
        }
        // Callers must only ask while translating code inside a loop.
        self.ccx.sess().bug("no loop scope found");
    }

    /// Returns a block to branch to which will perform all pending cleanups and then
    /// break/continue (depending on `exit`) out of the loop with id `cleanup_scope`
    fn normal_exit_block(&'blk self,
                         cleanup_scope: ast::NodeId,
                         exit: uint) -> BasicBlockRef {
        self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
    }

    /// Returns a block to branch to which will perform all pending cleanups and then return from
    /// this function
    fn return_exit_block(&'blk self) -> BasicBlockRef {
        self.trans_cleanups_to_exit_scope(ReturnExit)
    }
262
    /// Schedules a lifetime-end cleanup for `val` upon exit from
    /// `cleanup_scope` (see `LifetimeEnd`; no drop glue is involved).
    fn schedule_lifetime_end(&self,
                             cleanup_scope: ScopeId,
                             val: ValueRef) {
        let drop = box LifetimeEnd {
            ptr: val,
        };

        debug!("schedule_lifetime_end({}, val={})",
               cleanup_scope,
               self.ccx.tn().val_to_string(val));

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
    fn schedule_drop_mem(&self,
                         cleanup_scope: ScopeId,
                         val: ValueRef,
                         ty: Ty<'tcx>) {
        // Types with no destructor need no cleanup at all.
        if !common::type_needs_drop(self.ccx.tcx(), ty) { return; }
        let drop = box DropValue {
            is_immediate: false,
            must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
            val: val,
            ty: ty,
            zero: false
        };

        debug!("schedule_drop_mem({}, val={}, ty={})",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               ty.repr(self.ccx.tcx()));

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Schedules a (deep) drop and zero-ing of `val`, which is a pointer to an instance of `ty`
    fn schedule_drop_and_zero_mem(&self,
                                  cleanup_scope: ScopeId,
                                  val: ValueRef,
                                  ty: Ty<'tcx>) {
        if !common::type_needs_drop(self.ccx.tcx(), ty) { return; }
        // Same as `schedule_drop_mem` but with `zero: true`, so the memory
        // is zeroed after the drop.
        let drop = box DropValue {
            is_immediate: false,
            must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
            val: val,
            ty: ty,
            zero: true
        };

        debug!("schedule_drop_and_zero_mem({}, val={}, ty={}, zero={})",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               ty.repr(self.ccx.tcx()),
               true);

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Schedules a (deep) drop of `val`, which is an instance of `ty`
    fn schedule_drop_immediate(&self,
                               cleanup_scope: ScopeId,
                               val: ValueRef,
                               ty: Ty<'tcx>) {

        if !common::type_needs_drop(self.ccx.tcx(), ty) { return; }
        // `is_immediate: true`: `val` is the value itself, not a pointer to it.
        let drop = box DropValue {
            is_immediate: true,
            must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
            val: val,
            ty: ty,
            zero: false
        };

        debug!("schedule_drop_immediate({}, val={}, ty={})",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               ty.repr(self.ccx.tcx()));

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Schedules a call to `free(val)`. Note that this is a shallow operation.
    fn schedule_free_value(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           heap: Heap,
                           content_ty: Ty<'tcx>) {
        let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };

        debug!("schedule_free_value({}, val={}, heap={})",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               heap);

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Schedules a call to `free(val)`. Note that this is a shallow operation.
    /// `size`/`align` describe the slice allocation being freed.
    fn schedule_free_slice(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           size: ValueRef,
                           align: ValueRef,
                           heap: Heap) {
        let drop = box FreeSlice { ptr: val, size: size, align: align, heap: heap };

        debug!("schedule_free_slice({}, val={}, heap={})",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               heap);

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }
377
    /// Dispatches `cleanup` to the AST scope or custom scope named by
    /// `cleanup_scope`.
    fn schedule_clean(&self,
                      cleanup_scope: ScopeId,
                      cleanup: CleanupObj<'tcx>) {
        match cleanup_scope {
            AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
            CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
        }
    }

    /// Schedules a cleanup to occur upon exit from the AST scope with id
    /// `cleanup_scope`, which must be somewhere on the cleanup stack.
    fn schedule_clean_in_ast_scope(&self,
                                   cleanup_scope: ast::NodeId,
                                   cleanup: CleanupObj<'tcx>) {
        debug!("schedule_clean_in_ast_scope(cleanup_scope={})",
               cleanup_scope);

        for scope in self.scopes.borrow_mut().iter_mut().rev() {
            if scope.kind.is_ast_with_id(cleanup_scope) {
                scope.cleanups.push(cleanup);
                // Cached exit blocks no longer run the right cleanups.
                scope.clear_cached_exits();
                return;
            } else {
                // will be adding a cleanup to some enclosing scope
                scope.clear_cached_exits();
            }
        }

        // Reaching here means the requested scope was never pushed: a bug
        // in the caller.
        self.ccx.sess().bug(
            format!("no cleanup scope {} found",
                    self.ccx.tcx().map.node_to_string(cleanup_scope))[]);
    }

    /// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope.
    fn schedule_clean_in_custom_scope(&self,
                                      custom_scope: CustomScopeIndex,
                                      cleanup: CleanupObj<'tcx>) {
        debug!("schedule_clean_in_custom_scope(custom_scope={})",
               custom_scope.index);

        assert!(self.is_valid_custom_scope(custom_scope));

        let mut scopes = self.scopes.borrow_mut();
        let scope = &mut (*scopes)[custom_scope.index];
        scope.cleanups.push(cleanup);
        // Invalidate cached exits, same as for AST scopes.
        scope.clear_cached_exits();
    }
426
427     /// Returns true if there are pending cleanups that should execute on panic.
428     fn needs_invoke(&self) -> bool {
429         self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
430     }
431
    /// Returns a basic block to branch to in the event of a panic. This block will run the panic
    /// cleanups and eventually invoke the LLVM `Resume` instruction.
    fn get_landing_pad(&'blk self) -> BasicBlockRef {
        let _icx = base::push_ctxt("get_landing_pad");

        debug!("get_landing_pad");

        let orig_scopes_len = self.scopes_len();
        assert!(orig_scopes_len > 0);

        // Remove any scopes that do not have cleanups on panic:
        // (they are saved and restored below, so the stack is unchanged
        // when this function returns).
        let mut popped_scopes = vec!();
        while !self.top_scope(|s| s.needs_invoke()) {
            debug!("top scope does not need invoke");
            popped_scopes.push(self.pop_scope());
        }

        // Check for an existing landing pad in the new topmost scope:
        let llbb = self.get_or_create_landing_pad();

        // Push the scopes we removed back on:
        loop {
            match popped_scopes.pop() {
                Some(scope) => self.push_scope(scope),
                None => break
            }
        }

        // The stack must be back to its original depth.
        assert_eq!(self.scopes_len(), orig_scopes_len);

        return llbb;
    }
464 }
465
466 impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
467     /// Returns the id of the current top-most AST scope, if any.
468     fn top_ast_scope(&self) -> Option<ast::NodeId> {
469         for scope in self.scopes.borrow().iter().rev() {
470             match scope.kind {
471                 CustomScopeKind | LoopScopeKind(..) => {}
472                 AstScopeKind(i) => {
473                     return Some(i);
474                 }
475             }
476         }
477         None
478     }
479
    /// Returns the distance from the top of the stack to the innermost scope
    /// that has pending cleanups (0 = topmost), or `None` if no scope does.
    fn top_nonempty_cleanup_scope(&self) -> Option<uint> {
        self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())
    }

    /// True if `custom_scope` is a valid custom scope *and* is currently the
    /// top of the stack (the only position from which it may be popped).
    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
        self.is_valid_custom_scope(custom_scope) &&
            custom_scope.index == self.scopes.borrow().len() - 1
    }

    /// True if `custom_scope` is in bounds and refers to a custom
    /// (temporary) scope.
    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
        let scopes = self.scopes.borrow();
        custom_scope.index < scopes.len() &&
            (*scopes)[custom_scope.index].kind.is_temp()
    }

    /// Generates the cleanups for `scope` into `bcx`
    fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
                            bcx: Block<'blk, 'tcx>,
                            scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> {

        let mut bcx = bcx;
        if !bcx.unreachable.get() {
            // Cleanups run in reverse order of scheduling (LIFO).
            for cleanup in scope.cleanups.iter().rev() {
                bcx = cleanup.trans(bcx, scope.debug_loc);
            }
        }
        bcx
    }

    /// Number of scopes currently on the cleanup stack.
    fn scopes_len(&self) -> uint {
        self.scopes.borrow().len()
    }

    /// Pushes `scope` onto the cleanup stack.
    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
        self.scopes.borrow_mut().push(scope)
    }

    /// Pops and returns the topmost scope; fails if the stack is empty.
    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
        debug!("popping cleanup scope {}, {} scopes remaining",
               self.top_scope(|s| s.block_name("")),
               self.scopes_len() - 1);

        self.scopes.borrow_mut().pop().unwrap()
    }

    /// Applies `f` to the topmost scope; fails if the stack is empty.
    fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R {
        f(self.scopes.borrow().last().unwrap())
    }
528
    /// Used when the caller wishes to jump to an early exit, such as a return, break, continue, or
    /// unwind. This function will generate all cleanups between the top of the stack and the exit
    /// `label` and return a basic block that the caller can branch to.
    ///
    /// For example, if the current stack of cleanups were as follows:
    ///
    ///      AST 22
    ///      Custom 1
    ///      AST 23
    ///      Loop 23
    ///      Custom 2
    ///      AST 24
    ///
    /// and the `label` specifies a break from `Loop 23`, then this function would generate a
    /// series of basic blocks as follows:
    ///
    ///      Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
    ///
    /// where `break_blk` is the block specified in `Loop 23` as the target for breaks. The return
    /// value would be the first basic block in that sequence (`Cleanup(AST 24)`). The caller could
    /// then branch to `Cleanup(AST 24)` and it will perform all cleanups and finally branch to the
    /// `break_blk`.
    fn trans_cleanups_to_exit_scope(&'blk self,
                                    label: EarlyExitLabel)
                                    -> BasicBlockRef {
        debug!("trans_cleanups_to_exit_scope label={} scopes={}",
               label, self.scopes_len());

        let orig_scopes_len = self.scopes_len();
        let mut prev_llbb;
        let mut popped_scopes = vec!();

        // First we pop off all the cleanup stacks that are
        // traversed until the exit is reached, pushing them
        // onto the side vector `popped_scopes`. No code is
        // generated at this time.
        //
        // So, continuing the example from above, we would wind up
        // with a `popped_scopes` vector of `[AST 24, Custom 2]`.
        // (Presuming that there are no cached exits)
        loop {
            if self.scopes_len() == 0 {
                // Ran off the top of the stack without finding the exit:
                // only legal for unwind and return exits, which target
                // function-level blocks rather than a particular scope.
                match label {
                    UnwindExit => {
                        // Generate a block that will `Resume`.
                        let prev_bcx = self.new_block(true, "resume", None);
                        let personality = self.personality.get().expect(
                            "create_landing_pad() should have set this");
                        build::Resume(prev_bcx,
                                      build::Load(prev_bcx, personality));
                        prev_llbb = prev_bcx.llbb;
                        break;
                    }

                    ReturnExit => {
                        prev_llbb = self.get_llreturn();
                        break;
                    }

                    LoopExit(id, _) => {
                        self.ccx.sess().bug(format!(
                                "cannot exit from scope {}, \
                                not in scope", id)[]);
                    }
                }
            }

            // Check if we have already cached the unwinding of this
            // scope for this label. If so, we can stop popping scopes
            // and branch to the cached label, since it contains the
            // cleanups for any subsequent scopes.
            match self.top_scope(|s| s.cached_early_exit(label)) {
                Some(cleanup_block) => {
                    prev_llbb = cleanup_block;
                    break;
                }
                None => { }
            }

            // Pop off the scope, since we will be generating
            // unwinding code for it. If we are searching for a loop exit,
            // and this scope is that loop, then stop popping and set
            // `prev_llbb` to the appropriate exit block from the loop.
            popped_scopes.push(self.pop_scope());
            let scope = popped_scopes.last().unwrap();
            match label {
                UnwindExit | ReturnExit => { }
                LoopExit(id, exit) => {
                    match scope.kind.early_exit_block(id, exit) {
                        Some(exitllbb) => {
                            prev_llbb = exitllbb;
                            break;
                        }

                        None => { }
                    }
                }
            }
        }

        debug!("trans_cleanups_to_exit_scope: popped {} scopes",
               popped_scopes.len());

        // Now push the popped scopes back on. As we go,
        // we track in `prev_llbb` the exit to which this scope
        // should branch when it's done.
        //
        // So, continuing with our example, we will start out with
        // `prev_llbb` being set to `break_blk` (or possibly a cached
        // early exit). We will then pop the scopes from `popped_scopes`
        // and generate a basic block for each one, prepending it in the
        // series and updating `prev_llbb`. So we begin by popping `Custom 2`
        // and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)`
        // branch to `prev_llbb == break_blk`, giving us a sequence like:
        //
        //     Cleanup(Custom 2) -> prev_llbb
        //
        // We then pop `AST 24` and repeat the process, giving us the sequence:
        //
        //     Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb
        //
        // At this point, `popped_scopes` is empty, and so the final block
        // that we return to the user is `Cleanup(AST 24)`.
        while !popped_scopes.is_empty() {
            let mut scope = popped_scopes.pop().unwrap();

            if scope.cleanups.iter().any(|c| cleanup_is_suitable_for(&**c, label))
            {
                let name = scope.block_name("clean");
                debug!("generating cleanups for {}", name);
                let bcx_in = self.new_block(label.is_unwind(),
                                            name[],
                                            None);
                let mut bcx_out = bcx_in;
                // Run this scope's cleanups in LIFO order, skipping any that
                // don't apply to this kind of exit.
                for cleanup in scope.cleanups.iter().rev() {
                    if cleanup_is_suitable_for(&**cleanup, label) {
                        bcx_out = cleanup.trans(bcx_out,
                                                scope.debug_loc);
                    }
                }
                build::Br(bcx_out, prev_llbb);
                prev_llbb = bcx_in.llbb;
            } else {
                debug!("no suitable cleanups in {}",
                       scope.block_name("clean"));
            }

            // Cache this block so a later exit through the same scope with
            // the same label can reuse it instead of regenerating cleanups.
            scope.add_cached_early_exit(label, prev_llbb);
            self.push_scope(scope);
        }

        debug!("trans_cleanups_to_exit_scope: prev_llbb={}", prev_llbb);

        // The stack must be restored to its original depth.
        assert_eq!(self.scopes_len(), orig_scopes_len);
        prev_llbb
    }
685
    /// Creates a landing pad for the top scope, if one does not exist.  The landing pad will
    /// perform all cleanups necessary for an unwind and then `resume` to continue error
    /// propagation:
    ///
    ///     landing_pad -> ... cleanups ... -> [resume]
    ///
    /// (The cleanups and resume instruction are created by `trans_cleanups_to_exit_scope()`, not
    /// in this function itself.)
    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
        let pad_bcx;

        debug!("get_or_create_landing_pad");

        // Check if a landing pad block exists; if not, create one.
        // (Scoped block so the mutable borrow of `scopes` ends before the
        // translation below, which may touch the scope stack again.)
        {
            let mut scopes = self.scopes.borrow_mut();
            let last_scope = scopes.last_mut().unwrap();
            match last_scope.cached_landing_pad {
                Some(llbb) => { return llbb; }
                None => {
                    let name = last_scope.block_name("unwind");
                    pad_bcx = self.new_block(true, name[], None);
                    last_scope.cached_landing_pad = Some(pad_bcx.llbb);
                }
            }
        }

        // The landing pad return type (the type being propagated). Not sure what
        // this represents but it's determined by the personality function and
        // this is what the EH proposal example uses.
        let llretty = Type::struct_(self.ccx,
                                    &[Type::i8p(self.ccx), Type::i32(self.ccx)],
                                    false);

        // The exception handling personality function.
        //
        // If our compilation unit has the `eh_personality` lang item somewhere
        // within it, then we just need to translate that. Otherwise, we're
        // building an rlib which will depend on some upstream implementation of
        // this function, so we just codegen a generic reference to it. We don't
        // specify any of the types for the function, we just make it a symbol
        // that LLVM can later use.
        let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() {
            Some(def_id) => callee::trans_fn_ref(pad_bcx, def_id, ExprId(0)),
            None => {
                let mut personality = self.ccx.eh_personality().borrow_mut();
                match *personality {
                    Some(llpersonality) => llpersonality,
                    None => {
                        // Declare the external symbol once and cache it for
                        // subsequent landing pads in this codegen unit.
                        let fty = Type::variadic_func(&[], &Type::i32(self.ccx));
                        let f = base::decl_cdecl_fn(self.ccx,
                                                    "rust_eh_personality",
                                                    fty,
                                                    self.ccx.tcx().types.i32);
                        *personality = Some(f);
                        f
                    }
                }
            }
        };

        // The only landing pad clause will be 'cleanup'
        let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1u);

        // The landing pad block is a cleanup
        build::SetCleanup(pad_bcx, llretval);

        // We store the retval in a function-central alloca, so that calls to
        // Resume can find it.
        match self.personality.get() {
            Some(addr) => {
                build::Store(pad_bcx, llretval, addr);
            }
            None => {
                let addr = base::alloca(pad_bcx, common::val_ty(llretval), "");
                self.personality.set(Some(addr));
                build::Store(pad_bcx, llretval, addr);
            }
        }

        // Generate the cleanup block and branch to it.
        let cleanup_llbb = self.trans_cleanups_to_exit_scope(UnwindExit);
        build::Br(pad_bcx, cleanup_llbb);

        return pad_bcx.llbb;
    }
772 }
773
774 impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
775     fn new(kind: CleanupScopeKind<'blk, 'tcx>,
776            debug_loc: Option<NodeInfo>)
777         -> CleanupScope<'blk, 'tcx> {
778         CleanupScope {
779             kind: kind,
780             debug_loc: debug_loc,
781             cleanups: vec!(),
782             cached_early_exits: vec!(),
783             cached_landing_pad: None,
784         }
785     }
786
787     fn clear_cached_exits(&mut self) {
788         self.cached_early_exits = vec!();
789         self.cached_landing_pad = None;
790     }
791
792     fn cached_early_exit(&self,
793                          label: EarlyExitLabel)
794                          -> Option<BasicBlockRef> {
795         self.cached_early_exits.iter().
796             find(|e| e.label == label).
797             map(|e| e.cleanup_block)
798     }
799
800     fn add_cached_early_exit(&mut self,
801                              label: EarlyExitLabel,
802                              blk: BasicBlockRef) {
803         self.cached_early_exits.push(
804             CachedEarlyExit { label: label,
805                               cleanup_block: blk });
806     }
807
808     /// True if this scope has cleanups that need unwinding
809     fn needs_invoke(&self) -> bool {
810
811         self.cached_landing_pad.is_some() ||
812             self.cleanups.iter().any(|c| c.must_unwind())
813     }
814
815     /// Returns a suitable name to use for the basic block that handles this cleanup scope
816     fn block_name(&self, prefix: &str) -> String {
817         match self.kind {
818             CustomScopeKind => format!("{}_custom_", prefix),
819             AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
820             LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id),
821         }
822     }
823
824     pub fn drop_non_lifetime_clean(&mut self) {
825         self.cleanups.retain(|c| c.is_lifetime_end());
826     }
827 }
828
829 impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
830     fn is_temp(&self) -> bool {
831         match *self {
832             CustomScopeKind => true,
833             LoopScopeKind(..) | AstScopeKind(..) => false,
834         }
835     }
836
837     fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
838         match *self {
839             CustomScopeKind | LoopScopeKind(..) => false,
840             AstScopeKind(i) => i == id
841         }
842     }
843
844     fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
845         match *self {
846             CustomScopeKind | AstScopeKind(..) => false,
847             LoopScopeKind(i, _) => i == id
848         }
849     }
850
851     /// If this is a loop scope with id `id`, return the early exit block `exit`, else `None`
852     fn early_exit_block(&self,
853                         id: ast::NodeId,
854                         exit: uint) -> Option<BasicBlockRef> {
855         match *self {
856             LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
857             _ => None,
858         }
859     }
860 }
861
862 impl EarlyExitLabel {
863     fn is_unwind(&self) -> bool {
864         match *self {
865             UnwindExit => true,
866             _ => false
867         }
868     }
869 }
870
871 ///////////////////////////////////////////////////////////////////////////
872 // Cleanup types
873
#[deriving(Copy)]
pub struct DropValue<'tcx> {
    // Selects the glue entry point in `trans`: `drop_ty_immediate` when
    // true, `drop_ty` otherwise.
    is_immediate: bool,
    // Returned from both `must_unwind()` and `clean_on_unwind()`.
    must_unwind: bool,
    // The LLVM value this cleanup drops.
    val: ValueRef,
    // Type of the value being dropped.
    ty: Ty<'tcx>,
    // If true, `trans` zeroes the memory (`base::zero_mem`) after dropping.
    zero: bool
}
882
883 impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
884     fn must_unwind(&self) -> bool {
885         self.must_unwind
886     }
887
888     fn clean_on_unwind(&self) -> bool {
889         self.must_unwind
890     }
891
892     fn is_lifetime_end(&self) -> bool {
893         false
894     }
895
896     fn trans<'blk>(&self,
897                    bcx: Block<'blk, 'tcx>,
898                    debug_loc: Option<NodeInfo>)
899                    -> Block<'blk, 'tcx> {
900         let bcx = if self.is_immediate {
901             glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc)
902         } else {
903             glue::drop_ty(bcx, self.val, self.ty, debug_loc)
904         };
905         if self.zero {
906             base::zero_mem(bcx, self.val, self.ty);
907         }
908         bcx
909     }
910 }
911
#[deriving(Copy, Show)]
pub enum Heap {
    // The exchange heap; allocations are freed through the
    // `glue::trans_exchange_free_*` entry points.
    HeapExchange
}
916
#[deriving(Copy)]
pub struct FreeValue<'tcx> {
    // Pointer to the allocation this cleanup frees.
    ptr: ValueRef,
    // Which heap the allocation came from.
    heap: Heap,
    // Type of the pointed-to contents, passed to the free glue.
    content_ty: Ty<'tcx>
}
923
924 impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> {
925     fn must_unwind(&self) -> bool {
926         true
927     }
928
929     fn clean_on_unwind(&self) -> bool {
930         true
931     }
932
933     fn is_lifetime_end(&self) -> bool {
934         false
935     }
936
937     fn trans<'blk>(&self,
938                    bcx: Block<'blk, 'tcx>,
939                    debug_loc: Option<NodeInfo>)
940                    -> Block<'blk, 'tcx> {
941         apply_debug_loc(bcx.fcx, debug_loc);
942
943         match self.heap {
944             HeapExchange => {
945                 glue::trans_exchange_free_ty(bcx, self.ptr, self.content_ty)
946             }
947         }
948     }
949 }
950
#[deriving(Copy)]
pub struct FreeSlice {
    // Pointer to the slice allocation this cleanup frees.
    ptr: ValueRef,
    // Size of the allocation, passed to the dynamic free glue.
    size: ValueRef,
    // Alignment of the allocation, passed to the dynamic free glue.
    align: ValueRef,
    // Which heap the allocation came from.
    heap: Heap,
}
958
959 impl<'tcx> Cleanup<'tcx> for FreeSlice {
960     fn must_unwind(&self) -> bool {
961         true
962     }
963
964     fn clean_on_unwind(&self) -> bool {
965         true
966     }
967
968     fn is_lifetime_end(&self) -> bool {
969         false
970     }
971
972     fn trans<'blk>(&self,
973                    bcx: Block<'blk, 'tcx>,
974                    debug_loc: Option<NodeInfo>)
975                    -> Block<'blk, 'tcx> {
976         apply_debug_loc(bcx.fcx, debug_loc);
977
978         match self.heap {
979             HeapExchange => {
980                 glue::trans_exchange_free_dyn(bcx, self.ptr, self.size, self.align)
981             }
982         }
983     }
984 }
985
#[deriving(Copy)]
pub struct LifetimeEnd {
    // Pointer whose lifetime is ended via `base::call_lifetime_end`.
    ptr: ValueRef,
}
990
991 impl<'tcx> Cleanup<'tcx> for LifetimeEnd {
992     fn must_unwind(&self) -> bool {
993         false
994     }
995
996     fn clean_on_unwind(&self) -> bool {
997         true
998     }
999
1000     fn is_lifetime_end(&self) -> bool {
1001         true
1002     }
1003
1004     fn trans<'blk>(&self,
1005                    bcx: Block<'blk, 'tcx>,
1006                    debug_loc: Option<NodeInfo>)
1007                    -> Block<'blk, 'tcx> {
1008         apply_debug_loc(bcx.fcx, debug_loc);
1009         base::call_lifetime_end(bcx, self.ptr);
1010         bcx
1011     }
1012 }
1013
1014 pub fn temporary_scope(tcx: &ty::ctxt,
1015                        id: ast::NodeId)
1016                        -> ScopeId {
1017     match tcx.region_maps.temporary_scope(id) {
1018         Some(scope) => {
1019             let r = AstScope(scope.node_id());
1020             debug!("temporary_scope({}) = {}", id, r);
1021             r
1022         }
1023         None => {
1024             tcx.sess.bug(format!("no temporary scope available for expr {}",
1025                                  id)[])
1026         }
1027     }
1028 }
1029
1030 pub fn var_scope(tcx: &ty::ctxt,
1031                  id: ast::NodeId)
1032                  -> ScopeId {
1033     let r = AstScope(tcx.region_maps.var_scope(id).node_id());
1034     debug!("var_scope({}) = {}", id, r);
1035     r
1036 }
1037
1038 fn cleanup_is_suitable_for(c: &Cleanup,
1039                            label: EarlyExitLabel) -> bool {
1040     !label.is_unwind() || c.clean_on_unwind()
1041 }
1042
1043 fn apply_debug_loc(fcx: &FunctionContext, debug_loc: Option<NodeInfo>) {
1044     match debug_loc {
1045         Some(ref src_loc) => {
1046             debuginfo::set_source_location(fcx, src_loc.id, src_loc.span);
1047         }
1048         None => {
1049             debuginfo::clear_source_location(fcx);
1050         }
1051     }
1052 }
1053
1054 ///////////////////////////////////////////////////////////////////////////
1055 // These traits just exist to put the methods into this file.
1056
pub trait CleanupMethods<'blk, 'tcx> {
    /// Pushes a cleanup scope for the AST node carried in `id`.
    fn push_ast_cleanup_scope(&self, id: NodeInfo);
    /// Pushes a cleanup scope for a loop, with its early-exit target
    /// blocks (at most `EXIT_MAX` of them, indexed by exit number).
    fn push_loop_cleanup_scope(&self,
                               id: ast::NodeId,
                               exits: [Block<'blk, 'tcx>; EXIT_MAX]);
    /// Pushes a temporary (custom) scope not tied to any AST node.
    fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
    /// Like `push_custom_cleanup_scope`, but drop calls generated for the
    /// scope will be associated with `debug_loc`.
    fn push_custom_cleanup_scope_with_debug_loc(&self,
                                                debug_loc: NodeInfo)
                                                -> CustomScopeIndex;
    /// Pops the AST scope `cleanup_scope`, translating its cleanups into
    /// `bcx`; returns the block in which codegen should continue.
    fn pop_and_trans_ast_cleanup_scope(&self,
                                              bcx: Block<'blk, 'tcx>,
                                              cleanup_scope: ast::NodeId)
                                              -> Block<'blk, 'tcx>;
    /// Pops the loop scope for `cleanup_scope` (no block returned).
    fn pop_loop_cleanup_scope(&self,
                              cleanup_scope: ast::NodeId);
    /// Pops the custom scope `custom_scope` (no block returned).
    fn pop_custom_cleanup_scope(&self,
                                custom_scope: CustomScopeIndex);
    /// Pops the custom scope `custom_scope`, translating its cleanups into
    /// `bcx`; returns the block in which codegen should continue.
    fn pop_and_trans_custom_cleanup_scope(&self,
                                          bcx: Block<'blk, 'tcx>,
                                          custom_scope: CustomScopeIndex)
                                          -> Block<'blk, 'tcx>;
    /// Returns the id of the innermost enclosing loop scope.
    fn top_loop_scope(&self) -> ast::NodeId;
    /// Returns a block that runs pending cleanups and then takes early
    /// exit `exit` out of the loop scope `cleanup_scope`.
    fn normal_exit_block(&'blk self,
                         cleanup_scope: ast::NodeId,
                         exit: uint) -> BasicBlockRef;
    /// Returns a block that runs pending cleanups and then returns from
    /// the function.
    fn return_exit_block(&'blk self) -> BasicBlockRef;
    /// Schedules a lifetime-end marker for `val` (see `LifetimeEnd`).
    fn schedule_lifetime_end(&self,
                         cleanup_scope: ScopeId,
                         val: ValueRef);
    /// Schedules a drop of the memory `val` points at (see `DropValue`).
    fn schedule_drop_mem(&self,
                         cleanup_scope: ScopeId,
                         val: ValueRef,
                         ty: Ty<'tcx>);
    /// Like `schedule_drop_mem`, but also zeroes the memory after the drop.
    fn schedule_drop_and_zero_mem(&self,
                                  cleanup_scope: ScopeId,
                                  val: ValueRef,
                                  ty: Ty<'tcx>);
    /// Schedules a drop of the immediate value `val`.
    fn schedule_drop_immediate(&self,
                               cleanup_scope: ScopeId,
                               val: ValueRef,
                               ty: Ty<'tcx>);
    /// Schedules freeing of the heap allocation `val` (see `FreeValue`).
    fn schedule_free_value(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           heap: Heap,
                           content_ty: Ty<'tcx>);
    /// Schedules freeing of a slice allocation with the given size and
    /// alignment (see `FreeSlice`).
    fn schedule_free_slice(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           size: ValueRef,
                           align: ValueRef,
                           heap: Heap);
    /// Schedules an arbitrary cleanup object in `cleanup_scope`.
    fn schedule_clean(&self,
                      cleanup_scope: ScopeId,
                      cleanup: CleanupObj<'tcx>);
    /// Schedules a cleanup in the AST scope `cleanup_scope`.
    fn schedule_clean_in_ast_scope(&self,
                                   cleanup_scope: ast::NodeId,
                                   cleanup: CleanupObj<'tcx>);
    /// Schedules a cleanup in the custom scope `custom_scope`.
    fn schedule_clean_in_custom_scope(&self,
                                    custom_scope: CustomScopeIndex,
                                    cleanup: CleanupObj<'tcx>);
    /// True if some pending cleanup must run on unwind
    /// (cf. `CleanupScope::needs_invoke`).
    fn needs_invoke(&self) -> bool;
    /// Returns the landing pad for unwinding through the current scopes.
    fn get_landing_pad(&'blk self) -> BasicBlockRef;
}
1121
trait CleanupHelperMethods<'blk, 'tcx> {
    /// Returns the id of the innermost enclosing AST scope, if any.
    fn top_ast_scope(&self) -> Option<ast::NodeId>;
    /// Returns the index of the topmost scope that still has cleanups, if any.
    fn top_nonempty_cleanup_scope(&self) -> Option<uint>;
    /// True if it is currently valid to pop the custom scope `custom_scope`.
    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    /// True if `custom_scope` refers to a valid custom scope.
    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    /// Translates the cleanups of `scope` into `bcx`, returning the block
    /// in which codegen should continue.
    fn trans_scope_cleanups(&self,
                            bcx: Block<'blk, 'tcx>,
                            scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
    /// Returns a block that runs the pending cleanups and then exits via
    /// `label` (used for both the unwind path and normal early exits).
    fn trans_cleanups_to_exit_scope(&'blk self,
                                    label: EarlyExitLabel)
                                    -> BasicBlockRef;
    /// Returns the landing pad block, creating it on first use.
    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
    /// Number of cleanup scopes currently on the stack.
    fn scopes_len(&self) -> uint;
    /// Pushes `scope` onto the cleanup scope stack.
    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
    /// Pops and returns the topmost cleanup scope.
    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
    /// Invokes `f` with a borrow of the topmost cleanup scope.
    fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R;
}