// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! ## The Cleanup module
//!
//! The cleanup module tracks what values need to be cleaned up as scopes
//! are exited, either via panic or just normal control flow. The basic
//! idea is that the function context maintains a stack of cleanup scopes
//! that are pushed/popped as we traverse the AST. There is typically
//! at least one cleanup scope per AST node; some AST nodes may introduce
//! additional temporary scopes.
//!
//! Cleanup items can be scheduled into any of the scopes on the stack.
//! Typically, when a scope is popped, we will also generate the code for
//! each of its cleanups at that time. This corresponds to a normal exit
//! from a block (for example, an expression completing evaluation
//! successfully without panic). However, it is also possible to pop a
//! block *without* executing its cleanups; this is typically used to
//! guard intermediate values that must be cleaned up on panic, but not
//! if everything goes right. See the section on custom scopes below for
//! more details.
//!
//! Cleanup scopes come in three kinds:
//!
//! - **AST scopes:** each AST node in a function body has a corresponding
//!   AST scope. We push the AST scope when we start generating code for an
//!   AST node and pop it once the AST node has been fully generated.
//! - **Loop scopes:** loops have an additional cleanup scope. Cleanups are
//!   never scheduled into loop scopes; instead, they are used to record the
//!   basic blocks that we should branch to when a `continue` or `break` statement
//!   is encountered.
//! - **Custom scopes:** custom scopes are typically used to ensure cleanup
//!   of intermediate values.
//!
//! ### When to schedule cleanup
//!
//! Although the cleanup system is intended to *feel* fairly declarative,
//! it's still important to time calls to `schedule_clean()` correctly.
//! Basically, you should not schedule cleanup for memory until it has
//! been initialized, because if an unwind should occur before the memory
//! is fully initialized, then the cleanup will run and try to free or
//! drop uninitialized memory. If the initialization itself produces
//! byproducts that need to be freed, then you should use temporary custom
//! scopes to ensure that those byproducts will get freed on unwind. For
//! example, an expression like `box foo()` will first allocate a box in the
//! heap and then call `foo()` -- if `foo()` should panic, this box needs
//! to be *shallowly* freed.
//!
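//! As a small illustration of this timing rule, here is a hedged sketch of
//! scheduling the drop for a `let`-bound local. The names `bcx`, `expr`,
//! `lldest`, `stmt_id`, and `var_ty` are assumed to be in scope and the
//! `trans_into` call is schematic; only `schedule_drop_mem` is this
//! module's real API:
//!
//! ```ignore
//! // Fully initialize the memory first ...
//! let bcx = expr::trans_into(bcx, expr, expr::SaveIn(lldest));
//! // ... and only then schedule the (deep) drop. Scheduling it any earlier
//! // would risk dropping uninitialized memory if `expr` unwinds.
//! bcx.fcx.schedule_drop_mem(AstScope(stmt_id), lldest, var_ty);
//! ```
//!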
//! ### Long-distance jumps
//!
//! In addition to popping a scope, which corresponds to normal control
//! flow exiting the scope, we may also *jump out* of a scope into some
//! earlier scope on the stack. This can occur in response to a `return`,
//! `break`, or `continue` statement, but also in response to panic. In
//! any of these cases, we will generate a series of cleanup blocks for
//! each of the scopes that is exited. So, if the stack contains scopes A
//! ... Z, and we break out of a loop whose corresponding cleanup scope is
//! X, we would generate cleanup blocks for the cleanups in X, Y, and Z.
//! After cleanup is done we would branch to the exit point for scope X.
//! But if panic should occur, we would generate cleanups for all the
//! scopes from A to Z and then resume the unwind process afterwards.
//!
//! To avoid generating tons of code, we cache the cleanup blocks that we
//! create for breaks, returns, unwinds, and other jumps. Whenever a new
//! cleanup is scheduled, though, we must clear these cached blocks. A
//! possible improvement would be to keep the cached blocks but simply
//! generate a new block which performs the additional cleanup and then
//! branches to the existing cached blocks.
//!
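//! In code, translating a `break` therefore reduces to asking for the head
//! of that (possibly cached) chain of cleanup blocks and branching to it.
//! A sketch, assuming `bcx` and `loop_id` are in scope (`normal_exit_block`,
//! `EXIT_BREAK`, and `build::Br` are the real items used in this module):
//!
//! ```ignore
//! // All pending cleanups run first; the chain ends at the loop's
//! // break target block.
//! let cleanup_llbb = bcx.fcx.normal_exit_block(loop_id, EXIT_BREAK);
//! build::Br(bcx, cleanup_llbb, DebugLoc::None);
//! ```
//!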
//! ### AST and loop cleanup scopes
//!
//! AST cleanup scopes are pushed when we begin processing an AST node and
//! popped when we finish. They are used to house cleanups related to rvalue
//! temporaries that get referenced (e.g., due to an expression like `&Foo()`).
//! Whenever an AST scope is popped, we always trans all the cleanups, adding
//! the cleanup code after the postdominator of the AST node.
//!
//! AST nodes that represent breakable loops also push a loop scope; the
//! loop scope never has any actual cleanups; it's just used to point to
//! the basic blocks where control should flow after a "continue" or
//! "break" statement. Popping a loop scope never generates code.
//!
//! ### Custom cleanup scopes
//!
//! Custom cleanup scopes are used for a variety of purposes. The most
//! common, though, is to handle temporary byproducts, where cleanup only
//! needs to occur on panic. The general strategy is to push a custom
//! cleanup scope, schedule *shallow* cleanups into the custom scope, and
//! then pop the custom scope (without transing the cleanups) when
//! execution succeeds normally. This way the cleanups are only trans'd on
//! unwind, and only up until the point where execution succeeded, at
//! which time the complete value should be stored in an lvalue or some
//! other place where normal cleanup applies.
//!
//! To spell it out, here is an example. Imagine an expression `box expr`.
//! We would basically:
//!
//! 1. Push a custom cleanup scope C.
//! 2. Allocate the box.
//! 3. Schedule a shallow free in the scope C.
//! 4. Trans `expr` into the box.
//! 5. Pop the scope C.
//! 6. Return the box as an rvalue.
//!
//! This way, if a panic occurs while transing `expr`, the custom
//! cleanup scope C is still on the stack, and hence the box will be
//! freed. The trans code for `expr` itself is responsible for freeing
//! any other byproducts that may be in play.
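//!
//! In code form, the six steps correspond roughly to the sketch below.
//! The allocation and trans helpers (`alloc_box`, `trans_into_box`) and
//! the `box_ptr`/`content_ty` names are illustrative assumptions; the
//! cleanup calls are this module's real API:
//!
//! ```ignore
//! let custom = bcx.fcx.push_custom_cleanup_scope();             // 1.
//! let box_ptr = alloc_box(bcx, content_ty);                     // 2. (schematic)
//! bcx.fcx.schedule_free_value(CustomScope(custom), box_ptr,     // 3. shallow free
//!                             HeapExchange, content_ty);
//! let bcx = trans_into_box(bcx, expr, box_ptr);                 // 4. (schematic)
//! bcx.fcx.pop_custom_cleanup_scope(custom);                     // 5. no cleanup runs
//! // 6. the initialized box is now returned as an rvalue; from here on,
//! //    normal (deep) cleanup scheduled by the caller applies.
//! ```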

pub use self::ScopeId::*;
pub use self::CleanupScopeKind::*;
pub use self::EarlyExitLabel::*;
pub use self::Heap::*;

use llvm::{BasicBlockRef, ValueRef};
use trans::base;
use trans::build;
use trans::callee;
use trans::common;
use trans::common::{Block, FunctionContext, ExprId, NodeIdAndSpan};
use trans::debuginfo::{DebugLoc, ToDebugLoc};
use trans::glue;
use middle::region;
use trans::type_::Type;
use middle::ty::{self, Ty};
use std::fmt;
use syntax::ast;
use util::ppaux::Repr;

pub struct CleanupScope<'blk, 'tcx: 'blk> {
    // The kind of this cleanup scope. A custom scope is a
    // *temporary scope* that is pushed during trans to clean up
    // miscellaneous garbage that trans may generate whose
    // lifetime is a subset of some expression. See module doc for
    // more details.
    kind: CleanupScopeKind<'blk, 'tcx>,

    // Cleanups to run upon scope exit.
    cleanups: Vec<CleanupObj<'tcx>>,

    // The debug location any drop calls generated for this scope will be
    // associated with.
    debug_loc: DebugLoc,

    cached_early_exits: Vec<CachedEarlyExit>,
    cached_landing_pad: Option<BasicBlockRef>,
}

#[derive(Copy, Debug)]
pub struct CustomScopeIndex {
    index: uint
}

pub const EXIT_BREAK: uint = 0;
pub const EXIT_LOOP: uint = 1;
pub const EXIT_MAX: uint = 2;

pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
    CustomScopeKind,
    AstScopeKind(ast::NodeId),
    LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>; EXIT_MAX])
}

impl<'blk, 'tcx: 'blk> fmt::Debug for CleanupScopeKind<'blk, 'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            CustomScopeKind => write!(f, "CustomScopeKind"),
            AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid),
            LoopScopeKind(nid, ref blks) => {
                try!(write!(f, "LoopScopeKind({}, [", nid));
                for blk in blks {
                    try!(write!(f, "{:p}, ", blk));
                }
                write!(f, "])")
            }
        }
    }
}

#[derive(Copy, PartialEq, Debug)]
pub enum EarlyExitLabel {
    UnwindExit,
    ReturnExit,
    LoopExit(ast::NodeId, uint)
}

#[derive(Copy)]
pub struct CachedEarlyExit {
    label: EarlyExitLabel,
    cleanup_block: BasicBlockRef,
}

pub trait Cleanup<'tcx> {
    fn must_unwind(&self) -> bool;
    fn clean_on_unwind(&self) -> bool;
    fn is_lifetime_end(&self) -> bool;
    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: DebugLoc)
                   -> Block<'blk, 'tcx>;
}

pub type CleanupObj<'tcx> = Box<Cleanup<'tcx>+'tcx>;

#[derive(Copy, Debug)]
pub enum ScopeId {
    AstScope(ast::NodeId),
    CustomScope(CustomScopeIndex)
}

impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
    /// Invoked when we start to trans the code contained within a new cleanup scope.
    fn push_ast_cleanup_scope(&self, debug_loc: NodeIdAndSpan) {
        debug!("push_ast_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(debug_loc.id));

        // FIXME(#2202) -- currently closure bodies have a parent
        // region, which messes up the assertion below, since there
        // are no cleanup scopes on the stack at the start of
        // trans'ing a closure body. I think though that this should
        // eventually be fixed by closure bodies not having a parent
        // region, though that's a touch unclear, and it might also be
        // better just to narrow this assertion more (i.e., by
        // excluding id's that correspond to closure bodies only). For
        // now we just say that if there is already an AST scope on the stack,
        // this new AST scope had better be its immediate child.
        let top_scope = self.top_ast_scope();
        if top_scope.is_some() {
            assert!((self.ccx
                     .tcx()
                     .region_maps
                     .opt_encl_scope(region::CodeExtent::from_node_id(debug_loc.id))
                     .map(|s|s.node_id()) == top_scope)
                    ||
                    (self.ccx
                     .tcx()
                     .region_maps
                     .opt_encl_scope(region::CodeExtent::DestructionScope(debug_loc.id))
                     .map(|s|s.node_id()) == top_scope));
        }

        self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
                                          debug_loc.debug_loc()));
    }

    fn push_loop_cleanup_scope(&self,
                               id: ast::NodeId,
                               exits: [Block<'blk, 'tcx>; EXIT_MAX]) {
        debug!("push_loop_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(id));
        assert_eq!(Some(id), self.top_ast_scope());

        // Just copy the debuginfo source location from the enclosing scope
        let debug_loc = self.scopes
                            .borrow()
                            .last()
                            .unwrap()
                            .debug_loc;

        self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc));
    }

    fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
        let index = self.scopes_len();
        debug!("push_custom_cleanup_scope(): {}", index);

        // Just copy the debuginfo source location from the enclosing scope
        let debug_loc = self.scopes
                            .borrow()
                            .last()
                            .map(|opt_scope| opt_scope.debug_loc)
                            .unwrap_or(DebugLoc::None);

        self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc));
        CustomScopeIndex { index: index }
    }

    fn push_custom_cleanup_scope_with_debug_loc(&self,
                                                debug_loc: NodeIdAndSpan)
                                                -> CustomScopeIndex {
        let index = self.scopes_len();
        debug!("push_custom_cleanup_scope_with_debug_loc(): {}", index);

        self.push_scope(CleanupScope::new(CustomScopeKind,
                                          debug_loc.debug_loc()));
        CustomScopeIndex { index: index }
    }

    /// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup
    /// stack, and generates the code to do its cleanups for normal exit.
    fn pop_and_trans_ast_cleanup_scope(&self,
                                       bcx: Block<'blk, 'tcx>,
                                       cleanup_scope: ast::NodeId)
                                       -> Block<'blk, 'tcx> {
        debug!("pop_and_trans_ast_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(cleanup_scope));

        assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));

        let scope = self.pop_scope();
        self.trans_scope_cleanups(bcx, &scope)
    }

    /// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the
    /// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by
    /// branching to a block generated by `normal_exit_block`.
    fn pop_loop_cleanup_scope(&self,
                              cleanup_scope: ast::NodeId) {
        debug!("pop_loop_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(cleanup_scope));

        assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));

        let _ = self.pop_scope();
    }

    /// Removes the top cleanup scope from the stack without executing its cleanups. The top
    /// cleanup scope must be the temporary scope `custom_scope`.
    fn pop_custom_cleanup_scope(&self,
                                custom_scope: CustomScopeIndex) {
        debug!("pop_custom_cleanup_scope({})", custom_scope.index);
        assert!(self.is_valid_to_pop_custom_scope(custom_scope));
        let _ = self.pop_scope();
    }

    /// Removes the top cleanup scope from the stack, which must be a temporary scope, and
    /// generates the code to do its cleanups for normal exit.
    fn pop_and_trans_custom_cleanup_scope(&self,
                                          bcx: Block<'blk, 'tcx>,
                                          custom_scope: CustomScopeIndex)
                                          -> Block<'blk, 'tcx> {
        debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope);
        assert!(self.is_valid_to_pop_custom_scope(custom_scope));

        let scope = self.pop_scope();
        self.trans_scope_cleanups(bcx, &scope)
    }

    /// Returns the id of the top-most loop scope
    fn top_loop_scope(&self) -> ast::NodeId {
        for scope in self.scopes.borrow().iter().rev() {
            if let LoopScopeKind(id, _) = scope.kind {
                return id;
            }
        }
        self.ccx.sess().bug("no loop scope found");
    }

    /// Returns a block to branch to which will perform all pending cleanups and then
    /// break/continue (depending on `exit`) out of the loop with id `cleanup_scope`
    fn normal_exit_block(&'blk self,
                         cleanup_scope: ast::NodeId,
                         exit: uint) -> BasicBlockRef {
        self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
    }

    /// Returns a block to branch to which will perform all pending cleanups and then return from
    /// this function
    fn return_exit_block(&'blk self) -> BasicBlockRef {
        self.trans_cleanups_to_exit_scope(ReturnExit)
    }

    fn schedule_lifetime_end(&self,
                             cleanup_scope: ScopeId,
                             val: ValueRef) {
        let drop = box LifetimeEnd {
            ptr: val,
        };

        debug!("schedule_lifetime_end({:?}, val={})",
               cleanup_scope,
               self.ccx.tn().val_to_string(val));

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
    fn schedule_drop_mem(&self,
                         cleanup_scope: ScopeId,
                         val: ValueRef,
                         ty: Ty<'tcx>) {
        if !common::type_needs_drop(self.ccx.tcx(), ty) { return; }
        let drop = box DropValue {
            is_immediate: false,
            must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
            val: val,
            ty: ty,
            zero: false
        };

        debug!("schedule_drop_mem({:?}, val={}, ty={})",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               ty.repr(self.ccx.tcx()));

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Schedules a (deep) drop and zeroing of `val`, which is a pointer to an instance of `ty`
    fn schedule_drop_and_zero_mem(&self,
                                  cleanup_scope: ScopeId,
                                  val: ValueRef,
                                  ty: Ty<'tcx>) {
        if !common::type_needs_drop(self.ccx.tcx(), ty) { return; }
        let drop = box DropValue {
            is_immediate: false,
            must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
            val: val,
            ty: ty,
            zero: true
        };

        debug!("schedule_drop_and_zero_mem({:?}, val={}, ty={}, zero={})",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               ty.repr(self.ccx.tcx()),
               true);

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Schedules a (deep) drop of `val`, which is an instance of `ty`
    fn schedule_drop_immediate(&self,
                               cleanup_scope: ScopeId,
                               val: ValueRef,
                               ty: Ty<'tcx>) {
        if !common::type_needs_drop(self.ccx.tcx(), ty) { return; }
        let drop = box DropValue {
            is_immediate: true,
            must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
            val: val,
            ty: ty,
            zero: false
        };

        debug!("schedule_drop_immediate({:?}, val={}, ty={:?})",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               ty.repr(self.ccx.tcx()));

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Schedules a call to `free(val)`. Note that this is a shallow operation.
    fn schedule_free_value(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           heap: Heap,
                           content_ty: Ty<'tcx>) {
        let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };

        debug!("schedule_free_value({:?}, val={}, heap={:?})",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               heap);

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Schedules a call to `free(val)`. Note that this is a shallow operation.
    fn schedule_free_slice(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           size: ValueRef,
                           align: ValueRef,
                           heap: Heap) {
        let drop = box FreeSlice { ptr: val, size: size, align: align, heap: heap };

        debug!("schedule_free_slice({:?}, val={}, heap={:?})",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               heap);

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    fn schedule_clean(&self,
                      cleanup_scope: ScopeId,
                      cleanup: CleanupObj<'tcx>) {
        match cleanup_scope {
            AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
            CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
        }
    }

    /// Schedules a cleanup to occur upon exit from the AST scope with id `cleanup_scope`. Any
    /// exits cached for this scope, or for the scopes pushed above it, are cleared.
    fn schedule_clean_in_ast_scope(&self,
                                   cleanup_scope: ast::NodeId,
                                   cleanup: CleanupObj<'tcx>) {
        debug!("schedule_clean_in_ast_scope(cleanup_scope={})",
               cleanup_scope);

        for scope in self.scopes.borrow_mut().iter_mut().rev() {
            if scope.kind.is_ast_with_id(cleanup_scope) {
                scope.cleanups.push(cleanup);
                scope.clear_cached_exits();
                return;
            } else {
                // will be adding a cleanup to some enclosing scope
                scope.clear_cached_exits();
            }
        }

        self.ccx.sess().bug(
            &format!("no cleanup scope {} found",
                    self.ccx.tcx().map.node_to_string(cleanup_scope))[]);
    }

    /// Schedules a cleanup to occur upon exit from the custom scope `custom_scope`, which must
    /// be a valid temporary scope.
    fn schedule_clean_in_custom_scope(&self,
                                      custom_scope: CustomScopeIndex,
                                      cleanup: CleanupObj<'tcx>) {
        debug!("schedule_clean_in_custom_scope(custom_scope={})",
               custom_scope.index);

        assert!(self.is_valid_custom_scope(custom_scope));

        let mut scopes = self.scopes.borrow_mut();
        let scope = &mut (*scopes)[custom_scope.index];
        scope.cleanups.push(cleanup);
        scope.clear_cached_exits();
    }

    /// Returns true if there are pending cleanups that should execute on panic.
    fn needs_invoke(&self) -> bool {
        self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
    }

    /// Returns a basic block to branch to in the event of a panic. This block will run the panic
    /// cleanups and eventually invoke the LLVM `Resume` instruction.
    fn get_landing_pad(&'blk self) -> BasicBlockRef {
        let _icx = base::push_ctxt("get_landing_pad");

        debug!("get_landing_pad");

        let orig_scopes_len = self.scopes_len();
        assert!(orig_scopes_len > 0);

        // Remove any scopes that do not have cleanups on panic:
        let mut popped_scopes = vec!();
        while !self.top_scope(|s| s.needs_invoke()) {
            debug!("top scope does not need invoke");
            popped_scopes.push(self.pop_scope());
        }

        // Check for an existing landing pad in the new topmost scope:
        let llbb = self.get_or_create_landing_pad();

        // Push the scopes we removed back on:
        while let Some(scope) = popped_scopes.pop() {
            self.push_scope(scope);
        }

        assert_eq!(self.scopes_len(), orig_scopes_len);

        return llbb;
    }
}

impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
    /// Returns the id of the current top-most AST scope, if any.
    fn top_ast_scope(&self) -> Option<ast::NodeId> {
        for scope in self.scopes.borrow().iter().rev() {
            match scope.kind {
                CustomScopeKind | LoopScopeKind(..) => {}
                AstScopeKind(i) => {
                    return Some(i);
                }
            }
        }
        None
    }

    fn top_nonempty_cleanup_scope(&self) -> Option<uint> {
        self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())
    }

    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
        self.is_valid_custom_scope(custom_scope) &&
            custom_scope.index == self.scopes.borrow().len() - 1
    }

    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
        let scopes = self.scopes.borrow();
        custom_scope.index < scopes.len() &&
            (*scopes)[custom_scope.index].kind.is_temp()
    }

    /// Generates the cleanups for `scope` into `bcx`
    fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
                            bcx: Block<'blk, 'tcx>,
                            scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> {
        let mut bcx = bcx;
        if !bcx.unreachable.get() {
            for cleanup in scope.cleanups.iter().rev() {
                bcx = cleanup.trans(bcx, scope.debug_loc);
            }
        }
        bcx
    }

    fn scopes_len(&self) -> uint {
        self.scopes.borrow().len()
    }

    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
        self.scopes.borrow_mut().push(scope)
    }

    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
        debug!("popping cleanup scope {}, {} scopes remaining",
               self.top_scope(|s| s.block_name("")),
               self.scopes_len() - 1);

        self.scopes.borrow_mut().pop().unwrap()
    }

    fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R {
        f(self.scopes.borrow().last().unwrap())
    }

    /// Used when the caller wishes to jump to an early exit, such as a return, break, continue, or
    /// unwind. This function will generate all cleanups between the top of the stack and the exit
    /// `label` and return a basic block that the caller can branch to.
    ///
    /// For example, if the current stack of cleanups were as follows:
    ///
    ///      AST 22
    ///      Custom 1
    ///      AST 23
    ///      Loop 23
    ///      Custom 2
    ///      AST 24
    ///
    /// and the `label` specifies a break from `Loop 23`, then this function would generate a
    /// series of basic blocks as follows:
    ///
    ///      Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
    ///
    /// where `break_blk` is the block specified in `Loop 23` as the target for breaks. The return
    /// value would be the first basic block in that sequence (`Cleanup(AST 24)`). The caller could
    /// then branch to `Cleanup(AST 24)` and it will perform all cleanups and finally branch to the
    /// `break_blk`.
    fn trans_cleanups_to_exit_scope(&'blk self,
                                    label: EarlyExitLabel)
                                    -> BasicBlockRef {
        debug!("trans_cleanups_to_exit_scope label={:?} scopes={}",
               label, self.scopes_len());

        let orig_scopes_len = self.scopes_len();
        let mut prev_llbb;
        let mut popped_scopes = vec!();

        // First we pop off all the cleanup scopes that are
        // traversed until the exit is reached, pushing them
        // onto the side vector `popped_scopes`. No code is
        // generated at this time.
        //
        // So, continuing the example from above, we would wind up
        // with a `popped_scopes` vector of `[AST 24, Custom 2]`.
        // (Presuming that there are no cached exits)
        loop {
            if self.scopes_len() == 0 {
                match label {
                    UnwindExit => {
                        // Generate a block that will `Resume`.
                        let prev_bcx = self.new_block(true, "resume", None);
                        let personality = self.personality.get().expect(
                            "create_landing_pad() should have set this");
                        build::Resume(prev_bcx,
                                      build::Load(prev_bcx, personality));
                        prev_llbb = prev_bcx.llbb;
                        break;
                    }

                    ReturnExit => {
                        prev_llbb = self.get_llreturn();
                        break;
                    }

                    LoopExit(id, _) => {
                        self.ccx.sess().bug(&format!(
                                "cannot exit from scope {}, \
                                not in scope", id)[]);
                    }
                }
            }

            // Check if we have already cached the unwinding of this
            // scope for this label. If so, we can stop popping scopes
            // and branch to the cached label, since it contains the
            // cleanups for any subsequent scopes.
            match self.top_scope(|s| s.cached_early_exit(label)) {
                Some(cleanup_block) => {
                    prev_llbb = cleanup_block;
                    break;
                }
                None => { }
            }

            // Pop off the scope, since we will be generating
            // unwinding code for it. If we are searching for a loop exit,
            // and this scope is that loop, then stop popping and set
            // `prev_llbb` to the appropriate exit block from the loop.
            popped_scopes.push(self.pop_scope());
            let scope = popped_scopes.last().unwrap();
            match label {
                UnwindExit | ReturnExit => { }
                LoopExit(id, exit) => {
                    match scope.kind.early_exit_block(id, exit) {
                        Some(exitllbb) => {
                            prev_llbb = exitllbb;
                            break;
                        }

                        None => { }
                    }
                }
            }
        }

        debug!("trans_cleanups_to_exit_scope: popped {} scopes",
               popped_scopes.len());

        // Now push the popped scopes back on. As we go,
        // we track in `prev_llbb` the exit to which this scope
        // should branch when it's done.
        //
        // So, continuing with our example, we will start out with
        // `prev_llbb` being set to `break_blk` (or possibly a cached
        // early exit). We will then pop the scopes from `popped_scopes`
        // and generate a basic block for each one, prepending it in the
        // series and updating `prev_llbb`. So we begin by popping `Custom 2`
        // and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)`
        // branch to `prev_llbb == break_blk`, giving us a sequence like:
        //
        //     Cleanup(Custom 2) -> prev_llbb
        //
        // We then pop `AST 24` and repeat the process, giving us the sequence:
        //
        //     Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb
        //
        // At this point, `popped_scopes` is empty, and so the final block
        // that we return to the user is `Cleanup(AST 24)`.
        while !popped_scopes.is_empty() {
            let mut scope = popped_scopes.pop().unwrap();

            if scope.cleanups.iter().any(|c| cleanup_is_suitable_for(&**c, label)) {
                let name = scope.block_name("clean");
                debug!("generating cleanups for {}", name);
                let bcx_in = self.new_block(label.is_unwind(),
                                            &name[..],
                                            None);
                let mut bcx_out = bcx_in;
                for cleanup in scope.cleanups.iter().rev() {
                    if cleanup_is_suitable_for(&**cleanup, label) {
                        bcx_out = cleanup.trans(bcx_out,
                                                scope.debug_loc);
                    }
                }
                build::Br(bcx_out, prev_llbb, DebugLoc::None);
                prev_llbb = bcx_in.llbb;
            } else {
                debug!("no suitable cleanups in {}",
                       scope.block_name("clean"));
            }

            scope.add_cached_early_exit(label, prev_llbb);
            self.push_scope(scope);
        }

        debug!("trans_cleanups_to_exit_scope: prev_llbb={:?}", prev_llbb);

        assert_eq!(self.scopes_len(), orig_scopes_len);
        prev_llbb
    }

    /// Creates a landing pad for the top scope, if one does not exist. The landing pad will
    /// perform all cleanups necessary for an unwind and then `resume` to continue error
    /// propagation:
    ///
    ///     landing_pad -> ... cleanups ... -> [resume]
    ///
    /// (The cleanups and resume instruction are created by `trans_cleanups_to_exit_scope()`, not
    /// in this function itself.)
    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
        let pad_bcx;

        debug!("get_or_create_landing_pad");

        // Check if a landing pad block exists; if not, create one.
        {
            let mut scopes = self.scopes.borrow_mut();
            let last_scope = scopes.last_mut().unwrap();
            match last_scope.cached_landing_pad {
                Some(llbb) => { return llbb; }
                None => {
                    let name = last_scope.block_name("unwind");
                    pad_bcx = self.new_block(true, &name[..], None);
                    last_scope.cached_landing_pad = Some(pad_bcx.llbb);
                }
            }
        }

        // The landing pad return type (the type being propagated). Not sure what
        // this represents but it's determined by the personality function and
        // this is what the EH proposal example uses.
        let llretty = Type::struct_(self.ccx,
                                    &[Type::i8p(self.ccx), Type::i32(self.ccx)],
                                    false);

        // The exception handling personality function.
        //
        // If our compilation unit has the `eh_personality` lang item somewhere
        // within it, then we just need to translate that. Otherwise, we're
        // building an rlib which will depend on some upstream implementation of
        // this function, so we just codegen a generic reference to it. We don't
        // specify any of the types for the function, we just make it a symbol
        // that LLVM can later use.
        let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() {
            Some(def_id) => {
                callee::trans_fn_ref(pad_bcx.ccx(), def_id, ExprId(0),
                                     pad_bcx.fcx.param_substs).val
            }
            None => {
                let mut personality = self.ccx.eh_personality().borrow_mut();
                match *personality {
                    Some(llpersonality) => llpersonality,
                    None => {
                        let fty = Type::variadic_func(&[], &Type::i32(self.ccx));
                        let f = base::decl_cdecl_fn(self.ccx,
                                                    "rust_eh_personality",
                                                    fty,
                                                    self.ccx.tcx().types.i32);
                        *personality = Some(f);
                        f
                    }
                }
            }
        };

        // The only landing pad clause will be 'cleanup'
        let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1);

        // The landing pad block is a cleanup
        build::SetCleanup(pad_bcx, llretval);

        // We store the retval in a function-central alloca, so that calls to
        // Resume can find it.
        match self.personality.get() {
            Some(addr) => {
                build::Store(pad_bcx, llretval, addr);
            }
            None => {
                let addr = base::alloca(pad_bcx, common::val_ty(llretval), "");
                self.personality.set(Some(addr));
                build::Store(pad_bcx, llretval, addr);
            }
        }

        // Generate the cleanup block and branch to it.
        let cleanup_llbb = self.trans_cleanups_to_exit_scope(UnwindExit);
        build::Br(pad_bcx, cleanup_llbb, DebugLoc::None);

        return pad_bcx.llbb;
    }
}

impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
    fn new(kind: CleanupScopeKind<'blk, 'tcx>,
           debug_loc: DebugLoc)
        -> CleanupScope<'blk, 'tcx> {
        CleanupScope {
            kind: kind,
            debug_loc: debug_loc,
            cleanups: vec!(),
            cached_early_exits: vec!(),
            cached_landing_pad: None,
        }
    }

    fn clear_cached_exits(&mut self) {
        self.cached_early_exits = vec!();
        self.cached_landing_pad = None;
    }

    fn cached_early_exit(&self,
                         label: EarlyExitLabel)
                         -> Option<BasicBlockRef> {
        self.cached_early_exits.iter().
            find(|e| e.label == label).
            map(|e| e.cleanup_block)
    }

    fn add_cached_early_exit(&mut self,
                             label: EarlyExitLabel,
                             blk: BasicBlockRef) {
        self.cached_early_exits.push(
            CachedEarlyExit { label: label,
                              cleanup_block: blk });
    }

    /// True if this scope has cleanups that need unwinding
    fn needs_invoke(&self) -> bool {
        self.cached_landing_pad.is_some() ||
            self.cleanups.iter().any(|c| c.must_unwind())
    }

    /// Returns a suitable name to use for the basic block that handles this cleanup scope
    fn block_name(&self, prefix: &str) -> String {
        match self.kind {
            CustomScopeKind => format!("{}_custom_", prefix),
            AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
            LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id),
        }
    }

    pub fn drop_non_lifetime_clean(&mut self) {
        self.cleanups.retain(|c| c.is_lifetime_end());
    }
}

impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
    fn is_temp(&self) -> bool {
        match *self {
            CustomScopeKind => true,
            LoopScopeKind(..) | AstScopeKind(..) => false,
        }
    }

    fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
        match *self {
            CustomScopeKind | LoopScopeKind(..) => false,
            AstScopeKind(i) => i == id
        }
    }

    fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
        match *self {
            CustomScopeKind | AstScopeKind(..) => false,
            LoopScopeKind(i, _) => i == id
        }
    }

    /// If this is a loop scope with id `id`, return the early exit block `exit`, else `None`
    fn early_exit_block(&self,
                        id: ast::NodeId,
                        exit: uint) -> Option<BasicBlockRef> {
        match *self {
            LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
            _ => None,
        }
    }
}

impl EarlyExitLabel {
    fn is_unwind(&self) -> bool {
        match *self {
            UnwindExit => true,
            _ => false
        }
    }
}

///////////////////////////////////////////////////////////////////////////
// Cleanup types

#[derive(Copy)]
pub struct DropValue<'tcx> {
    is_immediate: bool,
    must_unwind: bool,
    val: ValueRef,
    ty: Ty<'tcx>,
    zero: bool
}

impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
    fn must_unwind(&self) -> bool {
        self.must_unwind
    }

    fn clean_on_unwind(&self) -> bool {
        self.must_unwind
    }

    fn is_lifetime_end(&self) -> bool {
        false
    }

    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: DebugLoc)
                   -> Block<'blk, 'tcx> {
        let bcx = if self.is_immediate {
            glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc)
        } else {
            glue::drop_ty(bcx, self.val, self.ty, debug_loc)
        };
        if self.zero {
            base::zero_mem(bcx, self.val, self.ty);
        }
        bcx
    }
}

#[derive(Copy, Debug)]
pub enum Heap {
    HeapExchange
}

#[derive(Copy)]
pub struct FreeValue<'tcx> {
    ptr: ValueRef,
    heap: Heap,
    content_ty: Ty<'tcx>
}

impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> {
    fn must_unwind(&self) -> bool {
        true
    }

    fn clean_on_unwind(&self) -> bool {
        true
    }

    fn is_lifetime_end(&self) -> bool {
        false
    }

    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: DebugLoc)
                   -> Block<'blk, 'tcx> {
        match self.heap {
            HeapExchange => {
                glue::trans_exchange_free_ty(bcx,
                                             self.ptr,
                                             self.content_ty,
                                             debug_loc)
            }
        }
    }
}

#[derive(Copy)]
pub struct FreeSlice {
    ptr: ValueRef,
    size: ValueRef,
    align: ValueRef,
    heap: Heap,
}

impl<'tcx> Cleanup<'tcx> for FreeSlice {
    fn must_unwind(&self) -> bool {
        true
    }

    fn clean_on_unwind(&self) -> bool {
        true
    }

    fn is_lifetime_end(&self) -> bool {
        false
    }

    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: DebugLoc)
                   -> Block<'blk, 'tcx> {
        match self.heap {
            HeapExchange => {
                glue::trans_exchange_free_dyn(bcx,
                                              self.ptr,
                                              self.size,
                                              self.align,
                                              debug_loc)
            }
        }
    }
}

#[derive(Copy)]
pub struct LifetimeEnd {
    ptr: ValueRef,
}

impl<'tcx> Cleanup<'tcx> for LifetimeEnd {
    fn must_unwind(&self) -> bool {
        false
    }

    fn clean_on_unwind(&self) -> bool {
        true
    }

    fn is_lifetime_end(&self) -> bool {
        true
    }

    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: DebugLoc)
                   -> Block<'blk, 'tcx> {
        debug_loc.apply(bcx.fcx);
        base::call_lifetime_end(bcx, self.ptr);
        bcx
    }
}

pub fn temporary_scope(tcx: &ty::ctxt,
                       id: ast::NodeId)
                       -> ScopeId {
    match tcx.region_maps.temporary_scope(id) {
        Some(scope) => {
            let r = AstScope(scope.node_id());
            debug!("temporary_scope({}) = {:?}", id, r);
            r
        }
        None => {
            tcx.sess.bug(&format!("no temporary scope available for expr {}",
                                 id)[])
        }
    }
}

pub fn var_scope(tcx: &ty::ctxt,
                 id: ast::NodeId)
                 -> ScopeId {
    let r = AstScope(tcx.region_maps.var_scope(id).node_id());
    debug!("var_scope({}) = {:?}", id, r);
    r
}

fn cleanup_is_suitable_for(c: &Cleanup,
                           label: EarlyExitLabel) -> bool {
    !label.is_unwind() || c.clean_on_unwind()
}

///////////////////////////////////////////////////////////////////////////
// These traits just exist to put the methods into this file.

pub trait CleanupMethods<'blk, 'tcx> {
    fn push_ast_cleanup_scope(&self, id: NodeIdAndSpan);
    fn push_loop_cleanup_scope(&self,
                               id: ast::NodeId,
                               exits: [Block<'blk, 'tcx>; EXIT_MAX]);
    fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
    fn push_custom_cleanup_scope_with_debug_loc(&self,
                                                debug_loc: NodeIdAndSpan)
                                                -> CustomScopeIndex;
    fn pop_and_trans_ast_cleanup_scope(&self,
                                       bcx: Block<'blk, 'tcx>,
                                       cleanup_scope: ast::NodeId)
                                       -> Block<'blk, 'tcx>;
    fn pop_loop_cleanup_scope(&self,
                              cleanup_scope: ast::NodeId);
    fn pop_custom_cleanup_scope(&self,
                                custom_scope: CustomScopeIndex);
    fn pop_and_trans_custom_cleanup_scope(&self,
                                          bcx: Block<'blk, 'tcx>,
                                          custom_scope: CustomScopeIndex)
                                          -> Block<'blk, 'tcx>;
    fn top_loop_scope(&self) -> ast::NodeId;
    fn normal_exit_block(&'blk self,
                         cleanup_scope: ast::NodeId,
                         exit: uint) -> BasicBlockRef;
    fn return_exit_block(&'blk self) -> BasicBlockRef;
    fn schedule_lifetime_end(&self,
                             cleanup_scope: ScopeId,
                             val: ValueRef);
    fn schedule_drop_mem(&self,
                         cleanup_scope: ScopeId,
                         val: ValueRef,
                         ty: Ty<'tcx>);
    fn schedule_drop_and_zero_mem(&self,
                                  cleanup_scope: ScopeId,
                                  val: ValueRef,
                                  ty: Ty<'tcx>);
    fn schedule_drop_immediate(&self,
                               cleanup_scope: ScopeId,
                               val: ValueRef,
                               ty: Ty<'tcx>);
    fn schedule_free_value(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           heap: Heap,
                           content_ty: Ty<'tcx>);
    fn schedule_free_slice(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           size: ValueRef,
                           align: ValueRef,
                           heap: Heap);
    fn schedule_clean(&self,
                      cleanup_scope: ScopeId,
                      cleanup: CleanupObj<'tcx>);
    fn schedule_clean_in_ast_scope(&self,
                                   cleanup_scope: ast::NodeId,
                                   cleanup: CleanupObj<'tcx>);
    fn schedule_clean_in_custom_scope(&self,
                                      custom_scope: CustomScopeIndex,
                                      cleanup: CleanupObj<'tcx>);
    fn needs_invoke(&self) -> bool;
    fn get_landing_pad(&'blk self) -> BasicBlockRef;
}

trait CleanupHelperMethods<'blk, 'tcx> {
    fn top_ast_scope(&self) -> Option<ast::NodeId>;
    fn top_nonempty_cleanup_scope(&self) -> Option<uint>;
    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    fn trans_scope_cleanups(&self,
                            bcx: Block<'blk, 'tcx>,
                            scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
    fn trans_cleanups_to_exit_scope(&'blk self,
                                    label: EarlyExitLabel)
                                    -> BasicBlockRef;
    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
    fn scopes_len(&self) -> uint;
    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
    fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R;
}