]> git.lizzy.rs Git - rust.git/blob - src/librustc_trans/trans/cleanup.rs
Extend `trans::datum::Lvalue` so that it carries an optional dropflag hint.
[rust.git] / src / librustc_trans / trans / cleanup.rs
1 // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 //! ## The Cleanup module
12 //!
13 //! The cleanup module tracks what values need to be cleaned up as scopes
14 //! are exited, either via panic or just normal control flow. The basic
15 //! idea is that the function context maintains a stack of cleanup scopes
16 //! that are pushed/popped as we traverse the AST tree. There is typically
17 //! at least one cleanup scope per AST node; some AST nodes may introduce
18 //! additional temporary scopes.
19 //!
20 //! Cleanup items can be scheduled into any of the scopes on the stack.
21 //! Typically, when a scope is popped, we will also generate the code for
22 //! each of its cleanups at that time. This corresponds to a normal exit
23 //! from a block (for example, an expression completing evaluation
24 //! successfully without panic). However, it is also possible to pop a
25 //! block *without* executing its cleanups; this is typically used to
26 //! guard intermediate values that must be cleaned up on panic, but not
27 //! if everything goes right. See the section on custom scopes below for
28 //! more details.
29 //!
30 //! Cleanup scopes come in three kinds:
31 //!
32 //! - **AST scopes:** each AST node in a function body has a corresponding
33 //!   AST scope. We push the AST scope when we start generating code for an AST
34 //!   node and pop it once the AST node has been fully generated.
35 //! - **Loop scopes:** loops have an additional cleanup scope. Cleanups are
36 //!   never scheduled into loop scopes; instead, they are used to record the
37 //!   basic blocks that we should branch to when a `continue` or `break` statement
38 //!   is encountered.
39 //! - **Custom scopes:** custom scopes are typically used to ensure cleanup
40 //!   of intermediate values.
41 //!
42 //! ### When to schedule cleanup
43 //!
44 //! Although the cleanup system is intended to *feel* fairly declarative,
45 //! it's still important to time calls to `schedule_clean()` correctly.
46 //! Basically, you should not schedule cleanup for memory until it has
47 //! been initialized, because if an unwind should occur before the memory
48 //! is fully initialized, then the cleanup will run and try to free or
49 //! drop uninitialized memory. If the initialization itself produces
50 //! byproducts that need to be freed, then you should use temporary custom
51 //! scopes to ensure that those byproducts will get freed on unwind.  For
52 //! example, an expression like `box foo()` will first allocate a box in the
53 //! heap and then call `foo()` -- if `foo()` should panic, this box needs
54 //! to be *shallowly* freed.
55 //!
56 //! ### Long-distance jumps
57 //!
58 //! In addition to popping a scope, which corresponds to normal control
59 //! flow exiting the scope, we may also *jump out* of a scope into some
60 //! earlier scope on the stack. This can occur in response to a `return`,
61 //! `break`, or `continue` statement, but also in response to panic. In
62 //! any of these cases, we will generate a series of cleanup blocks for
63 //! each of the scopes that is exited. So, if the stack contains scopes A
64 //! ... Z, and we break out of a loop whose corresponding cleanup scope is
65 //! X, we would generate cleanup blocks for the cleanups in X, Y, and Z.
66 //! After cleanup is done we would branch to the exit point for scope X.
67 //! But if panic should occur, we would generate cleanups for all the
68 //! scopes from A to Z and then resume the unwind process afterwards.
69 //!
70 //! To avoid generating tons of code, we cache the cleanup blocks that we
71 //! create for breaks, returns, unwinds, and other jumps. Whenever a new
72 //! cleanup is scheduled, though, we must clear these cached blocks. A
73 //! possible improvement would be to keep the cached blocks but simply
74 //! generate a new block which performs the additional cleanup and then
75 //! branches to the existing cached blocks.
76 //!
77 //! ### AST and loop cleanup scopes
78 //!
79 //! AST cleanup scopes are pushed when we begin and end processing an AST
80 //! node. They are used to house cleanups related to rvalue temporaries that
81 //! get referenced (e.g., due to an expression like `&Foo()`). Whenever an
82 //! AST scope is popped, we always trans all the cleanups, adding the cleanup
83 //! code after the postdominator of the AST node.
84 //!
85 //! AST nodes that represent breakable loops also push a loop scope; the
86 //! loop scope never has any actual cleanups, it's just used to point to
87 //! the basic blocks where control should flow after a "continue" or
88 //! "break" statement. Popping a loop scope never generates code.
89 //!
90 //! ### Custom cleanup scopes
91 //!
92 //! Custom cleanup scopes are used for a variety of purposes. The most
93 //! common though is to handle temporary byproducts, where cleanup only
94 //! needs to occur on panic. The general strategy is to push a custom
95 //! cleanup scope, schedule *shallow* cleanups into the custom scope, and
96 //! then pop the custom scope (without transing the cleanups) when
97 //! execution succeeds normally. This way the cleanups are only trans'd on
98 //! unwind, and only up until the point where execution succeeded, at
99 //! which time the complete value should be stored in an lvalue or some
100 //! other place where normal cleanup applies.
101 //!
102 //! To spell it out, here is an example. Imagine an expression `box expr`.
103 //! We would basically:
104 //!
105 //! 1. Push a custom cleanup scope C.
106 //! 2. Allocate the box.
107 //! 3. Schedule a shallow free in the scope C.
108 //! 4. Trans `expr` into the box.
109 //! 5. Pop the scope C.
110 //! 6. Return the box as an rvalue.
111 //!
112 //! This way, if a panic occurs while transing `expr`, the custom
113 //! cleanup scope C is pushed and hence the box will be freed. The trans
114 //! code for `expr` itself is responsible for freeing any other byproducts
115 //! that may be in play.
116
117 pub use self::ScopeId::*;
118 pub use self::CleanupScopeKind::*;
119 pub use self::EarlyExitLabel::*;
120 pub use self::Heap::*;
121
122 use llvm::{BasicBlockRef, ValueRef};
123 use trans::base;
124 use trans::build;
125 use trans::common;
126 use trans::common::{Block, FunctionContext, NodeIdAndSpan};
127 use trans::datum::{Datum, Lvalue};
128 use trans::debuginfo::{DebugLoc, ToDebugLoc};
129 use trans::glue;
130 use middle::region;
131 use trans::type_::Type;
132 use middle::ty::{self, Ty};
133 use std::fmt;
134 use syntax::ast;
135
pub struct CleanupScope<'blk, 'tcx: 'blk> {
    // The kind of this cleanup scope: an AST scope tied to a node id,
    // a loop scope, or a *custom/temporary scope* that is pushed
    // during trans to clean up miscellaneous garbage trans may
    // generate whose lifetime is a subset of some expression. See
    // module doc for more details.
    kind: CleanupScopeKind<'blk, 'tcx>,

    // Cleanups to run upon scope exit.
    cleanups: Vec<CleanupObj<'tcx>>,

    // The debug location any drop calls generated for this scope will be
    // associated with.
    debug_loc: DebugLoc,

    // Early-exit cleanup blocks already generated for this scope;
    // invalidated whenever a new cleanup is scheduled (see module doc).
    cached_early_exits: Vec<CachedEarlyExit>,
    // Cached landing pad for unwinding through this scope, if any.
    cached_landing_pad: Option<BasicBlockRef>,
}
154
/// Index of a custom (temporary) cleanup scope within the function
/// context's scope stack.
#[derive(Copy, Clone, Debug)]
pub struct CustomScopeIndex {
    index: usize
}
159
// Indices into a loop scope's exit-block array: the block targeted by
// `break` and by `continue`, respectively.
pub const EXIT_BREAK: usize = 0;
pub const EXIT_LOOP: usize = 1;
// Number of exit kinds; the length of the exits array.
pub const EXIT_MAX: usize = 2;
163
pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
    /// A temporary scope guarding intermediate values; see module doc.
    CustomScopeKind,
    /// The scope corresponding to the AST node with the given id.
    AstScopeKind(ast::NodeId),
    /// A scope for a breakable loop; holds the basic blocks to branch
    /// to on `break`/`continue` (indexed by EXIT_BREAK/EXIT_LOOP).
    LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>; EXIT_MAX])
}
169
170 impl<'blk, 'tcx: 'blk> fmt::Debug for CleanupScopeKind<'blk, 'tcx> {
171     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
172         match *self {
173             CustomScopeKind => write!(f, "CustomScopeKind"),
174             AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid),
175             LoopScopeKind(nid, ref blks) => {
176                 try!(write!(f, "LoopScopeKind({}, [", nid));
177                 for blk in blks {
178                     try!(write!(f, "{:p}, ", blk));
179                 }
180                 write!(f, "])")
181             }
182         }
183     }
184 }
185
/// Identifies an early-exit destination: unwinding (panic), returning
/// from the function, or break/continue out of the loop with the given
/// node id (the usize indexes the loop's exit-block array).
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum EarlyExitLabel {
    UnwindExit,
    ReturnExit,
    LoopExit(ast::NodeId, usize)
}
192
// A previously generated cleanup block for a particular early-exit
// label, cached so the same cleanup code is not regenerated.
#[derive(Copy, Clone)]
pub struct CachedEarlyExit {
    label: EarlyExitLabel,
    cleanup_block: BasicBlockRef,
}
198
/// A scheduled cleanup action; implementors emit the actual drop/free
/// code when the owning scope is exited.
pub trait Cleanup<'tcx> {
    /// True if this cleanup needs to run on the unwind (panic) path.
    fn must_unwind(&self) -> bool;
    /// True if this cleanup is only a lifetime-end marker
    /// (see `schedule_lifetime_end`).
    fn is_lifetime_end(&self) -> bool;
    /// Emits the cleanup code into `bcx`, returning the block in which
    /// code generation should continue.
    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: DebugLoc)
                   -> Block<'blk, 'tcx>;
}
207
/// A boxed, dynamically dispatched cleanup action.
pub type CleanupObj<'tcx> = Box<Cleanup<'tcx>+'tcx>;
209
/// Target for scheduling a cleanup: either the AST scope for a node id
/// or a custom scope identified by its stack index.
#[derive(Copy, Clone, Debug)]
pub enum ScopeId {
    AstScope(ast::NodeId),
    CustomScope(CustomScopeIndex)
}
215
/// Pairs an `ast::NodeId` with a drop-flag hint payload `K`
/// (presumably the id names the local the hint belongs to — see the
/// commit note about `Lvalue` carrying an optional dropflag hint).
#[derive(Copy, Clone, Debug)]
pub struct DropHint<K>(pub ast::NodeId, pub K);

/// Drop hint whose payload is an lvalue datum.
pub type DropHintDatum<'tcx> = DropHint<Datum<'tcx, Lvalue>>;
/// Drop hint whose payload is a raw LLVM value.
pub type DropHintValue = DropHint<ValueRef>;
221
impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
    /// Invoked when we start to trans the code contained within a new cleanup scope.
    fn push_ast_cleanup_scope(&self, debug_loc: NodeIdAndSpan) {
        debug!("push_ast_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(debug_loc.id));

        // FIXME(#2202) -- currently closure bodies have a parent
        // region, which messes up the assertion below, since there
        // are no cleanup scopes on the stack at the start of
        // trans'ing a closure body.  I think though that this should
        // eventually be fixed by closure bodies not having a parent
        // region, though that's a touch unclear, and it might also be
        // better just to narrow this assertion more (i.e., by
        // excluding id's that correspond to closure bodies only). For
        // now we just say that if there is already an AST scope on the stack,
        // this new AST scope had better be its immediate child.
        let top_scope = self.top_ast_scope();
        if top_scope.is_some() {
            assert!((self.ccx
                     .tcx()
                     .region_maps
                     .opt_encl_scope(region::CodeExtent::from_node_id(debug_loc.id))
                     .map(|s|s.node_id()) == top_scope)
                    ||
                    (self.ccx
                     .tcx()
                     .region_maps
                     .opt_encl_scope(region::CodeExtent::DestructionScope(debug_loc.id))
                     .map(|s|s.node_id()) == top_scope));
        }

        self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
                                          debug_loc.debug_loc()));
    }

    /// Pushes a loop cleanup scope for the loop with id `id`; `exits`
    /// holds the blocks to branch to on `break`/`continue`. Must be
    /// pushed directly on top of that loop's own AST scope (asserted).
    fn push_loop_cleanup_scope(&self,
                               id: ast::NodeId,
                               exits: [Block<'blk, 'tcx>; EXIT_MAX]) {
        debug!("push_loop_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(id));
        assert_eq!(Some(id), self.top_ast_scope());

        // Just copy the debuginfo source location from the enclosing scope
        let debug_loc = self.scopes
                            .borrow()
                            .last()
                            .unwrap()
                            .debug_loc;

        self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc));
    }

    /// Pushes a new custom (temporary) scope and returns its stack index.
    fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
        let index = self.scopes_len();
        debug!("push_custom_cleanup_scope(): {}", index);

        // Just copy the debuginfo source location from the enclosing scope
        let debug_loc = self.scopes
                            .borrow()
                            .last()
                            .map(|opt_scope| opt_scope.debug_loc)
                            .unwrap_or(DebugLoc::None);

        self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc));
        CustomScopeIndex { index: index }
    }

    /// Like `push_custom_cleanup_scope`, but uses the explicitly
    /// supplied source location instead of inheriting the enclosing
    /// scope's.
    fn push_custom_cleanup_scope_with_debug_loc(&self,
                                                debug_loc: NodeIdAndSpan)
                                                -> CustomScopeIndex {
        let index = self.scopes_len();
        // NOTE(review): this debug! label reuses the
        // push_custom_cleanup_scope() text from the sibling method.
        debug!("push_custom_cleanup_scope(): {}", index);

        self.push_scope(CleanupScope::new(CustomScopeKind,
                                          debug_loc.debug_loc()));
        CustomScopeIndex { index: index }
    }

    /// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup
    /// stack, and generates the code to do its cleanups for normal exit.
    fn pop_and_trans_ast_cleanup_scope(&self,
                                       bcx: Block<'blk, 'tcx>,
                                       cleanup_scope: ast::NodeId)
                                       -> Block<'blk, 'tcx> {
        debug!("pop_and_trans_ast_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(cleanup_scope));

        assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));

        let scope = self.pop_scope();
        self.trans_scope_cleanups(bcx, &scope)
    }

    /// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the
    /// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by
    /// branching to a block generated by `normal_exit_block`.
    fn pop_loop_cleanup_scope(&self,
                              cleanup_scope: ast::NodeId) {
        debug!("pop_loop_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(cleanup_scope));

        assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));

        let _ = self.pop_scope();
    }

    /// Removes the top cleanup scope from the stack without executing its cleanups. The top
    /// cleanup scope must be the temporary scope `custom_scope`.
    fn pop_custom_cleanup_scope(&self,
                                custom_scope: CustomScopeIndex) {
        debug!("pop_custom_cleanup_scope({})", custom_scope.index);
        assert!(self.is_valid_to_pop_custom_scope(custom_scope));
        let _ = self.pop_scope();
    }

    /// Removes the top cleanup scope from the stack, which must be a temporary scope, and
    /// generates the code to do its cleanups for normal exit.
    fn pop_and_trans_custom_cleanup_scope(&self,
                                          bcx: Block<'blk, 'tcx>,
                                          custom_scope: CustomScopeIndex)
                                          -> Block<'blk, 'tcx> {
        debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope);
        assert!(self.is_valid_to_pop_custom_scope(custom_scope));

        let scope = self.pop_scope();
        self.trans_scope_cleanups(bcx, &scope)
    }

    /// Returns the id of the top-most loop scope
    fn top_loop_scope(&self) -> ast::NodeId {
        // Walk the stack from innermost to outermost; ICE if no loop
        // scope exists.
        for scope in self.scopes.borrow().iter().rev() {
            if let LoopScopeKind(id, _) = scope.kind {
                return id;
            }
        }
        self.ccx.sess().bug("no loop scope found");
    }

    /// Returns a block to branch to which will perform all pending cleanups and then
    /// break/continue (depending on `exit`) out of the loop with id `cleanup_scope`
    fn normal_exit_block(&'blk self,
                         cleanup_scope: ast::NodeId,
                         exit: usize) -> BasicBlockRef {
        self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
    }

    /// Returns a block to branch to which will perform all pending cleanups and then return from
    /// this function
    fn return_exit_block(&'blk self) -> BasicBlockRef {
        self.trans_cleanups_to_exit_scope(ReturnExit)
    }

    /// Schedules an LLVM lifetime-end cleanup for `val` upon exit from
    /// `cleanup_scope`.
    fn schedule_lifetime_end(&self,
                             cleanup_scope: ScopeId,
                             val: ValueRef) {
        let drop = box LifetimeEnd {
            ptr: val,
        };

        debug!("schedule_lifetime_end({:?}, val={})",
               cleanup_scope,
               self.ccx.tn().val_to_string(val));

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
    fn schedule_drop_mem(&self,
                         cleanup_scope: ScopeId,
                         val: ValueRef,
                         ty: Ty<'tcx>) {
        // Types with no drop glue need no cleanup at all.
        if !self.type_needs_drop(ty) { return; }
        let drop = box DropValue {
            is_immediate: false,
            val: val,
            ty: ty,
            fill_on_drop: false,
            skip_dtor: false,
        };

        debug!("schedule_drop_mem({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               ty,
               drop.fill_on_drop,
               drop.skip_dtor);

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Schedules a (deep) drop and filling of `val`, which is a pointer to an instance of `ty`
    fn schedule_drop_and_fill_mem(&self,
                                  cleanup_scope: ScopeId,
                                  val: ValueRef,
                                  ty: Ty<'tcx>) {
        if !self.type_needs_drop(ty) { return; }

        let drop = box DropValue {
            is_immediate: false,
            val: val,
            ty: ty,
            fill_on_drop: true,  // overwrite the memory after dropping
            skip_dtor: false,
        };

        debug!("schedule_drop_and_fill_mem({:?}, val={}, ty={:?}, fill_on_drop={}, skip_dtor={})",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               ty,
               drop.fill_on_drop,
               drop.skip_dtor);

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Issue #23611: Schedules a (deep) drop of the contents of
    /// `val`, which is a pointer to an instance of struct/enum type
    /// `ty`. The scheduled code handles extracting the discriminant
    /// and dropping the contents associated with that variant
    /// *without* executing any associated drop implementation.
    fn schedule_drop_adt_contents(&self,
                                  cleanup_scope: ScopeId,
                                  val: ValueRef,
                                  ty: Ty<'tcx>) {
        // `if` below could be "!contents_needs_drop"; skipping drop
        // is just an optimization, so sound to be conservative.
        if !self.type_needs_drop(ty) { return; }

        let drop = box DropValue {
            is_immediate: false,
            val: val,
            ty: ty,
            fill_on_drop: false,
            skip_dtor: true,  // drop contents only, not the Drop impl itself
        };

        debug!("schedule_drop_adt_contents({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               ty,
               drop.fill_on_drop,
               drop.skip_dtor);

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Schedules a (deep) drop of `val`, which is an instance of `ty`
    fn schedule_drop_immediate(&self,
                               cleanup_scope: ScopeId,
                               val: ValueRef,
                               ty: Ty<'tcx>) {

        if !self.type_needs_drop(ty) { return; }
        let drop = box DropValue {
            is_immediate: true,  // `val` is the value itself, not a pointer
            val: val,
            ty: ty,
            fill_on_drop: false,
            skip_dtor: false,
        };

        debug!("schedule_drop_immediate({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               ty,
               drop.fill_on_drop,
               drop.skip_dtor);

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Schedules a call to `free(val)`. Note that this is a shallow operation.
    fn schedule_free_value(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           heap: Heap,
                           content_ty: Ty<'tcx>) {
        let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };

        debug!("schedule_free_value({:?}, val={}, heap={:?})",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               heap);

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Dispatches `cleanup` to the AST or custom scope named by
    /// `cleanup_scope`.
    fn schedule_clean(&self,
                      cleanup_scope: ScopeId,
                      cleanup: CleanupObj<'tcx>) {
        match cleanup_scope {
            AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
            CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
        }
    }

    /// Schedules a cleanup to occur upon exit from the AST scope with
    /// id `cleanup_scope`, which must be somewhere on the stack.
    fn schedule_clean_in_ast_scope(&self,
                                   cleanup_scope: ast::NodeId,
                                   cleanup: CleanupObj<'tcx>) {
        debug!("schedule_clean_in_ast_scope(cleanup_scope={})",
               cleanup_scope);

        for scope in self.scopes.borrow_mut().iter_mut().rev() {
            if scope.kind.is_ast_with_id(cleanup_scope) {
                scope.cleanups.push(cleanup);
                scope.clear_cached_exits();
                return;
            } else {
                // will be adding a cleanup to some enclosing scope
                scope.clear_cached_exits();
            }
        }

        // The requested scope was not on the stack: compiler bug.
        self.ccx.sess().bug(
            &format!("no cleanup scope {} found",
                    self.ccx.tcx().map.node_to_string(cleanup_scope)));
    }

    /// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope.
    fn schedule_clean_in_custom_scope(&self,
                                      custom_scope: CustomScopeIndex,
                                      cleanup: CleanupObj<'tcx>) {
        debug!("schedule_clean_in_custom_scope(custom_scope={})",
               custom_scope.index);

        assert!(self.is_valid_custom_scope(custom_scope));

        let mut scopes = self.scopes.borrow_mut();
        let scope = &mut (*scopes)[custom_scope.index];
        scope.cleanups.push(cleanup);
        // New cleanup invalidates any cached early-exit blocks.
        scope.clear_cached_exits();
    }

    /// Returns true if there are pending cleanups that should execute on panic.
    fn needs_invoke(&self) -> bool {
        self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
    }

    /// Returns a basic block to branch to in the event of a panic. This block will run the panic
    /// cleanups and eventually invoke the LLVM `Resume` instruction.
    fn get_landing_pad(&'blk self) -> BasicBlockRef {
        let _icx = base::push_ctxt("get_landing_pad");

        debug!("get_landing_pad");

        let orig_scopes_len = self.scopes_len();
        assert!(orig_scopes_len > 0);

        // Remove any scopes that do not have cleanups on panic:
        let mut popped_scopes = vec!();
        while !self.top_scope(|s| s.needs_invoke()) {
            debug!("top scope does not need invoke");
            popped_scopes.push(self.pop_scope());
        }

        // Check for an existing landing pad in the new topmost scope:
        let llbb = self.get_or_create_landing_pad();

        // Push the scopes we removed back on:
        loop {
            match popped_scopes.pop() {
                Some(scope) => self.push_scope(scope),
                None => break
            }
        }

        // The stack must be restored exactly as we found it.
        assert_eq!(self.scopes_len(), orig_scopes_len);

        return llbb;
    }
}
596
597 impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
598     /// Returns the id of the current top-most AST scope, if any.
599     fn top_ast_scope(&self) -> Option<ast::NodeId> {
600         for scope in self.scopes.borrow().iter().rev() {
601             match scope.kind {
602                 CustomScopeKind | LoopScopeKind(..) => {}
603                 AstScopeKind(i) => {
604                     return Some(i);
605                 }
606             }
607         }
608         None
609     }
610
611     fn top_nonempty_cleanup_scope(&self) -> Option<usize> {
612         self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())
613     }
614
615     fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
616         self.is_valid_custom_scope(custom_scope) &&
617             custom_scope.index == self.scopes.borrow().len() - 1
618     }
619
620     fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
621         let scopes = self.scopes.borrow();
622         custom_scope.index < scopes.len() &&
623             (*scopes)[custom_scope.index].kind.is_temp()
624     }
625
626     /// Generates the cleanups for `scope` into `bcx`
627     fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
628                             bcx: Block<'blk, 'tcx>,
629                             scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> {
630
631         let mut bcx = bcx;
632         if !bcx.unreachable.get() {
633             for cleanup in scope.cleanups.iter().rev() {
634                 bcx = cleanup.trans(bcx, scope.debug_loc);
635             }
636         }
637         bcx
638     }
639
    /// Returns the number of cleanup scopes currently on the stack.
    fn scopes_len(&self) -> usize {
        self.scopes.borrow().len()
    }
643
    /// Pushes `scope` onto the top of the cleanup-scope stack.
    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
        self.scopes.borrow_mut().push(scope)
    }
647
    /// Pops and returns the top-most cleanup scope.
    /// Panics (unwrap) if the stack is empty.
    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
        debug!("popping cleanup scope {}, {} scopes remaining",
               self.top_scope(|s| s.block_name("")),
               self.scopes_len() - 1);

        self.scopes.borrow_mut().pop().unwrap()
    }
655
    /// Applies `f` to the top-most cleanup scope and returns the result.
    /// Panics (unwrap) if the stack is empty.
    fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R {
        f(self.scopes.borrow().last().unwrap())
    }
659
660     /// Used when the caller wishes to jump to an early exit, such as a return, break, continue, or
661     /// unwind. This function will generate all cleanups between the top of the stack and the exit
662     /// `label` and return a basic block that the caller can branch to.
663     ///
664     /// For example, if the current stack of cleanups were as follows:
665     ///
666     ///      AST 22
667     ///      Custom 1
668     ///      AST 23
669     ///      Loop 23
670     ///      Custom 2
671     ///      AST 24
672     ///
673     /// and the `label` specifies a break from `Loop 23`, then this function would generate a
674     /// series of basic blocks as follows:
675     ///
676     ///      Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
677     ///
678     /// where `break_blk` is the block specified in `Loop 23` as the target for breaks. The return
679     /// value would be the first basic block in that sequence (`Cleanup(AST 24)`). The caller could
680     /// then branch to `Cleanup(AST 24)` and it will perform all cleanups and finally branch to the
681     /// `break_blk`.
682     fn trans_cleanups_to_exit_scope(&'blk self,
683                                     label: EarlyExitLabel)
684                                     -> BasicBlockRef {
685         debug!("trans_cleanups_to_exit_scope label={:?} scopes={}",
686                label, self.scopes_len());
687
688         let orig_scopes_len = self.scopes_len();
689         let mut prev_llbb;
690         let mut popped_scopes = vec!();
691
692         // First we pop off all the cleanup stacks that are
693         // traversed until the exit is reached, pushing them
694         // onto the side vector `popped_scopes`. No code is
695         // generated at this time.
696         //
697         // So, continuing the example from above, we would wind up
698         // with a `popped_scopes` vector of `[AST 24, Custom 2]`.
699         // (Presuming that there are no cached exits)
700         loop {
701             if self.scopes_len() == 0 {
702                 match label {
703                     UnwindExit => {
704                         // Generate a block that will `Resume`.
705                         let prev_bcx = self.new_block(true, "resume", None);
706                         let personality = self.personality.get().expect(
707                             "create_landing_pad() should have set this");
708                         build::Resume(prev_bcx,
709                                       build::Load(prev_bcx, personality));
710                         prev_llbb = prev_bcx.llbb;
711                         break;
712                     }
713
714                     ReturnExit => {
715                         prev_llbb = self.get_llreturn();
716                         break;
717                     }
718
719                     LoopExit(id, _) => {
720                         self.ccx.sess().bug(&format!(
721                                 "cannot exit from scope {}, \
722                                 not in scope", id));
723                     }
724                 }
725             }
726
727             // Check if we have already cached the unwinding of this
728             // scope for this label. If so, we can stop popping scopes
729             // and branch to the cached label, since it contains the
730             // cleanups for any subsequent scopes.
731             match self.top_scope(|s| s.cached_early_exit(label)) {
732                 Some(cleanup_block) => {
733                     prev_llbb = cleanup_block;
734                     break;
735                 }
736                 None => { }
737             }
738
739             // Pop off the scope, since we will be generating
740             // unwinding code for it. If we are searching for a loop exit,
741             // and this scope is that loop, then stop popping and set
742             // `prev_llbb` to the appropriate exit block from the loop.
743             popped_scopes.push(self.pop_scope());
744             let scope = popped_scopes.last().unwrap();
745             match label {
746                 UnwindExit | ReturnExit => { }
747                 LoopExit(id, exit) => {
748                     match scope.kind.early_exit_block(id, exit) {
749                         Some(exitllbb) => {
750                             prev_llbb = exitllbb;
751                             break;
752                         }
753
754                         None => { }
755                     }
756                 }
757             }
758         }
759
760         debug!("trans_cleanups_to_exit_scope: popped {} scopes",
761                popped_scopes.len());
762
763         // Now push the popped scopes back on. As we go,
764         // we track in `prev_llbb` the exit to which this scope
765         // should branch when it's done.
766         //
767         // So, continuing with our example, we will start out with
768         // `prev_llbb` being set to `break_blk` (or possibly a cached
769         // early exit). We will then pop the scopes from `popped_scopes`
770         // and generate a basic block for each one, prepending it in the
771         // series and updating `prev_llbb`. So we begin by popping `Custom 2`
772         // and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)`
773         // branch to `prev_llbb == break_blk`, giving us a sequence like:
774         //
775         //     Cleanup(Custom 2) -> prev_llbb
776         //
777         // We then pop `AST 24` and repeat the process, giving us the sequence:
778         //
779         //     Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb
780         //
781         // At this point, `popped_scopes` is empty, and so the final block
782         // that we return to the user is `Cleanup(AST 24)`.
783         while let Some(mut scope) = popped_scopes.pop() {
784             if !scope.cleanups.is_empty() {
785                 let name = scope.block_name("clean");
786                 debug!("generating cleanups for {}", name);
787                 let bcx_in = self.new_block(label.is_unwind(),
788                                             &name[..],
789                                             None);
790                 let mut bcx_out = bcx_in;
791                 for cleanup in scope.cleanups.iter().rev() {
792                     bcx_out = cleanup.trans(bcx_out,
793                                             scope.debug_loc);
794                 }
795                 build::Br(bcx_out, prev_llbb, DebugLoc::None);
796                 prev_llbb = bcx_in.llbb;
797
798                 scope.add_cached_early_exit(label, prev_llbb);
799             }
800             self.push_scope(scope);
801         }
802
803         debug!("trans_cleanups_to_exit_scope: prev_llbb={:?}", prev_llbb);
804
805         assert_eq!(self.scopes_len(), orig_scopes_len);
806         prev_llbb
807     }
808
    /// Creates a landing pad for the top scope, if one does not exist.  The landing pad will
    /// perform all cleanups necessary for an unwind and then `resume` to continue error
    /// propagation:
    ///
    ///     landing_pad -> ... cleanups ... -> [resume]
    ///
    /// (The cleanups and resume instruction are created by `trans_cleanups_to_exit_scope()`, not
    /// in this function itself.)
    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
        let pad_bcx;

        debug!("get_or_create_landing_pad");

        // Check if a landing pad block exists; if not, create one.
        // This is in its own braced scope so that the mutable borrow of
        // `self.scopes` is released before `trans_cleanups_to_exit_scope`
        // below, which itself pops and pushes scopes (and would otherwise
        // hit the already-active RefCell borrow).
        {
            let mut scopes = self.scopes.borrow_mut();
            let last_scope = scopes.last_mut().unwrap();
            match last_scope.cached_landing_pad {
                // Cache hit: the top scope already has a landing pad.
                Some(llbb) => { return llbb; }
                // Cache miss: create the pad block now and cache it on the
                // top scope so subsequent invokes in this scope reuse it.
                None => {
                    let name = last_scope.block_name("unwind");
                    pad_bcx = self.new_block(true, &name[..], None);
                    last_scope.cached_landing_pad = Some(pad_bcx.llbb);
                }
            }
        }

        // The landing pad return type (the type being propagated). Not sure what
        // this represents but it's determined by the personality function and
        // this is what the EH proposal example uses.
        let llretty = Type::struct_(self.ccx,
                                    &[Type::i8p(self.ccx), Type::i32(self.ccx)],
                                    false);

        let llpersonality = pad_bcx.fcx.eh_personality();

        // The only landing pad clause will be 'cleanup'
        let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1);

        // The landing pad block is a cleanup
        build::SetCleanup(pad_bcx, llretval);

        // We store the retval in a function-central alloca, so that calls to
        // Resume can find it.
        match self.personality.get() {
            // A previous landing pad in this function already allocated
            // the slot; just store into it.
            Some(addr) => {
                build::Store(pad_bcx, llretval, addr);
            }
            // First landing pad in this function: allocate the slot and
            // record it so `trans_cleanups_to_exit_scope` can Load it for
            // the `Resume` (see the "resume" block built there).
            None => {
                let addr = base::alloca(pad_bcx, common::val_ty(llretval), "");
                self.personality.set(Some(addr));
                build::Store(pad_bcx, llretval, addr);
            }
        }

        // Generate the cleanup block and branch to it.
        let cleanup_llbb = self.trans_cleanups_to_exit_scope(UnwindExit);
        build::Br(pad_bcx, cleanup_llbb, DebugLoc::None);

        return pad_bcx.llbb;
    }
870 }
871
872 impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
873     fn new(kind: CleanupScopeKind<'blk, 'tcx>,
874            debug_loc: DebugLoc)
875         -> CleanupScope<'blk, 'tcx> {
876         CleanupScope {
877             kind: kind,
878             debug_loc: debug_loc,
879             cleanups: vec!(),
880             cached_early_exits: vec!(),
881             cached_landing_pad: None,
882         }
883     }
884
885     fn clear_cached_exits(&mut self) {
886         self.cached_early_exits = vec!();
887         self.cached_landing_pad = None;
888     }
889
890     fn cached_early_exit(&self,
891                          label: EarlyExitLabel)
892                          -> Option<BasicBlockRef> {
893         self.cached_early_exits.iter().
894             find(|e| e.label == label).
895             map(|e| e.cleanup_block)
896     }
897
898     fn add_cached_early_exit(&mut self,
899                              label: EarlyExitLabel,
900                              blk: BasicBlockRef) {
901         self.cached_early_exits.push(
902             CachedEarlyExit { label: label,
903                               cleanup_block: blk });
904     }
905
906     /// True if this scope has cleanups that need unwinding
907     fn needs_invoke(&self) -> bool {
908
909         self.cached_landing_pad.is_some() ||
910             self.cleanups.iter().any(|c| c.must_unwind())
911     }
912
913     /// Returns a suitable name to use for the basic block that handles this cleanup scope
914     fn block_name(&self, prefix: &str) -> String {
915         match self.kind {
916             CustomScopeKind => format!("{}_custom_", prefix),
917             AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
918             LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id),
919         }
920     }
921
922     /// Manipulate cleanup scope for call arguments. Conceptually, each
923     /// argument to a call is an lvalue, and performing the call moves each
924     /// of the arguments into a new rvalue (which gets cleaned up by the
925     /// callee). As an optimization, instead of actually performing all of
926     /// those moves, trans just manipulates the cleanup scope to obtain the
927     /// same effect.
928     pub fn drop_non_lifetime_clean(&mut self) {
929         self.cleanups.retain(|c| c.is_lifetime_end());
930         self.clear_cached_exits();
931     }
932 }
933
934 impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
935     fn is_temp(&self) -> bool {
936         match *self {
937             CustomScopeKind => true,
938             LoopScopeKind(..) | AstScopeKind(..) => false,
939         }
940     }
941
942     fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
943         match *self {
944             CustomScopeKind | LoopScopeKind(..) => false,
945             AstScopeKind(i) => i == id
946         }
947     }
948
949     fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
950         match *self {
951             CustomScopeKind | AstScopeKind(..) => false,
952             LoopScopeKind(i, _) => i == id
953         }
954     }
955
956     /// If this is a loop scope with id `id`, return the early exit block `exit`, else `None`
957     fn early_exit_block(&self,
958                         id: ast::NodeId,
959                         exit: usize) -> Option<BasicBlockRef> {
960         match *self {
961             LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
962             _ => None,
963         }
964     }
965 }
966
967 impl EarlyExitLabel {
968     fn is_unwind(&self) -> bool {
969         match *self {
970             UnwindExit => true,
971             _ => false
972         }
973     }
974 }
975
976 ///////////////////////////////////////////////////////////////////////////
977 // Cleanup types
978
/// A scheduled cleanup that runs the drop glue for a value.
#[derive(Copy, Clone)]
pub struct DropValue<'tcx> {
    // Selects `glue::drop_ty_immediate` over `glue::drop_ty_core` in
    // `trans` below; presumably true when `val` is the value itself
    // rather than a pointer to it -- TODO confirm against glue.rs.
    is_immediate: bool,
    // The LLVM value to drop.
    val: ValueRef,
    // Rust type of the value being dropped.
    ty: Ty<'tcx>,
    // If true, `trans` overwrites the memory with the drop-done fill
    // pattern (`base::drop_done_fill_mem`) after the drop glue runs.
    fill_on_drop: bool,
    // Passed through to the drop glue; see its use in `trans` below and
    // in the `skip_dtor=...` push_ctxt labels.
    skip_dtor: bool,
}
987
988 impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
989     fn must_unwind(&self) -> bool {
990         true
991     }
992
993     fn is_lifetime_end(&self) -> bool {
994         false
995     }
996
997     fn trans<'blk>(&self,
998                    bcx: Block<'blk, 'tcx>,
999                    debug_loc: DebugLoc)
1000                    -> Block<'blk, 'tcx> {
1001         let skip_dtor = self.skip_dtor;
1002         let _icx = if skip_dtor {
1003             base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=true")
1004         } else {
1005             base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=false")
1006         };
1007         let bcx = if self.is_immediate {
1008             glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
1009         } else {
1010             glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
1011         };
1012         if self.fill_on_drop {
1013             base::drop_done_fill_mem(bcx, self.val, self.ty);
1014         }
1015         bcx
1016     }
1017 }
1018
/// Which heap a scheduled `FreeValue` belongs to. Only the exchange
/// heap exists (see the single match arm in `FreeValue::trans`).
#[derive(Copy, Clone, Debug)]
pub enum Heap {
    HeapExchange
}
1023
/// A scheduled cleanup that releases a heap allocation when its scope
/// exits.
#[derive(Copy, Clone)]
pub struct FreeValue<'tcx> {
    // Pointer to the allocation to free.
    ptr: ValueRef,
    // Which heap the allocation came from (see `Heap`).
    heap: Heap,
    // Type of the allocation's contents; forwarded to
    // `glue::trans_exchange_free_ty` in `trans` below.
    content_ty: Ty<'tcx>
}
1030
1031 impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> {
1032     fn must_unwind(&self) -> bool {
1033         true
1034     }
1035
1036     fn is_lifetime_end(&self) -> bool {
1037         false
1038     }
1039
1040     fn trans<'blk>(&self,
1041                    bcx: Block<'blk, 'tcx>,
1042                    debug_loc: DebugLoc)
1043                    -> Block<'blk, 'tcx> {
1044         match self.heap {
1045             HeapExchange => {
1046                 glue::trans_exchange_free_ty(bcx,
1047                                              self.ptr,
1048                                              self.content_ty,
1049                                              debug_loc)
1050             }
1051         }
1052     }
1053 }
1054
/// A scheduled cleanup that emits a lifetime-end marker (via
/// `base::call_lifetime_end`) rather than any drop or free code.
#[derive(Copy, Clone)]
pub struct LifetimeEnd {
    // Pointer whose lifetime is being ended.
    ptr: ValueRef,
}
1059
impl<'tcx> Cleanup<'tcx> for LifetimeEnd {
    // Returning false keeps lifetime markers off the unwind path (see
    // the `must_unwind` check in `CleanupScope::needs_invoke`).
    fn must_unwind(&self) -> bool {
        false
    }

    // This is the one cleanup kind that
    // `CleanupScope::drop_non_lifetime_clean` retains.
    fn is_lifetime_end(&self) -> bool {
        true
    }

    // Emits the lifetime-end marker for `self.ptr` into `bcx`.
    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: DebugLoc)
                   -> Block<'blk, 'tcx> {
        // Apply the source location before emitting the call so the
        // marker carries this cleanup's debug info.
        debug_loc.apply(bcx.fcx);
        base::call_lifetime_end(bcx, self.ptr);
        bcx
    }
}
1078
1079 pub fn temporary_scope(tcx: &ty::ctxt,
1080                        id: ast::NodeId)
1081                        -> ScopeId {
1082     match tcx.region_maps.temporary_scope(id) {
1083         Some(scope) => {
1084             let r = AstScope(scope.node_id());
1085             debug!("temporary_scope({}) = {:?}", id, r);
1086             r
1087         }
1088         None => {
1089             tcx.sess.bug(&format!("no temporary scope available for expr {}",
1090                                  id))
1091         }
1092     }
1093 }
1094
1095 pub fn var_scope(tcx: &ty::ctxt,
1096                  id: ast::NodeId)
1097                  -> ScopeId {
1098     let r = AstScope(tcx.region_maps.var_scope(id).node_id());
1099     debug!("var_scope({}) = {:?}", id, r);
1100     r
1101 }
1102
1103 ///////////////////////////////////////////////////////////////////////////
1104 // These traits just exist to put the methods into this file.
1105
1106 pub trait CleanupMethods<'blk, 'tcx> {
1107     fn push_ast_cleanup_scope(&self, id: NodeIdAndSpan);
1108     fn push_loop_cleanup_scope(&self,
1109                                id: ast::NodeId,
1110                                exits: [Block<'blk, 'tcx>; EXIT_MAX]);
1111     fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
1112     fn push_custom_cleanup_scope_with_debug_loc(&self,
1113                                                 debug_loc: NodeIdAndSpan)
1114                                                 -> CustomScopeIndex;
1115     fn pop_and_trans_ast_cleanup_scope(&self,
1116                                        bcx: Block<'blk, 'tcx>,
1117                                        cleanup_scope: ast::NodeId)
1118                                        -> Block<'blk, 'tcx>;
1119     fn pop_loop_cleanup_scope(&self,
1120                               cleanup_scope: ast::NodeId);
1121     fn pop_custom_cleanup_scope(&self,
1122                                 custom_scope: CustomScopeIndex);
1123     fn pop_and_trans_custom_cleanup_scope(&self,
1124                                           bcx: Block<'blk, 'tcx>,
1125                                           custom_scope: CustomScopeIndex)
1126                                           -> Block<'blk, 'tcx>;
1127     fn top_loop_scope(&self) -> ast::NodeId;
1128     fn normal_exit_block(&'blk self,
1129                          cleanup_scope: ast::NodeId,
1130                          exit: usize) -> BasicBlockRef;
1131     fn return_exit_block(&'blk self) -> BasicBlockRef;
1132     fn schedule_lifetime_end(&self,
1133                          cleanup_scope: ScopeId,
1134                          val: ValueRef);
1135     fn schedule_drop_mem(&self,
1136                          cleanup_scope: ScopeId,
1137                          val: ValueRef,
1138                          ty: Ty<'tcx>);
1139     fn schedule_drop_and_fill_mem(&self,
1140                                   cleanup_scope: ScopeId,
1141                                   val: ValueRef,
1142                                   ty: Ty<'tcx>);
1143     fn schedule_drop_adt_contents(&self,
1144                                   cleanup_scope: ScopeId,
1145                                   val: ValueRef,
1146                                   ty: Ty<'tcx>);
1147     fn schedule_drop_immediate(&self,
1148                                cleanup_scope: ScopeId,
1149                                val: ValueRef,
1150                                ty: Ty<'tcx>);
1151     fn schedule_free_value(&self,
1152                            cleanup_scope: ScopeId,
1153                            val: ValueRef,
1154                            heap: Heap,
1155                            content_ty: Ty<'tcx>);
1156     fn schedule_clean(&self,
1157                       cleanup_scope: ScopeId,
1158                       cleanup: CleanupObj<'tcx>);
1159     fn schedule_clean_in_ast_scope(&self,
1160                                    cleanup_scope: ast::NodeId,
1161                                    cleanup: CleanupObj<'tcx>);
1162     fn schedule_clean_in_custom_scope(&self,
1163                                     custom_scope: CustomScopeIndex,
1164                                     cleanup: CleanupObj<'tcx>);
1165     fn needs_invoke(&self) -> bool;
1166     fn get_landing_pad(&'blk self) -> BasicBlockRef;
1167 }
1168
/// Private helpers backing `CleanupMethods`; these manipulate the raw
/// scope stack and generate the actual cleanup/exit blocks.
trait CleanupHelperMethods<'blk, 'tcx> {
    // Innermost enclosing AST scope's node id, if any.
    fn top_ast_scope(&self) -> Option<ast::NodeId>;
    // Index of the innermost scope with pending cleanups, if any.
    fn top_nonempty_cleanup_scope(&self) -> Option<usize>;
    // Validity checks for custom-scope indices (used to assert that
    // pops happen in stack order).
    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    // Generates `scope`'s cleanups inline into `bcx` (normal exit path),
    // returning the block in which codegen continues.
    fn trans_scope_cleanups(&self,
                            bcx: Block<'blk, 'tcx>,
                            scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
    // Builds (or reuses cached) cleanup blocks for every scope out to
    // the exit named by `label`, returning the first block to branch
    // to; see the large implementation above for the full protocol.
    fn trans_cleanups_to_exit_scope(&'blk self,
                                    label: EarlyExitLabel)
                                    -> BasicBlockRef;
    // See the documented implementation above.
    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
    // Raw accessors for the scope stack; `pop_scope`/`push_scope` are
    // used by `trans_cleanups_to_exit_scope` to walk and restore it.
    fn scopes_len(&self) -> usize;
    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
    // Runs `f` with a borrow of the topmost scope.
    fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R;
}