1 // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 /*!
12 Managing the scope stack. The scopes are tied to lexical scopes, so as
13 we descend the HAIR, we push a scope on the stack, translate its
14 contents, and then pop it off. Every scope is named by a
15 `region::Scope`.
16
17 ### SEME Regions
18
19 When pushing a new scope, we record the current point in the graph (a
20 basic block); this marks the entry to the scope. We then generate more
21 stuff in the control-flow graph. Whenever the scope is exited, either
22 via a `break` or `return` or just by fallthrough, that marks an exit
23 from the scope. Each lexical scope thus corresponds to a single-entry,
24 multiple-exit (SEME) region in the control-flow graph.
25
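For example (a purely illustrative sketch), the block below is a single
lexical scope with one entry but two exits, the `return` and the
fall-through at the closing brace, so it corresponds to a SEME region
with two exit edges:

```
# let cond = true;
{
    if cond { return; }   // one exit from the scope
    // falling through the closing brace is the other exit
}
```
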
26 For now, we keep a mapping from each `region::Scope` to its
27 corresponding SEME region for later reference (see caveat in next
28 paragraph). This is because region scopes are tied to
29 them. Eventually, when we shift to non-lexical lifetimes, there should
30 be no need to remember this mapping.
31
32 There is one additional wrinkle, actually, that I wanted to hide from
33 you but duty compels me to mention. In the course of translating
34 matches, it sometimes happens that certain code (namely guards) gets
35 executed multiple times. This means that a single lexical scope may
36 in fact correspond to multiple, disjoint SEME regions. So in fact our
37 mapping is from one scope to a vector of SEME regions.
38
39 ### Drops
40
41 The primary purpose for scopes is to insert drops: while translating
42 the contents, we also accumulate lvalues that need to be dropped upon
43 exit from each scope. This is done by calling `schedule_drop`. Once a
44 drop is scheduled, whenever we branch out we will insert drops of all
45 those lvalues onto the outgoing edge. Note that we don't know the full
46 set of scheduled drops up front, and so whenever we exit from the
47 scope we only drop the values scheduled thus far. For example, consider
48 the scope S corresponding to this loop:
49
50 ```
51 # let cond = true;
52 loop {
53     let x = ..;
54     if cond { break; }
55     let y = ..;
56 }
57 ```
58
59 When processing the `let x`, we will add one drop to the scope for
60 `x`.  The break will then insert a drop for `x`. When we process `let
61 y`, we will add another drop (in fact, to a subscope, but let's ignore
62 that for now); any later drops would also drop `y`.
63
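Schematically, the two exits from the loop body above end up with drop
chains like this (a rough sketch of the edges, not actual MIR output):

```text
`break` edge:        drop(x) --> jump out of the loop
end-of-body edge:    drop(y) --> drop(x) --> back to the top of the loop
```
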
64 ### Early exit
65
66 There are numerous "normal" ways to early exit a scope: `break`,
67 `continue`, `return` (panics are handled separately). Whenever an
68 early exit occurs, the method `exit_scope` is called. It is given the
69 current point in execution where the early exit occurs, as well as the
70 scope you want to branch to (note that all early exits go to some
71 other enclosing scope). `exit_scope` will record this exit point and
72 also add all drops.
73
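For instance, a `break` out of a loop is such an early exit. Here is a
minimal sketch (using a hypothetical droppable `Guard` type) of where
the scheduled drops end up:

```
# struct Guard;
# impl Drop for Guard { fn drop(&mut self) {} }
# let cond = true;
loop {
    let _guard = Guard;
    if cond {
        break; // `exit_scope` inserts `drop(_guard)` on this outgoing edge
    }
    // `_guard` is also dropped at the normal end of the loop body
}
```
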
74 Panics are handled in a similar fashion, except that a panic always
75 returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call
76 `panic(p)` with the current point `p`. Or else you can call
77 `diverge_cleanup`, which will produce a block that you can branch to
78 which does the appropriate cleanup and then diverges. `panic(p)`
79 simply calls `diverge_cleanup()` and adds an edge from `p` to the
80 result.
81
82 ### Loop scopes
83
84 In addition to the normal scope stack, we track a loop scope stack
85 that contains only loops. It tracks where a `break` or `continue`
86 should branch to.
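
A minimal sketch of how this interacts with labeled loops (purely
illustrative): a bare `break` or `continue` targets the innermost
enclosing loop, while a labeled one searches the stack for the scope
carrying that label.

```
# let mut n = 0;
'outer: loop {
    loop {
        n += 1;
        if n < 3 { continue 'outer; } // targets the outer scope's continue block
        break 'outer;                 // targets the outer scope's break block
    }
}
```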
87
88 */
89
90 use build::{BlockAnd, BlockAndExtension, Builder, CFG};
91 use hair::LintLevel;
92 use rustc::middle::region;
93 use rustc::ty::{Ty, TyCtxt};
94 use rustc::hir::def_id::LOCAL_CRATE;
95 use rustc::mir::*;
96 use rustc::mir::transform::MirSource;
97 use syntax_pos::{Span};
98 use rustc_data_structures::indexed_vec::Idx;
99 use rustc_data_structures::fx::FxHashMap;
100
101 #[derive(Debug)]
102 pub struct Scope<'tcx> {
103     /// The visibility scope this scope was created in.
104     visibility_scope: VisibilityScope,
105
106     /// the region span of this scope within source code.
107     region_scope: region::Scope,
108
109     /// the span of that region_scope
110     region_scope_span: Span,
111
112     /// Whether there's anything to do for the cleanup path, that is,
113     /// when unwinding through this scope. This includes destructors,
114     /// but not StorageDead statements, which don't get emitted at all
115     /// for unwinding, for several reasons:
116     ///  * clang doesn't emit llvm.lifetime.end for C++ unwinding
117     ///  * LLVM's memory dependency analysis can't handle it atm
118     ///  * polluting the cleanup MIR with StorageDead creates
119     ///    landing pads even though there are no actual destructors
120     ///  * freeing up stack space has no effect during unwinding
121     needs_cleanup: bool,
122
123     /// set of lvalues to drop when exiting this scope. This starts
124     /// out empty but grows as variables are declared during the
125     /// building process. This is a stack, so we always drop from the
126     /// end of the vector (top of the stack) first.
127     drops: Vec<DropData<'tcx>>,
128
129     /// The cache for drop chain on “normal” exit into a particular BasicBlock.
130     cached_exits: FxHashMap<(BasicBlock, region::Scope), BasicBlock>,
131
132     /// The cache for drop chain on "generator drop" exit.
133     cached_generator_drop: Option<BasicBlock>,
134 }
135
136 #[derive(Debug)]
137 struct DropData<'tcx> {
138     /// span where drop obligation was incurred (typically where lvalue was declared)
139     span: Span,
140
141     /// lvalue to drop
142     location: Lvalue<'tcx>,
143
144     /// Whether this is a full value Drop, or just a StorageDead.
145     kind: DropKind
146 }
147
148 #[derive(Debug, Default, Clone, Copy)]
149 struct CachedBlock {
150     /// The cached block for the cleanups-on-diverge path. This block
151     /// contains code to run the current drop and all the preceding
152     /// drops (i.e. those having lower index in Drop’s Scope drop
153     /// array)
154     unwind: Option<BasicBlock>,
155
156     /// The cached block for unwinds during cleanups-on-generator-drop path
157     ///
158     /// This is split from the standard unwind path here to prevent drop
159     /// elaboration from creating drop flags that would have to be captured
160     /// by the generator. I'm not sure how important this optimization is,
161     /// but it is here.
162     generator_drop: Option<BasicBlock>,
163 }
164
165 #[derive(Debug)]
166 enum DropKind {
167     Value {
168         cached_block: CachedBlock,
169     },
170     Storage
171 }
172
173 #[derive(Clone, Debug)]
174 pub struct BreakableScope<'tcx> {
175     /// Region scope of the loop
176     pub region_scope: region::Scope,
177     /// Where the body of the loop begins. `None` if this is a block rather than a loop
178     pub continue_block: Option<BasicBlock>,
179     /// Block to branch into when the loop or block terminates (either by being `break`-en out
180     /// from, or by having its condition become false)
181     pub break_block: BasicBlock,
182     /// The destination of the loop/block expression itself (i.e. where to put the result of a
183     /// `break` expression)
184     pub break_destination: Lvalue<'tcx>,
185 }
186
187 impl CachedBlock {
188     fn invalidate(&mut self) {
189         self.generator_drop = None;
190         self.unwind = None;
191     }
192
193     fn get(&self, generator_drop: bool) -> Option<BasicBlock> {
194         if generator_drop {
195             self.generator_drop
196         } else {
197             self.unwind
198         }
199     }
200
201     fn ref_mut(&mut self, generator_drop: bool) -> &mut Option<BasicBlock> {
202         if generator_drop {
203             &mut self.generator_drop
204         } else {
205             &mut self.unwind
206         }
207     }
208 }
209
210 impl DropKind {
211     fn may_panic(&self) -> bool {
212         match *self {
213             DropKind::Value { .. } => true,
214             DropKind::Storage => false
215         }
216     }
217 }
218
219 impl<'tcx> Scope<'tcx> {
220     /// Invalidate all the cached blocks in the scope.
221     ///
222     /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
223     /// larger extent of code.
224     ///
225     /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`.
226     /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
227     /// top-of-scope (as opposed to dependent scopes).
228     fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
229         // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
230         // with lots of `try!`?
231
232         // cached exits drop storage and refer to the top-of-scope
233         self.cached_exits.clear();
234
235         if !storage_only {
236             // the current generator drop ignores storage but refers to top-of-scope
237             self.cached_generator_drop = None;
238         }
239
240         if !storage_only && !this_scope_only {
241             for dropdata in &mut self.drops {
242                 if let DropKind::Value { ref mut cached_block } = dropdata.kind {
243                     cached_block.invalidate();
244                 }
245             }
246         }
247     }
248
249     /// Returns the cached entrypoint for diverging exit from this scope.
250     ///
251     /// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order for
252     /// this method to work correctly.
253     fn cached_block(&self, generator_drop: bool) -> Option<BasicBlock> {
254         let mut drops = self.drops.iter().rev().filter_map(|data| {
255             match data.kind {
256                 DropKind::Value { cached_block } => {
257                     Some(cached_block.get(generator_drop))
258                 }
259                 DropKind::Storage => None
260             }
261         });
262         if let Some(cached_block) = drops.next() {
263             Some(cached_block.expect("drop cache is not filled"))
264         } else {
265             None
266         }
267     }
268
269     /// Given a span and this scope's visibility scope, make a SourceInfo.
270     fn source_info(&self, span: Span) -> SourceInfo {
271         SourceInfo {
272             span,
273             scope: self.visibility_scope
274         }
275     }
276 }
277
278 impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
279     // Adding and removing scopes
280     // ==========================
281     /// Start a breakable scope, which tracks where `continue` and `break`
282     /// should branch to. See module comment for more details.
283     ///
284     /// Returns the result of evaluating `f` with the breakable scope pushed.
285     pub fn in_breakable_scope<F, R>(&mut self,
286                                     loop_block: Option<BasicBlock>,
287                                     break_block: BasicBlock,
288                                     break_destination: Lvalue<'tcx>,
289                                     f: F) -> R
290         where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> R
291     {
292         let region_scope = self.topmost_scope();
293         let scope = BreakableScope {
294             region_scope,
295             continue_block: loop_block,
296             break_block,
297             break_destination,
298         };
299         self.breakable_scopes.push(scope);
300         let res = f(self);
301         let breakable_scope = self.breakable_scopes.pop().unwrap();
302         assert!(breakable_scope.region_scope == region_scope);
303         res
304     }
305
306     pub fn in_opt_scope<F, R>(&mut self,
307                               opt_scope: Option<(region::Scope, SourceInfo)>,
308                               mut block: BasicBlock,
309                               f: F)
310                               -> BlockAnd<R>
311         where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
312     {
313         debug!("in_opt_scope(opt_scope={:?}, block={:?})", opt_scope, block);
314         if let Some(region_scope) = opt_scope { self.push_scope(region_scope); }
315         let rv = unpack!(block = f(self));
316         if let Some(region_scope) = opt_scope {
317             unpack!(block = self.pop_scope(region_scope, block));
318         }
319         debug!("in_scope: exiting opt_scope={:?} block={:?}", opt_scope, block);
320         block.and(rv)
321     }
322
323     /// Convenience wrapper that pushes a scope and then executes `f`
324     /// to build its contents, popping the scope afterwards.
325     pub fn in_scope<F, R>(&mut self,
326                           region_scope: (region::Scope, SourceInfo),
327                           lint_level: LintLevel,
328                           mut block: BasicBlock,
329                           f: F)
330                           -> BlockAnd<R>
331         where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
332     {
333         debug!("in_scope(region_scope={:?}, block={:?})", region_scope, block);
334         let visibility_scope = self.visibility_scope;
335         let tcx = self.hir.tcx();
336         if let LintLevel::Explicit(node_id) = lint_level {
337             let same_lint_scopes = tcx.dep_graph.with_ignore(|| {
338                 let sets = tcx.lint_levels(LOCAL_CRATE);
339                 let parent_hir_id =
340                     tcx.hir.definitions().node_to_hir_id(
341                         self.visibility_scope_info[visibility_scope].lint_root
342                             );
343                 let current_hir_id =
344                     tcx.hir.definitions().node_to_hir_id(node_id);
345                 sets.lint_level_set(parent_hir_id) ==
346                     sets.lint_level_set(current_hir_id)
347             });
348
349             if !same_lint_scopes {
350                 self.visibility_scope =
351                     self.new_visibility_scope(region_scope.1.span, lint_level,
352                                               None);
353             }
354         }
355         self.push_scope(region_scope);
356         let rv = unpack!(block = f(self));
357         unpack!(block = self.pop_scope(region_scope, block));
358         self.visibility_scope = visibility_scope;
359         debug!("in_scope: exiting region_scope={:?} block={:?}", region_scope, block);
360         block.and(rv)
361     }
362
363     /// Push a scope onto the stack. You can then build code in this
364     /// scope and call `pop_scope` afterwards. Note that these two
365     /// calls must be paired; using `in_scope` as a convenience
366     /// wrapper may be preferable.
367     pub fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo)) {
368         debug!("push_scope({:?})", region_scope);
369         let vis_scope = self.visibility_scope;
370         self.scopes.push(Scope {
371             visibility_scope: vis_scope,
372             region_scope: region_scope.0,
373             region_scope_span: region_scope.1.span,
374             needs_cleanup: false,
375             drops: vec![],
376             cached_generator_drop: None,
377             cached_exits: FxHashMap()
378         });
379     }
380
381     /// Pops a scope, which should have region scope `region_scope`,
382     /// adding any drops onto the end of `block` that are needed.
383     /// This must match 1-to-1 with `push_scope`.
384     pub fn pop_scope(&mut self,
385                      region_scope: (region::Scope, SourceInfo),
386                      mut block: BasicBlock)
387                      -> BlockAnd<()> {
388         debug!("pop_scope({:?}, {:?})", region_scope, block);
389         // If we are emitting a `drop` statement, we need to have the cached
390         // diverge cleanup pads ready in case that drop panics.
391         let may_panic =
392             self.scopes.last().unwrap().drops.iter().any(|s| s.kind.may_panic());
393         if may_panic {
394             self.diverge_cleanup();
395         }
396         let scope = self.scopes.pop().unwrap();
397         assert_eq!(scope.region_scope, region_scope.0);
398
399         self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope);
400         unpack!(block = build_scope_drops(&mut self.cfg,
401                                           &scope,
402                                           &self.scopes,
403                                           block,
404                                           self.arg_count,
405                                           false));
406
407         block.unit()
408     }
409
410
411     /// Branch out of `block` to `target`, exiting all scopes up to
412     /// and including `region_scope`.  This will insert whatever drops are
413     /// needed, as well as tracking this exit for the SEME region. See
414     /// module comment for details.
415     pub fn exit_scope(&mut self,
416                       span: Span,
417                       region_scope: (region::Scope, SourceInfo),
418                       mut block: BasicBlock,
419                       target: BasicBlock) {
420         debug!("exit_scope(region_scope={:?}, block={:?}, target={:?})",
421                region_scope, block, target);
422         let scope_count = 1 + self.scopes.iter().rev()
423             .position(|scope| scope.region_scope == region_scope.0)
424             .unwrap_or_else(|| {
425                 span_bug!(span, "region_scope {:?} does not enclose", region_scope)
426             });
427         let len = self.scopes.len();
428         assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes");
429
430         // If we are emitting a `drop` statement, we need to have the cached
431         // diverge cleanup pads ready in case that drop panics.
432         let may_panic = self.scopes[(len - scope_count)..].iter()
433             .any(|s| s.drops.iter().any(|s| s.kind.may_panic()));
434         if may_panic {
435             self.diverge_cleanup();
436         }
437
438         {
439         let mut rest = &mut self.scopes[(len - scope_count)..];
440         while let Some((scope, rest_)) = {rest}.split_last_mut() {
441             rest = rest_;
442             block = if let Some(&e) = scope.cached_exits.get(&(target, region_scope.0)) {
443                 self.cfg.terminate(block, scope.source_info(span),
444                                    TerminatorKind::Goto { target: e });
445                 return;
446             } else {
447                 let b = self.cfg.start_new_block();
448                 self.cfg.terminate(block, scope.source_info(span),
449                                    TerminatorKind::Goto { target: b });
450                 scope.cached_exits.insert((target, region_scope.0), b);
451                 b
452             };
453
454             // End all regions for scopes out of which we are breaking.
455             self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope);
456
457             unpack!(block = build_scope_drops(&mut self.cfg,
458                                               scope,
459                                               rest,
460                                               block,
461                                               self.arg_count,
462                                               false));
463         }
464         }
465         let scope = &self.scopes[len - scope_count];
466         self.cfg.terminate(block, scope.source_info(span),
467                            TerminatorKind::Goto { target: target });
468     }
469
470     /// Creates a path that performs all required cleanup for dropping a generator.
471     ///
472     /// This path terminates in GeneratorDrop. Returns the start of the path.
473     /// None indicates there’s no cleanup to do at this point.
474     pub fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> {
475         if !self.scopes.iter().any(|scope| scope.needs_cleanup) {
476             return None;
477         }
478
479         // Fill in the cache
480         self.diverge_cleanup_gen(true);
481
482         let src_info = self.scopes[0].source_info(self.fn_span);
483         let mut block = self.cfg.start_new_block();
484         let result = block;
485         let mut rest = &mut self.scopes[..];
486
487         while let Some((scope, rest_)) = {rest}.split_last_mut() {
488             rest = rest_;
489             if !scope.needs_cleanup {
490                 continue;
491             }
492             block = if let Some(b) = scope.cached_generator_drop {
493                 self.cfg.terminate(block, src_info,
494                                    TerminatorKind::Goto { target: b });
495                 return Some(result);
496             } else {
497                 let b = self.cfg.start_new_block();
498                 scope.cached_generator_drop = Some(b);
499                 self.cfg.terminate(block, src_info,
500                                    TerminatorKind::Goto { target: b });
501                 b
502             };
503             unpack!(block = build_scope_drops(&mut self.cfg,
504                                               scope,
505                                               rest,
506                                               block,
507                                               self.arg_count,
508                                               true));
509
510             // End all regions for scopes out of which we are breaking.
511             self.cfg.push_end_region(self.hir.tcx(), block, src_info, scope.region_scope);
512         }
513
514         self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop);
515
516         Some(result)
517     }
518
519     /// Creates a new visibility scope, nested in the current one.
520     pub fn new_visibility_scope(&mut self,
521                                 span: Span,
522                                 lint_level: LintLevel,
523                                 safety: Option<Safety>) -> VisibilityScope {
524         let parent = self.visibility_scope;
525         debug!("new_visibility_scope({:?}, {:?}, {:?}) - parent({:?})={:?}",
526                span, lint_level, safety,
527                parent, self.visibility_scope_info.get(parent));
528         let scope = self.visibility_scopes.push(VisibilityScopeData {
529             span,
530             parent_scope: Some(parent),
531         });
532         let scope_info = VisibilityScopeInfo {
533             lint_root: if let LintLevel::Explicit(lint_root) = lint_level {
534                 lint_root
535             } else {
536                 self.visibility_scope_info[parent].lint_root
537             },
538             safety: safety.unwrap_or_else(|| {
539                 self.visibility_scope_info[parent].safety
540             })
541         };
542         self.visibility_scope_info.push(scope_info);
543         scope
544     }
545
546     // Finding scopes
547     // ==============
548     /// Finds the breakable scope for a given label. This is used for
549     /// resolving `break` and `continue`.
550     pub fn find_breakable_scope(&mut self,
551                            span: Span,
552                            label: region::Scope)
553                            -> &mut BreakableScope<'tcx> {
554         // find the loop-scope with the correct id
555         self.breakable_scopes.iter_mut()
556             .rev()
557             .filter(|breakable_scope| breakable_scope.region_scope == label)
558             .next()
559             .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found"))
560     }
561
562     /// Given a span and the current visibility scope, make a SourceInfo.
563     pub fn source_info(&self, span: Span) -> SourceInfo {
564         SourceInfo {
565             span,
566             scope: self.visibility_scope
567         }
568     }
569
570     /// Returns the `region::Scope` of the scope which should be exited by a
571     /// return.
572     pub fn region_scope_of_return_scope(&self) -> region::Scope {
573         // The outermost scope (`scopes[0]`) will be the `CallSiteScope`.
574         // We want `scopes[1]`, which is the `ParameterScope`.
575         assert!(self.scopes.len() >= 2);
576         assert!(match self.scopes[1].region_scope.data() {
577             region::ScopeData::Arguments(_) => true,
578             _ => false,
579         });
580         self.scopes[1].region_scope
581     }
582
583     /// Returns the topmost active scope, which is known to be alive until
584     /// the next scope expression.
585     pub fn topmost_scope(&self) -> region::Scope {
586         self.scopes.last().expect("topmost_scope: no scopes present").region_scope
587     }
588
589     /// Returns the scope that we should use as the lifetime of an
590     /// operand. Basically, an operand must live until it is consumed.
591     /// This is similar to, but not quite the same as, the temporary
592     /// scope (which can be larger or smaller).
593     ///
594     /// Consider:
595     ///
596     ///     let x = foo(bar(X, Y));
597     ///
598     /// We wish to pop the storage for X and Y after `bar()` is
599     /// called, not after the whole `let` is completed.
600     ///
601     /// As another example, if the second argument diverges:
602     ///
603     ///     foo(Box::new(2), panic!())
604     ///
605     /// We would allocate the box but then free it on the unwinding
606     /// path; we would also emit a free on the 'success' path from
607     /// panic, but that will turn out to be removed as dead-code.
608     ///
609     /// When building statics/constants, returns `None` since
610     /// intermediate values do not have to be dropped in that case.
611     pub fn local_scope(&self) -> Option<region::Scope> {
612         match self.hir.src {
613             MirSource::Const(_) |
614             MirSource::Static(..) =>
615                 // No need to free storage in this context.
616                 None,
617             MirSource::Fn(_) =>
618                 Some(self.topmost_scope()),
619             MirSource::Promoted(..) |
620             MirSource::GeneratorDrop(..) =>
621                 bug!(),
622         }
623     }
624
625     // Scheduling drops
626     // ================
627     /// Indicates that `lvalue` should be dropped on exit from
628     /// `region_scope`.
629     pub fn schedule_drop(&mut self,
630                          span: Span,
631                          region_scope: region::Scope,
632                          lvalue: &Lvalue<'tcx>,
633                          lvalue_ty: Ty<'tcx>) {
634         let needs_drop = self.hir.needs_drop(lvalue_ty);
635         let drop_kind = if needs_drop {
636             DropKind::Value { cached_block: CachedBlock::default() }
637         } else {
638             // Only temps and vars need their storage dead.
639             match *lvalue {
640                 Lvalue::Local(index) if index.index() > self.arg_count => DropKind::Storage,
641                 _ => return
642             }
643         };
644
645         for scope in self.scopes.iter_mut().rev() {
646             let this_scope = scope.region_scope == region_scope;
647             // When building drops, we try to cache chains of drops in such a way that these
648             // drops can be reused by later drops which branch into the cached (already built)
649             // blocks.  This, however, means that whenever we add a drop into a scope which already
650             // had some blocks built (and thus, cached) for it, we must invalidate all caches which
651             // might branch into the scope which had a drop just added to it. This is necessary
652             // because otherwise some other code might use the cache to branch into an already
653             // built chain of drops, essentially ignoring the newly added drop.
654             //
655             // For example, consider two scopes with a drop in each. These are built and
656             // thus the caches are filled:
657             //
658             // +--------------------------------------------------------+
659             // | +---------------------------------+                    |
660             // | | +--------+     +-------------+  |  +---------------+ |
661             // | | | return | <-+ | drop(outer) | <-+ |  drop(middle) | |
662             // | | +--------+     +-------------+  |  +---------------+ |
663             // | +------------|outer_scope cache|--+                    |
664             // +------------------------------|middle_scope cache|------+
665             //
666             // Now, a new, inner-most scope is added along with a new drop into both inner-most and
667             // outer-most scopes:
668             //
669             // +------------------------------------------------------------+
670             // | +----------------------------------+                       |
671             // | | +--------+      +-------------+  |   +---------------+   | +-------------+
672             // | | | return | <+   | drop(new)   | <-+  |  drop(middle) | <--+| drop(inner) |
673             // | | +--------+  |   | drop(outer) |  |   +---------------+   | +-------------+
674             // | |             +-+ +-------------+  |                       |
675             // | +---|invalid outer_scope cache|----+                       |
676             // +----=----------------|invalid middle_scope cache|-----------+
677             //
678             // If, when adding `drop(new)` we do not invalidate the cached blocks for both
679             // outer_scope and middle_scope, then, when building drops for the inner (right-most)
680             // scope, the old, cached blocks, without `drop(new)` will get used, producing the
681             // wrong results.
682             //
683             // The cache and its invalidation for the unwind branch is somewhat special. The cache is
684             // per-drop, rather than per-scope, which has several different implications. Adding
685             // a new drop into a scope will not invalidate cached blocks of the prior drops in the
686             // scope. That is true because none of the already existing drops will have an edge
687             // into a block with the newly added drop.
688             //
689             // Note that this code iterates scopes from the inner-most to the outer-most,
690             // invalidating caches of each scope visited. This way the bare minimum of the
691             // caches gets invalidated, i.e. if a new drop is added into the middle scope, the
692             // cache of the outer scope stays intact.
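            //
            // As a purely hypothetical source-level sketch (names are illustrative, not
            // taken from an actual test), the simplest version of this situation is:
            //
            //     loop {
            //         let x = String::new();
            //         if cond { break }       // exit chain (drop x) is built and cached
            //         let y = String::new();  // scheduling drop(y) must invalidate that
            //                                 //   cache, because...
            //         if cond2 { break }      // ...this exit must drop y *and* x, so it
            //                                 //   cannot reuse the cached chain
            //     }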
693             scope.invalidate_cache(!needs_drop, this_scope);
694             if this_scope {
695                 if let DropKind::Value { .. } = drop_kind {
696                     scope.needs_cleanup = true;
697                 }
698                 let region_scope_span = region_scope.span(self.hir.tcx(),
699                                                           &self.hir.region_scope_tree);
700                 // Attribute scope exit drops to scope's closing brace
701                 let scope_end = region_scope_span.with_lo(region_scope_span.hi());
702                 scope.drops.push(DropData {
703                     span: scope_end,
704                     location: lvalue.clone(),
705                     kind: drop_kind
706                 });
707                 return;
708             }
709         }
710         span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, lvalue);
711     }
712
713     // Other
714     // =====
715     /// Creates a path that performs all required cleanup for unwinding.
716     ///
717     /// This path terminates in Resume. Returns the start of the path.
718     /// See module comment for more details. None indicates there’s no
719     /// cleanup to do at this point.
720     pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> {
721         self.diverge_cleanup_gen(false)
722     }
723
724     fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> Option<BasicBlock> {
725         if !self.scopes.iter().any(|scope| scope.needs_cleanup) {
726             return None;
727         }
728         assert!(!self.scopes.is_empty()); // or `any` above would be false
729
730         let Builder { ref mut cfg, ref mut scopes,
731                       ref mut cached_resume_block, .. } = *self;
732
733         // Build up the drops in **reverse** order. The end result will
734         // look like:
735         //
736         //    scopes[n] -> scopes[n-1] -> ... -> scopes[0]
737         //
738         // However, we build this in **reverse order**. That is, we
739         // process scopes[0], then scopes[1], etc, pointing each one at
740         // the result generated from the one before. Along the way, we
741         // store caches. If everything is cached, we'll just walk right
742         // to left reading the cached results but never create anything.
743
744         // To start, create the resume terminator.
745         let mut target = if let Some(target) = *cached_resume_block {
746             target
747         } else {
748             let resumeblk = cfg.start_new_cleanup_block();
749             cfg.terminate(resumeblk,
750                           scopes[0].source_info(self.fn_span),
751                           TerminatorKind::Resume);
752             *cached_resume_block = Some(resumeblk);
753             resumeblk
754         };
755
756         for scope in scopes.iter_mut() {
757             target = build_diverge_scope(self.hir.tcx(), cfg, scope.region_scope_span,
758                                          scope, target, generator_drop);
759         }
760         Some(target)
761     }
762
763     /// Utility function for *non*-scope code to build their own drops
764     pub fn build_drop(&mut self,
765                       block: BasicBlock,
766                       span: Span,
767                       location: Lvalue<'tcx>,
768                       ty: Ty<'tcx>) -> BlockAnd<()> {
769         if !self.hir.needs_drop(ty) {
770             return block.unit();
771         }
772         let source_info = self.source_info(span);
773         let next_target = self.cfg.start_new_block();
774         let diverge_target = self.diverge_cleanup();
775         self.cfg.terminate(block, source_info,
776                            TerminatorKind::Drop {
777                                location,
778                                target: next_target,
779                                unwind: diverge_target,
780                            });
781         next_target.unit()
782     }
783
784     /// Utility function for *non*-scope code to build their own drops
785     pub fn build_drop_and_replace(&mut self,
786                                   block: BasicBlock,
787                                   span: Span,
788                                   location: Lvalue<'tcx>,
789                                   value: Operand<'tcx>) -> BlockAnd<()> {
790         let source_info = self.source_info(span);
791         let next_target = self.cfg.start_new_block();
792         let diverge_target = self.diverge_cleanup();
793         self.cfg.terminate(block, source_info,
794                            TerminatorKind::DropAndReplace {
795                                location,
796                                value,
797                                target: next_target,
798                                unwind: diverge_target,
799                            });
800         next_target.unit()
801     }
802
803     /// Create an Assert terminator and return the success block.
804     /// If the boolean condition operand is not the expected value,
805     /// a runtime panic will be caused with the given message.
806     pub fn assert(&mut self, block: BasicBlock,
807                   cond: Operand<'tcx>,
808                   expected: bool,
809                   msg: AssertMessage<'tcx>,
810                   span: Span)
811                   -> BasicBlock {
812         let source_info = self.source_info(span);
813
814         let success_block = self.cfg.start_new_block();
815         let cleanup = self.diverge_cleanup();
816
817         self.cfg.terminate(block, source_info,
818                            TerminatorKind::Assert {
819                                cond,
820                                expected,
821                                msg,
822                                target: success_block,
823                                cleanup,
824                            });
825
826         success_block
827     }
828 }
829
830 /// Builds drops for pop_scope and exit_scope.
831 fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
832                            scope: &Scope<'tcx>,
833                            earlier_scopes: &[Scope<'tcx>],
834                            mut block: BasicBlock,
835                            arg_count: usize,
836                            generator_drop: bool)
837                            -> BlockAnd<()> {
838     debug!("build_scope_drops({:?} -> {:?})", block, scope);
839     let mut iter = scope.drops.iter().rev().peekable();
840     while let Some(drop_data) = iter.next() {
841         let source_info = scope.source_info(drop_data.span);
842         match drop_data.kind {
843             DropKind::Value { .. } => {
844                 // Try to find the next block with its cached block
845                 // for us to diverge into in case the drop panics.
846                 let on_diverge = iter.peek().iter().filter_map(|dd| {
847                     match dd.kind {
848                         DropKind::Value { cached_block } => {
849                             let result = cached_block.get(generator_drop);
850                             if result.is_none() {
851                                 span_bug!(drop_data.span, "cached block not present?")
852                             }
853                             result
854                         },
855                         DropKind::Storage => None
856                     }
857                 }).next();
858                 // If there are no `cached_block`s within the current scope,
859                 // we must look for one in an enclosing scope.
860                 let on_diverge = on_diverge.or_else(|| {
861                     earlier_scopes.iter().rev().flat_map(|s| s.cached_block(generator_drop)).next()
862                 });
863                 let next = cfg.start_new_block();
864                 cfg.terminate(block, source_info, TerminatorKind::Drop {
865                     location: drop_data.location.clone(),
866                     target: next,
867                     unwind: on_diverge
868                 });
869                 block = next;
870             }
871             DropKind::Storage => {}
872         }
873
874         // We do not need to emit StorageDead for generator drops
875         if generator_drop {
876             continue
877         }
878
879         // Drop the storage for both value and storage drops.
880         // Only temps and vars need their storage dead.
881         match drop_data.location {
882             Lvalue::Local(index) if index.index() > arg_count => {
883                 cfg.push(block, Statement {
884                     source_info,
885                     kind: StatementKind::StorageDead(index)
886                 });
887             }
888             _ => continue
889         }
890     }
891     block.unit()
892 }
893
894 fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
895                                        cfg: &mut CFG<'tcx>,
896                                        span: Span,
897                                        scope: &mut Scope<'tcx>,
898                                        mut target: BasicBlock,
899                                        generator_drop: bool)
900                                        -> BasicBlock
901 {
902     // Build up the drops in **reverse** order. The end result will
903     // look like:
904     //
905     //    [EndRegion Block] -> [drops[n]] -...-> [drops[0]] -> [Free] -> [target]
906     //    |                                                         |
907     //    +---------------------------------------------------------+
908     //     code for scope
909     //
910     // The code in this function reads from right to left. At each
911     // point, we check for cached blocks representing the
912     // remainder. If everything is cached, we'll just walk right to
913     // left reading the cached results but never create anything.
914
915     let visibility_scope = scope.visibility_scope;
916     let source_info = |span| SourceInfo {
917         span,
918         scope: visibility_scope
919     };
920
921     // Next, build up the drops. Here we iterate the vector in
922     // *forward* order, so that we generate drops[0] first (right to
923     // left in diagram above).
924     for (j, drop_data) in scope.drops.iter_mut().enumerate() {
925         debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data);
926         // Only full value drops are emitted in the diverging path,
927         // not StorageDead.
928         //
929         // Note: This may not actually be what we desire (are we
930         // "freeing" stack storage as we unwind, or merely observing a
931         // frozen stack)? In particular, the intent may have been to
932         // match the behavior of clang, but on inspection eddyb says
933         // this is not what clang does.
934         let cached_block = match drop_data.kind {
935             DropKind::Value { ref mut cached_block } => cached_block.ref_mut(generator_drop),
936             DropKind::Storage => continue
937         };
938         target = if let Some(cached_block) = *cached_block {
939             cached_block
940         } else {
941             let block = cfg.start_new_cleanup_block();
942             cfg.terminate(block, source_info(drop_data.span),
943                           TerminatorKind::Drop {
944                               location: drop_data.location.clone(),
945                               target,
946                               unwind: None
947                           });
948             *cached_block = Some(block);
949             block
950         };
951     }
952
953     // Finally, push the EndRegion block, used by mir-borrowck. (Block
954     // becomes trivial goto after pass that removes all EndRegions.)
955     {
956         let block = cfg.start_new_cleanup_block();
957         cfg.push_end_region(tcx, block, source_info(span), scope.region_scope);
958         cfg.terminate(block, source_info(span), TerminatorKind::Goto { target: target });
959         target = block
960     }
961
962     debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target);
963
964     target
965 }