1 // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 /*!
12 Managing the scope stack. The scopes are tied to lexical scopes, so as
13 we descend the HAIR, we push a scope on the stack, translate its
14 contents, and then pop it off. Every scope is named by a
15 `region::Scope`.
16
17 ### SEME Regions
18
19 When pushing a new scope, we record the current point in the graph (a
20 basic block); this marks the entry to the scope. We then generate more
21 stuff in the control-flow graph. Whenever the scope is exited, either
22 via a `break` or `return` or just by fallthrough, that marks an exit
23 from the scope. Each lexical scope thus corresponds to a single-entry,
24 multiple-exit (SEME) region in the control-flow graph.
25
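For instance (an illustrative sketch, not actual compiler output; `helper` is
just a placeholder), the block below has a single entry point but two exits:
the early `return` and the fall-through past its closing brace. Drops and other
scope bookkeeping must be emitted on both exit edges.

```
# fn helper() {}
# let cond = true;
{
    let x = String::new(); // entering the block is the single entry point
    if cond {
        return;            // one exit from the block's scope
    }
    helper();
}                          // second exit: falling past the closing brace
```
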
26 For now, we keep a mapping from each `region::Scope` to its
27 corresponding SEME region for later reference (see caveat in next
28 paragraph). This is because region scopes are tied to
29 them. Eventually, when we shift to non-lexical lifetimes, there should
30 be no need to remember this mapping.
31
32 There is one additional wrinkle, actually, that I wanted to hide from
33 you but duty compels me to mention. In the course of translating
34 matches, it sometimes happens that certain code (namely guards) gets
35 executed multiple times. This means that a single lexical scope may
36 in fact correspond to multiple, disjoint SEME regions. So in fact our
37 mapping is from one scope to a vector of SEME regions.
38
39 ### Drops
40
41 The primary purpose for scopes is to insert drops: while translating
42 the contents, we also accumulate places that need to be dropped upon
43 exit from each scope. This is done by calling `schedule_drop`. Once a
44 drop is scheduled, whenever we branch out we will insert drops of all
45 those places onto the outgoing edge. Note that we don't know the full
46 set of scheduled drops up front, and so whenever we exit from the
47 scope we only drop the values scheduled thus far. For example, consider
48 the scope S corresponding to this loop:
49
50 ```
51 # let cond = true;
52 loop {
53     let x = ..;
54     if cond { break; }
55     let y = ..;
56 }
57 ```
58
59 When processing the `let x`, we will add one drop to the scope for
60 `x`.  The break will then insert a drop for `x`. When we process `let
61 y`, we will add another drop (in fact, to a subscope, but let's ignore
62 that for now); any later drops would also drop `y`.
63
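To make that concrete, here is a small runnable sketch (the `Noisy` type is
just for illustration): on the `break` edge only the drop for `x` has been
scheduled, while a full iteration would drop `y` and then `x`.

```
struct Noisy(&'static str);
impl Drop for Noisy {
    fn drop(&mut self) { println!("dropping {}", self.0); }
}

# let cond = true;
loop {
    let x = Noisy("x");
    if cond { break; }     // only `x` has been scheduled so far: drop just `x`
    let y = Noisy("y");
    // fallthrough to the next iteration drops `y` and then `x`
}
```
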
64 ### Early exit
65
66 There are numerous "normal" ways to early exit a scope: `break`,
67 `continue`, `return` (panics are handled separately). Whenever an
68 early exit occurs, the method `exit_scope` is called. It is given the
69 current point in execution where the early exit occurs, as well as the
70 scope you want to branch to (note that all early exits go to some
71 other enclosing scope). `exit_scope` will record this exit point and
72 also add all drops.
73
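As a surface-level illustration (a sketch only; `find` is a made-up example
function), a `return` from inside two nested loops is an early exit out of both
loop scopes, and `exit_scope` adds the drops scheduled in each of them (here,
the drop of `owned`) along the edge leading to the return:

```
# fn find(haystack: &[Vec<u8>], needle: u8) -> bool {
for row in haystack {
    let owned = row.clone();       // scheduled for drop in the outer loop's scope
    for &byte in &owned {
        if byte == needle {
            return true;           // early exit: `owned` is dropped on this edge
        }
    }
}
false
# }
# assert!(find(&[vec![1u8, 2, 3]], 2));
```
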
74 Panics are handled in a similar fashion, except that a panic always
75 returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call
76 `panic(p)` with the current point `p`. Or else you can call
77 `diverge_cleanup`, which will produce a block that you can branch to
78 which does the appropriate cleanup and then diverges. `panic(p)`
79 simply calls `diverge_cleanup()` and adds an edge from `p` to the
80 result.
81
82 ### Loop scopes
83
84 In addition to the normal scope stack, we track a loop scope stack
85 that contains only loops. It tracks where a `break` and `continue`
86 should go to.
87
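A small illustration (sketch only): `break` and `continue` are resolved against
this stack of loop scopes, with labels selecting an outer entry.

```
# let mut n = 0;
'outer: loop {
    loop {
        n += 1;
        if n % 2 == 0 { continue 'outer; } // targets the outer loop's scope
        if n > 10 { break 'outer; }        // exits the outer loop's scope
        break;                             // exits the innermost loop's scope
    }
}
```
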
88 */
89
90 use build::{BlockAnd, BlockAndExtension, Builder, CFG};
91 use hair::LintLevel;
92 use rustc::middle::region;
93 use rustc::ty::{Ty, TyCtxt};
94 use rustc::hir;
95 use rustc::hir::def_id::LOCAL_CRATE;
96 use rustc::mir::*;
97 use syntax_pos::{Span};
98 use rustc_data_structures::indexed_vec::Idx;
99 use rustc_data_structures::fx::FxHashMap;
100
101 #[derive(Debug)]
102 pub struct Scope<'tcx> {
103     /// The visibility scope this scope was created in.
104     visibility_scope: VisibilityScope,
105
106     /// the region span of this scope within source code.
107     region_scope: region::Scope,
108
109     /// the span of that region_scope
110     region_scope_span: Span,
111
112     /// Whether there's anything to do for the cleanup path, that is,
113     /// when unwinding through this scope. This includes destructors,
114     /// but not StorageDead statements, which don't get emitted at all
115     /// for unwinding, for several reasons:
116     ///  * clang doesn't emit llvm.lifetime.end for C++ unwinding
117     ///  * LLVM's memory dependency analysis can't handle it atm
118     ///  * polluting the cleanup MIR with StorageDead creates
119     ///    landing pads even though there are no actual destructors
120     ///  * freeing up stack space has no effect during unwinding
121     needs_cleanup: bool,
122
123     /// set of places to drop when exiting this scope. This starts
124     /// out empty but grows as variables are declared during the
125     /// building process. This is a stack, so we always drop from the
126     /// end of the vector (top of the stack) first.
127     drops: Vec<DropData<'tcx>>,
128
129     /// The cache for drop chain on “normal” exit into a particular BasicBlock.
130     cached_exits: FxHashMap<(BasicBlock, region::Scope), BasicBlock>,
131
132     /// The cache for drop chain on "generator drop" exit.
133     cached_generator_drop: Option<BasicBlock>,
134
135     /// The cache for drop chain on "unwind" exit.
136     cached_unwind: CachedBlock,
137 }
138
139 #[derive(Debug)]
140 struct DropData<'tcx> {
141     /// span where drop obligation was incurred (typically where place was declared)
142     span: Span,
143
144     /// place to drop
145     location: Place<'tcx>,
146
147     /// Whether this is a full value Drop, or just a StorageDead.
148     kind: DropKind
149 }
150
151 #[derive(Debug, Default, Clone, Copy)]
152 struct CachedBlock {
153     /// The cached block for the cleanups-on-diverge path. This block
154     /// contains code to run the current drop and all the preceding
155     /// drops (i.e. those having lower index in Drop’s Scope drop
156     /// array)
157     unwind: Option<BasicBlock>,
158
159     /// The cached block for unwinds during cleanups-on-generator-drop path
160     ///
161     /// This is split from the standard unwind path here to prevent drop
162     /// elaboration from creating drop flags that would have to be captured
163     /// by the generator. I'm not sure how important this optimization is,
164     /// but it is here.
165     generator_drop: Option<BasicBlock>,
166 }
167
168 #[derive(Debug)]
169 enum DropKind {
170     Value {
171         cached_block: CachedBlock,
172     },
173     Storage
174 }
175
176 #[derive(Clone, Debug)]
177 pub struct BreakableScope<'tcx> {
178     /// Region scope of the loop or block
179     pub region_scope: region::Scope,
180     /// Where the body of the loop begins. `None` if this is a block rather than a loop.
181     pub continue_block: Option<BasicBlock>,
182     /// Block to branch into when the loop or block terminates (either by being `break`-en out
183     /// from, or by having its condition become false)
184     pub break_block: BasicBlock,
185     /// The destination of the loop/block expression itself (i.e. where to put the result of a
186     /// `break` expression)
187     pub break_destination: Place<'tcx>,
188 }
189
190 impl CachedBlock {
191     fn invalidate(&mut self) {
192         self.generator_drop = None;
193         self.unwind = None;
194     }
195
196     fn get(&self, generator_drop: bool) -> Option<BasicBlock> {
197         if generator_drop {
198             self.generator_drop
199         } else {
200             self.unwind
201         }
202     }
203
204     fn ref_mut(&mut self, generator_drop: bool) -> &mut Option<BasicBlock> {
205         if generator_drop {
206             &mut self.generator_drop
207         } else {
208             &mut self.unwind
209         }
210     }
211 }
212
213 impl DropKind {
214     fn may_panic(&self) -> bool {
215         match *self {
216             DropKind::Value { .. } => true,
217             DropKind::Storage => false
218         }
219     }
220 }
221
222 impl<'tcx> Scope<'tcx> {
223     /// Invalidate all the cached blocks in the scope.
224     ///
225     /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
226     /// larger extent of code.
227     ///
228     /// `storage_only` controls whether to invalidate only the drop paths that run `StorageDead`.
229     /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
230     /// top-of-scope (as opposed to dependent scopes).
231     fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
232         // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
233         // with lots of `try!`?
234
235         // cached exits drop storage and refer to the top-of-scope
236         self.cached_exits.clear();
237
238         if !storage_only {
239             // the current generator drop and unwind ignore
240             // storage but refer to top-of-scope
241             self.cached_generator_drop = None;
242             self.cached_unwind.invalidate();
243         }
244
245         if !storage_only && !this_scope_only {
246             for dropdata in &mut self.drops {
247                 if let DropKind::Value { ref mut cached_block } = dropdata.kind {
248                     cached_block.invalidate();
249                 }
250             }
251         }
252     }
253
254     /// Given a span and this scope's visibility scope, make a SourceInfo.
255     fn source_info(&self, span: Span) -> SourceInfo {
256         SourceInfo {
257             span,
258             scope: self.visibility_scope
259         }
260     }
261 }
262
263 impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
264     // Adding and removing scopes
265     // ==========================
266     /// Start a breakable scope, which tracks where `continue` and `break`
267     /// should branch to. See module comment for more details.
268     ///
269     /// Returns the result of evaluating `f`.
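    ///
    /// A rough usage sketch (illustrative only; `build_loop_body` and
    /// `destination` are made-up stand-ins for the loop-lowering code):
    ///
    /// ```ignore
    /// let loop_block = this.cfg.start_new_block();
    /// let exit_block = this.cfg.start_new_block();
    /// this.in_breakable_scope(Some(loop_block), exit_block, destination.clone(), |this| {
    ///     build_loop_body(this, loop_block)
    /// });
    /// ```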
270     pub fn in_breakable_scope<F, R>(&mut self,
271                                     loop_block: Option<BasicBlock>,
272                                     break_block: BasicBlock,
273                                     break_destination: Place<'tcx>,
274                                     f: F) -> R
275         where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> R
276     {
277         let region_scope = self.topmost_scope();
278         let scope = BreakableScope {
279             region_scope,
280             continue_block: loop_block,
281             break_block,
282             break_destination,
283         };
284         self.breakable_scopes.push(scope);
285         let res = f(self);
286         let breakable_scope = self.breakable_scopes.pop().unwrap();
287         assert!(breakable_scope.region_scope == region_scope);
288         res
289     }
290
291     pub fn in_opt_scope<F, R>(&mut self,
292                               opt_scope: Option<(region::Scope, SourceInfo)>,
293                               mut block: BasicBlock,
294                               f: F)
295                               -> BlockAnd<R>
296         where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
297     {
298         debug!("in_opt_scope(opt_scope={:?}, block={:?})", opt_scope, block);
299         if let Some(region_scope) = opt_scope { self.push_scope(region_scope); }
300         let rv = unpack!(block = f(self));
301         if let Some(region_scope) = opt_scope {
302             unpack!(block = self.pop_scope(region_scope, block));
303         }
304         debug!("in_scope: exiting opt_scope={:?} block={:?}", opt_scope, block);
305         block.and(rv)
306     }
307
308     /// Convenience wrapper that pushes a scope and then executes `f`
309     /// to build its contents, popping the scope afterwards.
310     pub fn in_scope<F, R>(&mut self,
311                           region_scope: (region::Scope, SourceInfo),
312                           lint_level: LintLevel,
313                           mut block: BasicBlock,
314                           f: F)
315                           -> BlockAnd<R>
316         where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
317     {
318         debug!("in_scope(region_scope={:?}, block={:?})", region_scope, block);
319         let visibility_scope = self.visibility_scope;
320         let tcx = self.hir.tcx();
321         if let LintLevel::Explicit(node_id) = lint_level {
322             let same_lint_scopes = tcx.dep_graph.with_ignore(|| {
323                 let sets = tcx.lint_levels(LOCAL_CRATE);
324                 let parent_hir_id =
325                     tcx.hir.definitions().node_to_hir_id(
326                         self.visibility_scope_info[visibility_scope].lint_root
327                             );
328                 let current_hir_id =
329                     tcx.hir.definitions().node_to_hir_id(node_id);
330                 sets.lint_level_set(parent_hir_id) ==
331                     sets.lint_level_set(current_hir_id)
332             });
333
334             if !same_lint_scopes {
335                 self.visibility_scope =
336                     self.new_visibility_scope(region_scope.1.span, lint_level,
337                                               None);
338             }
339         }
340         self.push_scope(region_scope);
341         let rv = unpack!(block = f(self));
342         unpack!(block = self.pop_scope(region_scope, block));
343         self.visibility_scope = visibility_scope;
344         debug!("in_scope: exiting region_scope={:?} block={:?}", region_scope, block);
345         block.and(rv)
346     }
347
348     /// Push a scope onto the stack. You can then build code in this
349     /// scope and call `pop_scope` afterwards. Note that these two
350     /// calls must be paired; using `in_scope` as a convenience
351     /// wrapper may be preferable.
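    ///
    /// A rough pairing sketch (illustrative only; `build_contents` is a
    /// made-up stand-in, and this is essentially what `in_scope` does):
    ///
    /// ```ignore
    /// builder.push_scope(region_scope);
    /// let rv = unpack!(block = build_contents(builder, block));
    /// unpack!(block = builder.pop_scope(region_scope, block));
    /// ```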
352     pub fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo)) {
353         debug!("push_scope({:?})", region_scope);
354         let vis_scope = self.visibility_scope;
355         self.scopes.push(Scope {
356             visibility_scope: vis_scope,
357             region_scope: region_scope.0,
358             region_scope_span: region_scope.1.span,
359             needs_cleanup: false,
360             drops: vec![],
361             cached_generator_drop: None,
362             cached_exits: FxHashMap(),
363             cached_unwind: CachedBlock::default(),
364         });
365     }
366
367     /// Pops a scope, which should have region scope `region_scope`,
368     /// adding any drops onto the end of `block` that are needed.
369     /// This must match 1-to-1 with `push_scope`.
370     pub fn pop_scope(&mut self,
371                      region_scope: (region::Scope, SourceInfo),
372                      mut block: BasicBlock)
373                      -> BlockAnd<()> {
374         debug!("pop_scope({:?}, {:?})", region_scope, block);
375         // If we are emitting a `drop` statement, we need to have the cached
376         // diverge cleanup pads ready in case that drop panics.
377         let may_panic =
378             self.scopes.last().unwrap().drops.iter().any(|s| s.kind.may_panic());
379         if may_panic {
380             self.diverge_cleanup();
381         }
382         let scope = self.scopes.pop().unwrap();
383         assert_eq!(scope.region_scope, region_scope.0);
384
385         self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope);
386         let resume_block = self.resume_block();
387         unpack!(block = build_scope_drops(&mut self.cfg,
388                                           resume_block,
389                                           &scope,
390                                           &self.scopes,
391                                           block,
392                                           self.arg_count,
393                                           false));
394
395         block.unit()
396     }
397
398
399     /// Branch out of `block` to `target`, exiting all scopes up to
400     /// and including `region_scope`.  This will insert whatever drops are
401     /// needed, as well as tracking this exit for the SEME region. See
402     /// module comment for details.
403     pub fn exit_scope(&mut self,
404                       span: Span,
405                       region_scope: (region::Scope, SourceInfo),
406                       mut block: BasicBlock,
407                       target: BasicBlock) {
408         debug!("exit_scope(region_scope={:?}, block={:?}, target={:?})",
409                region_scope, block, target);
410         let scope_count = 1 + self.scopes.iter().rev()
411             .position(|scope| scope.region_scope == region_scope.0)
412             .unwrap_or_else(|| {
413                 span_bug!(span, "region_scope {:?} does not enclose", region_scope)
414             });
415         let len = self.scopes.len();
416         assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes");
417
418         // If we are emitting a `drop` statement, we need to have the cached
419         // diverge cleanup pads ready in case that drop panics.
420         let may_panic = self.scopes[(len - scope_count)..].iter()
421             .any(|s| s.drops.iter().any(|s| s.kind.may_panic()));
422         if may_panic {
423             self.diverge_cleanup();
424         }
425
426         {
427         let resume_block = self.resume_block();
428         let mut rest = &mut self.scopes[(len - scope_count)..];
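        // Walk the scopes being exited, innermost first. Wrapping `rest` in a
        // block (`{rest}`) moves the borrow instead of reborrowing it, so it
        // can be re-bound to the remaining outer scopes on each iteration.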
429         while let Some((scope, rest_)) = {rest}.split_last_mut() {
430             rest = rest_;
431             block = if let Some(&e) = scope.cached_exits.get(&(target, region_scope.0)) {
432                 self.cfg.terminate(block, scope.source_info(span),
433                                    TerminatorKind::Goto { target: e });
434                 return;
435             } else {
436                 let b = self.cfg.start_new_block();
437                 self.cfg.terminate(block, scope.source_info(span),
438                                    TerminatorKind::Goto { target: b });
439                 scope.cached_exits.insert((target, region_scope.0), b);
440                 b
441             };
442
443             // End all regions for scopes out of which we are breaking.
444             self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope);
445
446             unpack!(block = build_scope_drops(&mut self.cfg,
447                                               resume_block,
448                                               scope,
449                                               rest,
450                                               block,
451                                               self.arg_count,
452                                               false));
453         }
454         }
455         let scope = &self.scopes[len - scope_count];
456         self.cfg.terminate(block, scope.source_info(span),
457                            TerminatorKind::Goto { target: target });
458     }
459
460     /// Creates a path that performs all required cleanup for dropping a generator.
461     ///
462     /// This path terminates in GeneratorDrop. Returns the start of the path.
463     /// None indicates there’s no cleanup to do at this point.
464     pub fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> {
465         if !self.scopes.iter().any(|scope| scope.needs_cleanup) {
466             return None;
467         }
468
469         // Fill in the cache
470         self.diverge_cleanup_gen(true);
471
472         let src_info = self.scopes[0].source_info(self.fn_span);
473         let mut block = self.cfg.start_new_block();
474         let result = block;
475         let resume_block = self.resume_block();
476         let mut rest = &mut self.scopes[..];
477
478         while let Some((scope, rest_)) = {rest}.split_last_mut() {
479             rest = rest_;
480             if !scope.needs_cleanup {
481                 continue;
482             }
483             block = if let Some(b) = scope.cached_generator_drop {
484                 self.cfg.terminate(block, src_info,
485                                    TerminatorKind::Goto { target: b });
486                 return Some(result);
487             } else {
488                 let b = self.cfg.start_new_block();
489                 scope.cached_generator_drop = Some(b);
490                 self.cfg.terminate(block, src_info,
491                                    TerminatorKind::Goto { target: b });
492                 b
493             };
494
495             // End all regions for scopes out of which we are breaking.
496             self.cfg.push_end_region(self.hir.tcx(), block, src_info, scope.region_scope);
497
498             unpack!(block = build_scope_drops(&mut self.cfg,
499                                               resume_block,
500                                               scope,
501                                               rest,
502                                               block,
503                                               self.arg_count,
504                                               true));
505         }
506
507         self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop);
508
509         Some(result)
510     }
511
512     /// Creates a new visibility scope, nested in the current one.
513     pub fn new_visibility_scope(&mut self,
514                                 span: Span,
515                                 lint_level: LintLevel,
516                                 safety: Option<Safety>) -> VisibilityScope {
517         let parent = self.visibility_scope;
518         debug!("new_visibility_scope({:?}, {:?}, {:?}) - parent({:?})={:?}",
519                span, lint_level, safety,
520                parent, self.visibility_scope_info.get(parent));
521         let scope = self.visibility_scopes.push(VisibilityScopeData {
522             span,
523             parent_scope: Some(parent),
524         });
525         let scope_info = VisibilityScopeInfo {
526             lint_root: if let LintLevel::Explicit(lint_root) = lint_level {
527                 lint_root
528             } else {
529                 self.visibility_scope_info[parent].lint_root
530             },
531             safety: safety.unwrap_or_else(|| {
532                 self.visibility_scope_info[parent].safety
533             })
534         };
535         self.visibility_scope_info.push(scope_info);
536         scope
537     }
538
539     // Finding scopes
540     // ==============
541     /// Finds the breakable scope for a given label. This is used for
542     /// resolving `break` and `continue`.
543     pub fn find_breakable_scope(&mut self,
544                                 span: Span,
545                                 label: region::Scope)
546                                 -> &mut BreakableScope<'tcx> {
547         // find the loop-scope with the correct id
548         self.breakable_scopes.iter_mut()
549             .rev()
550             .find(|breakable_scope| breakable_scope.region_scope == label)
552             .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found"))
553     }
554
555     /// Given a span and the current visibility scope, make a SourceInfo.
556     pub fn source_info(&self, span: Span) -> SourceInfo {
557         SourceInfo {
558             span,
559             scope: self.visibility_scope
560         }
561     }
562
563     /// Returns the `region::Scope` of the scope which should be exited by a
564     /// return.
565     pub fn region_scope_of_return_scope(&self) -> region::Scope {
566         // The outermost scope (`scopes[0]`) will be the `CallSiteScope`.
567         // We want `scopes[1]`, which is the `ParameterScope`.
568         assert!(self.scopes.len() >= 2);
569         assert!(match self.scopes[1].region_scope.data() {
570             region::ScopeData::Arguments(_) => true,
571             _ => false,
572         });
573         self.scopes[1].region_scope
574     }
575
576     /// Returns the topmost active scope, which is known to be alive until
577     /// the next scope expression.
578     pub fn topmost_scope(&self) -> region::Scope {
579         self.scopes.last().expect("topmost_scope: no scopes present").region_scope
580     }
581
582     /// Returns the scope that we should use as the lifetime of an
583     /// operand. Basically, an operand must live until it is consumed.
584     /// This is similar to, but not quite the same as, the temporary
585     /// scope (which can be larger or smaller).
586     ///
587     /// Consider:
588     ///
589     ///     let x = foo(bar(X, Y));
590     ///
591     /// We wish to pop the storage for X and Y after `bar()` is
592     /// called, not after the whole `let` is completed.
593     ///
594     /// As another example, if the second argument diverges:
595     ///
596     ///     foo(Box::new(2), panic!())
597     ///
598     /// We would allocate the box but then free it on the unwinding
599     /// path; we would also emit a free on the 'success' path from
600     /// panic, but that will turn out to be removed as dead-code.
601     ///
602     /// When building statics/constants, returns `None` since
603     /// intermediate values do not have to be dropped in that case.
604     pub fn local_scope(&self) -> Option<region::Scope> {
605         match self.hir.body_owner_kind {
606             hir::BodyOwnerKind::Const |
607             hir::BodyOwnerKind::Static(_) =>
608                 // No need to free storage in this context.
609                 None,
610             hir::BodyOwnerKind::Fn =>
611                 Some(self.topmost_scope()),
612         }
613     }
614
615     // Scheduling drops
616     // ================
617     /// Indicates that `place` should be dropped on exit from
618     /// `region_scope`.
619     pub fn schedule_drop(&mut self,
620                          span: Span,
621                          region_scope: region::Scope,
622                          place: &Place<'tcx>,
623                          place_ty: Ty<'tcx>) {
624         let needs_drop = self.hir.needs_drop(place_ty);
625         let drop_kind = if needs_drop {
626             DropKind::Value { cached_block: CachedBlock::default() }
627         } else {
628             // Only temps and vars need their storage dead.
629             match *place {
630                 Place::Local(index) if index.index() > self.arg_count => DropKind::Storage,
631                 _ => return
632             }
633         };
634
635         for scope in self.scopes.iter_mut().rev() {
636             let this_scope = scope.region_scope == region_scope;
637             // When building drops, we try to cache chains of drops so that these drops
638             // can be reused by later drops which branch into the cached (already built)
639             // blocks.  This, however, means that whenever we add a drop into a scope which already
640             // had some blocks built (and thus, cached) for it, we must invalidate all caches which
641             // might branch into the scope which had a drop just added to it. This is necessary,
642             // because otherwise some other code might use the cache to branch into an already built
643             // chain of drops, essentially ignoring the newly added drop.
644             //
645             // For example, consider two scopes with a drop in each. These are built and
646             // thus the caches are filled:
647             //
648             // +--------------------------------------------------------+
649             // | +---------------------------------+                    |
650             // | | +--------+     +-------------+  |  +---------------+ |
651             // | | | return | <-+ | drop(outer) | <-+ |  drop(middle) | |
652             // | | +--------+     +-------------+  |  +---------------+ |
653             // | +------------|outer_scope cache|--+                    |
654             // +------------------------------|middle_scope cache|------+
655             //
656             // Now, a new, inner-most scope is added along with a new drop into both inner-most and
657             // outer-most scopes:
658             //
659             // +------------------------------------------------------------+
660             // | +----------------------------------+                       |
661             // | | +--------+      +-------------+  |   +---------------+   | +-------------+
662             // | | | return | <+   | drop(new)   | <-+  |  drop(middle) | <--+| drop(inner) |
663             // | | +--------+  |   | drop(outer) |  |   +---------------+   | +-------------+
664             // | |             +-+ +-------------+  |                       |
665             // | +---|invalid outer_scope cache|----+                       |
666             // +----=----------------|invalid middle_scope cache|-----------+
667             //
668             // If, when adding `drop(new)` we do not invalidate the cached blocks for both
669             // outer_scope and middle_scope, then, when building drops for the inner (right-most)
670             // scope, the old, cached blocks, without `drop(new)` will get used, producing the
671             // wrong results.
672             //
673             // The cache and its invalidation for the unwind branch are somewhat special. The cache
674             // is per-drop, rather than per scope, which has several different implications. Adding
675             // a new drop into a scope will not invalidate cached blocks of the prior drops in the
676             // scope. That is true, because none of the already existing drops will have an edge
677             // into a block with the newly added drop.
678             //
679             // Note that this code iterates scopes from the inner-most to the outer-most,
680             // invalidating caches of each scope visited. This way the bare minimum of
681             // caches gets invalidated, i.e. if a new drop is added into the middle scope, the
682             // cache of the outer scope stays intact.
683             scope.invalidate_cache(!needs_drop, this_scope);
684             if this_scope {
685                 if let DropKind::Value { .. } = drop_kind {
686                     scope.needs_cleanup = true;
687                 }
688                 let region_scope_span = region_scope.span(self.hir.tcx(),
689                                                           &self.hir.region_scope_tree);
690                 // Attribute scope exit drops to scope's closing brace
691                 let scope_end = region_scope_span.with_lo(region_scope_span.hi());
692                 scope.drops.push(DropData {
693                     span: scope_end,
694                     location: place.clone(),
695                     kind: drop_kind
696                 });
697                 return;
698             }
699         }
700         span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, place);
701     }
702
703     // Other
704     // =====
705     /// Creates a path that performs all required cleanup for unwinding.
706     ///
707     /// This path terminates in Resume. Returns the start of the path.
708     /// See module comment for more details. If there is nothing to clean
709     /// up, the path simply leads to the (cached) resume block.
710     pub fn diverge_cleanup(&mut self) -> BasicBlock {
711         self.diverge_cleanup_gen(false)
712     }
713
714     fn resume_block(&mut self) -> BasicBlock {
715         if let Some(target) = self.cached_resume_block {
716             target
717         } else {
718             let resumeblk = self.cfg.start_new_cleanup_block();
719             self.cfg.terminate(resumeblk,
720                                SourceInfo {
721                                    scope: ARGUMENT_VISIBILITY_SCOPE,
722                                    span: self.fn_span
723                                },
724                                TerminatorKind::Resume);
725             self.cached_resume_block = Some(resumeblk);
726             resumeblk
727         }
728     }
729
730     fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> BasicBlock {
731         // To start, create the resume terminator.
732         let mut target = self.resume_block();
733
734         let Builder { ref mut cfg, ref mut scopes, .. } = *self;
735
736         // Build up the drops in **reverse** order. The end result will
737         // look like:
738         //
739         //    scopes[n] -> scopes[n-1] -> ... -> scopes[0]
740         //
741         // However, we build this in **reverse order**. That is, we
742         // process scopes[0], then scopes[1], etc, pointing each one at
743         // the result generated from the one before. Along the way, we
744         // store caches. If everything is cached, we'll just walk right
745         // to left reading the cached results but never create anything.
746
747         if scopes.iter().any(|scope| scope.needs_cleanup) {
748             for scope in scopes.iter_mut() {
749                 target = build_diverge_scope(self.hir.tcx(), cfg, scope.region_scope_span,
750                                              scope, target, generator_drop);
751             }
752         }
753
754         target
755     }
756
757     /// Utility function for *non*-scope code to build its own drops.
758     pub fn build_drop(&mut self,
759                       block: BasicBlock,
760                       span: Span,
761                       location: Place<'tcx>,
762                       ty: Ty<'tcx>) -> BlockAnd<()> {
763         if !self.hir.needs_drop(ty) {
764             return block.unit();
765         }
766         let source_info = self.source_info(span);
767         let next_target = self.cfg.start_new_block();
768         let diverge_target = self.diverge_cleanup();
769         self.cfg.terminate(block, source_info,
770                            TerminatorKind::Drop {
771                                location,
772                                target: next_target,
773                                unwind: Some(diverge_target),
774                            });
775         next_target.unit()
776     }
777
778     /// Utility function for *non*-scope code to build its own drops.
779     pub fn build_drop_and_replace(&mut self,
780                                   block: BasicBlock,
781                                   span: Span,
782                                   location: Place<'tcx>,
783                                   value: Operand<'tcx>) -> BlockAnd<()> {
784         let source_info = self.source_info(span);
785         let next_target = self.cfg.start_new_block();
786         let diverge_target = self.diverge_cleanup();
787         self.cfg.terminate(block, source_info,
788                            TerminatorKind::DropAndReplace {
789                                location,
790                                value,
791                                target: next_target,
792                                unwind: Some(diverge_target),
793                            });
794         next_target.unit()
795     }
796
797     /// Create an Assert terminator and return the success block.
798     /// If the boolean condition operand is not the expected value,
799     /// a runtime panic will be caused with the given message.
800     pub fn assert(&mut self, block: BasicBlock,
801                   cond: Operand<'tcx>,
802                   expected: bool,
803                   msg: AssertMessage<'tcx>,
804                   span: Span)
805                   -> BasicBlock {
806         let source_info = self.source_info(span);
807
808         let success_block = self.cfg.start_new_block();
809         let cleanup = self.diverge_cleanup();
810
811         self.cfg.terminate(block, source_info,
812                            TerminatorKind::Assert {
813                                cond,
814                                expected,
815                                msg,
816                                target: success_block,
817                                cleanup: Some(cleanup),
818                            });
819
820         success_block
821     }
822 }
823
824 /// Builds drops for pop_scope and exit_scope.
825 fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
826                            resume_block: BasicBlock,
827                            scope: &Scope<'tcx>,
828                            earlier_scopes: &[Scope<'tcx>],
829                            mut block: BasicBlock,
830                            arg_count: usize,
831                            generator_drop: bool)
832                            -> BlockAnd<()> {
833     debug!("build_scope_drops({:?} -> {:?})", block, scope);
834     let mut iter = scope.drops.iter().rev();
835     while let Some(drop_data) = iter.next() {
836         let source_info = scope.source_info(drop_data.span);
837         match drop_data.kind {
838             DropKind::Value { .. } => {
839                 // Try to find the next block with its cached block for us to
840                 // diverge into, either a previous block in this current scope or
841                 // the top of the previous scope.
842                 //
843                 // If it wasn't for EndRegion, we could just chain all the DropData
844                 // together and pick the first DropKind::Value. Please do that
845                 // when we replace EndRegion with NLL.
846                 let on_diverge = iter.clone().filter_map(|dd| {
847                     match dd.kind {
848                         DropKind::Value { cached_block } => Some(cached_block),
849                         DropKind::Storage => None
850                     }
851                 }).next().or_else(|| {
852                     if earlier_scopes.iter().any(|scope| scope.needs_cleanup) {
853                         // If *any* scope requires cleanup code to be run,
854                         // we must use the cached unwind from the *topmost*
855                         // scope, to ensure all EndRegions from surrounding
856                         // scopes are executed before the drop code runs.
857                         Some(earlier_scopes.last().unwrap().cached_unwind)
858                     } else {
859                         // We don't need any further cleanup, so return None
860                         // to avoid creating a landing pad. We can skip
861                         // EndRegions because all local regions end anyway
862                         // when the function unwinds.
863                         //
864                         // This is an important optimization because LLVM is
865                         // terrible at optimizing landing pads. FIXME: I think
866                         // it would be cleaner and better to do this optimization
867                         // in SimplifyCfg instead of here.
868                         None
869                     }
870                 });
871
872                 let on_diverge = on_diverge.map(|cached_block| {
873                     cached_block.get(generator_drop).unwrap_or_else(|| {
874                         span_bug!(drop_data.span, "cached block not present?")
875                     })
876                 });
877
878                 let next = cfg.start_new_block();
879                 cfg.terminate(block, source_info, TerminatorKind::Drop {
880                     location: drop_data.location.clone(),
881                     target: next,
882                     unwind: Some(on_diverge.unwrap_or(resume_block))
883                 });
884                 block = next;
885             }
886             DropKind::Storage => {}
887         }
888
889         // We do not need to emit StorageDead for generator drops
890         if generator_drop {
891             continue
892         }
893
894         // Drop the storage for both value and storage drops.
895         // Only temps and vars need their storage dead.
896         match drop_data.location {
897             Place::Local(index) if index.index() > arg_count => {
898                 cfg.push(block, Statement {
899                     source_info,
900                     kind: StatementKind::StorageDead(index)
901                 });
902             }
903             _ => continue
904         }
905     }
906     block.unit()
907 }
908
909 fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
910                                        cfg: &mut CFG<'tcx>,
911                                        span: Span,
912                                        scope: &mut Scope<'tcx>,
913                                        mut target: BasicBlock,
914                                        generator_drop: bool)
915                                        -> BasicBlock
916 {
917     // Build up the drops in **reverse** order. The end result will
918     // look like:
919     //
920     //    [EndRegion Block] -> [drops[n]] -...-> [drops[0]] -> [Free] -> [target]
921     //    |                                                         |
922     //    +---------------------------------------------------------+
923     //     code for scope
924     //
925     // The code in this function reads from right to left. At each
926     // point, we check for cached blocks representing the
927     // remainder. If everything is cached, we'll just walk right to
928     // left reading the cached results but never create anything.
929
930     let visibility_scope = scope.visibility_scope;
931     let source_info = |span| SourceInfo {
932         span,
933         scope: visibility_scope
934     };
935
936     // Next, build up the drops. Here we iterate the vector in
937     // *forward* order, so that we generate drops[0] first (right to
938     // left in diagram above).
939     for (j, drop_data) in scope.drops.iter_mut().enumerate() {
940         debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data);
941         // Only full value drops are emitted in the diverging path,
942         // not StorageDead.
943         //
944         // Note: This may not actually be what we desire (are we
945         // "freeing" stack storage as we unwind, or merely observing a
946         // frozen stack)? In particular, the intent may have been to
947         // match the behavior of clang, but on inspection eddyb says
948         // this is not what clang does.
949         let cached_block = match drop_data.kind {
950             DropKind::Value { ref mut cached_block } => cached_block.ref_mut(generator_drop),
951             DropKind::Storage => continue
952         };
953         target = if let Some(cached_block) = *cached_block {
954             cached_block
955         } else {
956             let block = cfg.start_new_cleanup_block();
957             cfg.terminate(block, source_info(drop_data.span),
958                           TerminatorKind::Drop {
959                               location: drop_data.location.clone(),
960                               target,
961                               unwind: None
962                           });
963             *cached_block = Some(block);
964             block
965         };
966     }
967
968     // Finally, push the EndRegion block, used by mir-borrowck, and set
969     // `cached_unwind` to point to it (Block becomes trivial goto after
970     // pass that removes all EndRegions).
971     target = {
972         let cached_block = scope.cached_unwind.ref_mut(generator_drop);
973         if let Some(cached_block) = *cached_block {
974             cached_block
975         } else {
976             let block = cfg.start_new_cleanup_block();
977             cfg.push_end_region(tcx, block, source_info(span), scope.region_scope);
978             cfg.terminate(block, source_info(span), TerminatorKind::Goto { target: target });
979             *cached_block = Some(block);
980             block
981         }
982     };
983
984     debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target);
985
986     target
987 }