// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/*!
Managing the scope stack. The scopes are tied to lexical scopes, so as
we descend the HAIR, we push a scope on the stack, translate its
contents, and then pop it off. Every scope is named by a
`region::Scope`.

### SEME Regions

When pushing a new scope, we record the current point in the graph (a
basic block); this marks the entry to the scope. We then generate more
stuff in the control-flow graph. Whenever the scope is exited, either
via a `break` or `return` or just by fallthrough, that marks an exit
from the scope. Each lexical scope thus corresponds to a single-entry,
multiple-exit (SEME) region in the control-flow graph.

For now, we keep a mapping from each `region::Scope` to its
corresponding SEME region for later reference (see caveat in next
paragraph). This is because region scopes are tied to
them. Eventually, when we shift to non-lexical lifetimes, there should
be no need to remember this mapping.

There is one additional wrinkle, actually, that I wanted to hide from
you but duty compels me to mention. In the course of translating
matches, it sometimes happens that certain code (namely guards) gets
executed multiple times. This means that a single lexical scope may
in fact correspond to multiple, disjoint SEME regions. So in fact our
mapping is from one scope to a vector of SEME regions.

### Drops

The primary purpose for scopes is to insert drops: while translating
the contents, we also accumulate lvalues that need to be dropped upon
exit from each scope. This is done by calling `schedule_drop`. Once a
drop is scheduled, whenever we branch out we will insert drops of all
those lvalues onto the outgoing edge. Note that we don't know the full
set of scheduled drops up front, and so whenever we exit from the
scope we only drop the values scheduled thus far. For example, consider
the scope S corresponding to this loop:

```
# let cond = true;
loop {
    let x = ..;
    if cond { break; }
    let y = ..;
}
```

When processing the `let x`, we will add one drop to the scope for
`x`.  The break will then insert a drop for `x`. When we process `let
y`, we will add another drop (in fact, to a subscope, but let's ignore
that for now); any later drops would also drop `y`.
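
For a concrete, runnable variant of the sketch above (ordinary Rust
rather than builder code; the `Noisy` type here is made up purely so
that the drops are observable), note how the `break` path only runs
the drops scheduled so far:

```
struct Noisy(&'static str);
impl Drop for Noisy {
    fn drop(&mut self) { println!("dropping {}", self.0); }
}

let cond = true;
loop {
    let x = Noisy("x");
    if cond { break; }     // exiting scope S here drops only `x`
    let y = Noisy("y");
    // falling off the end of an iteration drops `y`, then `x`
}
```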

### Early exit

There are numerous "normal" ways to early exit a scope: `break`,
`continue`, `return` (panics are handled separately). Whenever an
early exit occurs, the method `exit_scope` is called. It is given the
current point in execution where the early exit occurs, as well as the
scope you want to branch to (note that all early exits go to some
other enclosing scope). `exit_scope` will record this exit point and
also add all drops.

Panics are handled in a similar fashion, except that a panic always
returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call
`panic(p)` with the current point `p`. Or else you can call
`diverge_cleanup`, which will produce a block that you can branch to
which does the appropriate cleanup and then diverges. `panic(p)`
simply calls `diverge_cleanup()` and adds an edge from `p` to the
result.

### Loop scopes

In addition to the normal scope stack, we track a loop scope stack
that contains only loops. It tracks where a `break` and `continue`
should go to.
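
As a surface-level illustration (ordinary Rust rather than builder
code) of the three pieces of information a breakable scope records,
namely where `continue` branches, where `break` branches, and where a
`break`-with-value writes its result, consider:

```
let mut total = 0;
let found = 'outer: loop {
    for i in 0..10 {
        if i == 7 {
            // branches to 'outer's break block, writing `i` into
            // its break destination (the value of the whole loop)
            break 'outer i;
        }
        if i % 2 == 0 {
            // branches to the inner loop's continue block
            continue;
        }
        total += i;
    }
    break 0;
};
assert_eq!((found, total), (7, 1 + 3 + 5));
```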

*/

use build::{BlockAnd, BlockAndExtension, Builder, CFG};
use hair::LintLevel;
use rustc::middle::region;
use rustc::ty::{Ty, TyCtxt};
use rustc::hir::def_id::LOCAL_CRATE;
use rustc::mir::*;
use rustc::mir::transform::MirSource;
use syntax_pos::{Span};
use rustc_data_structures::indexed_vec::Idx;
use rustc_data_structures::fx::FxHashMap;

#[derive(Debug)]
pub struct Scope<'tcx> {
    /// The visibility scope this scope was created in.
    visibility_scope: VisibilityScope,

    /// the region span of this scope within source code.
    region_scope: region::Scope,

    /// the span of that region_scope
    region_scope_span: Span,

    /// Whether there's anything to do for the cleanup path, that is,
    /// when unwinding through this scope. This includes destructors,
    /// but not StorageDead statements, which don't get emitted at all
    /// for unwinding, for several reasons:
    ///  * clang doesn't emit llvm.lifetime.end for C++ unwinding
    ///  * LLVM's memory dependency analysis can't handle it atm
    ///  * polluting the cleanup MIR with StorageDead creates
    ///    landing pads even though there are no actual destructors
    ///  * freeing up stack space has no effect during unwinding
    needs_cleanup: bool,

    /// set of lvalues to drop when exiting this scope. This starts
    /// out empty but grows as variables are declared during the
    /// building process. This is a stack, so we always drop from the
    /// end of the vector (top of the stack) first.
    drops: Vec<DropData<'tcx>>,

    /// The cache for drop chain on “normal” exit into a particular BasicBlock.
    cached_exits: FxHashMap<(BasicBlock, region::Scope), BasicBlock>,

    /// The cache for drop chain on "generator drop" exit.
    cached_generator_drop: Option<BasicBlock>,
}

#[derive(Debug)]
struct DropData<'tcx> {
    /// span where drop obligation was incurred (typically where lvalue was declared)
    span: Span,

    /// lvalue to drop
    location: Lvalue<'tcx>,

    /// Whether this is a full value Drop, or just a StorageDead.
    kind: DropKind
}

#[derive(Debug, Default, Clone, Copy)]
struct CachedBlock {
    /// The cached block for the cleanups-on-diverge path. This block
    /// contains code to run the current drop and all the preceding
    /// drops (i.e. those having lower index in Drop’s Scope drop
    /// array)
    unwind: Option<BasicBlock>,

    /// The cached block for unwinds during cleanups-on-generator-drop path
    generator_drop: Option<BasicBlock>,
}

#[derive(Debug)]
enum DropKind {
    Value {
        cached_block: CachedBlock,
    },
    Storage
}

#[derive(Clone, Debug)]
pub struct BreakableScope<'tcx> {
    /// Region scope of the loop
    pub region_scope: region::Scope,
    /// Where the body of the loop begins. `None` if this is a block
    /// rather than a loop.
    pub continue_block: Option<BasicBlock>,
    /// Block to branch into when the loop or block terminates (either by being `break`-en out
    /// from, or by having its condition become false)
    pub break_block: BasicBlock,
    /// The destination of the loop/block expression itself (i.e. where to put the result of a
    /// `break` expression)
    pub break_destination: Lvalue<'tcx>,
}

impl CachedBlock {
    fn invalidate(&mut self) {
        self.generator_drop = None;
        self.unwind = None;
    }

    fn get(&self, generator_drop: bool) -> Option<BasicBlock> {
        if generator_drop {
            self.generator_drop
        } else {
            self.unwind
        }
    }

    fn ref_mut(&mut self, generator_drop: bool) -> &mut Option<BasicBlock> {
        if generator_drop {
            &mut self.generator_drop
        } else {
            &mut self.unwind
        }
    }
}

impl DropKind {
    fn may_panic(&self) -> bool {
        match *self {
            DropKind::Value { .. } => true,
            DropKind::Storage => false
        }
    }
}

impl<'tcx> Scope<'tcx> {
    /// Invalidate all the cached blocks in the scope.
    ///
    /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
    /// larger extent of code.
    ///
    /// `unwind` controls whether caches for the unwind branch are also invalidated.
    fn invalidate_cache(&mut self, unwind: bool) {
        self.cached_exits.clear();
        if !unwind { return; }
        for dropdata in &mut self.drops {
            if let DropKind::Value { ref mut cached_block } = dropdata.kind {
                cached_block.invalidate();
            }
        }
    }

    /// Returns the cached entrypoint for diverging exit from this scope.
    ///
    /// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order for
    /// this method to work correctly.
    fn cached_block(&self, generator_drop: bool) -> Option<BasicBlock> {
        let mut drops = self.drops.iter().rev().filter_map(|data| {
            match data.kind {
                DropKind::Value { cached_block } => {
                    Some(cached_block.get(generator_drop))
                }
                DropKind::Storage => None
            }
        });
        if let Some(cached_block) = drops.next() {
            Some(cached_block.expect("drop cache is not filled"))
        } else {
            None
        }
    }

    /// Given a span and this scope's visibility scope, make a SourceInfo.
    fn source_info(&self, span: Span) -> SourceInfo {
        SourceInfo {
            span,
            scope: self.visibility_scope
        }
    }
}

impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
    // Adding and removing scopes
    // ==========================
    /// Start a breakable scope, which tracks where `continue` and `break`
    /// should branch to. See module comment for more details.
    ///
    /// Returns the result of the closure `f`.
    pub fn in_breakable_scope<F, R>(&mut self,
                                    loop_block: Option<BasicBlock>,
                                    break_block: BasicBlock,
                                    break_destination: Lvalue<'tcx>,
                                    f: F) -> R
        where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> R
    {
        let region_scope = self.topmost_scope();
        let scope = BreakableScope {
            region_scope,
            continue_block: loop_block,
            break_block,
            break_destination,
        };
        self.breakable_scopes.push(scope);
        let res = f(self);
        let breakable_scope = self.breakable_scopes.pop().unwrap();
        assert!(breakable_scope.region_scope == region_scope);
        res
    }

    pub fn in_opt_scope<F, R>(&mut self,
                              opt_scope: Option<(region::Scope, SourceInfo)>,
                              mut block: BasicBlock,
                              f: F)
                              -> BlockAnd<R>
        where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
    {
        debug!("in_opt_scope(opt_scope={:?}, block={:?})", opt_scope, block);
        if let Some(region_scope) = opt_scope { self.push_scope(region_scope); }
        let rv = unpack!(block = f(self));
        if let Some(region_scope) = opt_scope {
            unpack!(block = self.pop_scope(region_scope, block));
        }
        debug!("in_scope: exiting opt_scope={:?} block={:?}", opt_scope, block);
        block.and(rv)
    }

    /// Convenience wrapper that pushes a scope and then executes `f`
    /// to build its contents, popping the scope afterwards.
    pub fn in_scope<F, R>(&mut self,
                          region_scope: (region::Scope, SourceInfo),
                          lint_level: LintLevel,
                          mut block: BasicBlock,
                          f: F)
                          -> BlockAnd<R>
        where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
    {
        debug!("in_scope(region_scope={:?}, block={:?})", region_scope, block);
        let visibility_scope = self.visibility_scope;
        let tcx = self.hir.tcx();
        if let LintLevel::Explicit(node_id) = lint_level {
            let same_lint_scopes = tcx.dep_graph.with_ignore(|| {
                let sets = tcx.lint_levels(LOCAL_CRATE);
                let parent_hir_id =
                    tcx.hir.definitions().node_to_hir_id(
                        self.visibility_scope_info[visibility_scope].lint_root
                            );
                let current_hir_id =
                    tcx.hir.definitions().node_to_hir_id(node_id);
                sets.lint_level_set(parent_hir_id) ==
                    sets.lint_level_set(current_hir_id)
            });

            if !same_lint_scopes {
                self.visibility_scope =
                    self.new_visibility_scope(region_scope.1.span, lint_level,
                                              None);
            }
        }
        self.push_scope(region_scope);
        let rv = unpack!(block = f(self));
        unpack!(block = self.pop_scope(region_scope, block));
        self.visibility_scope = visibility_scope;
        debug!("in_scope: exiting region_scope={:?} block={:?}", region_scope, block);
        block.and(rv)
    }

    /// Push a scope onto the stack. You can then build code in this
    /// scope and call `pop_scope` afterwards. Note that these two
    /// calls must be paired; using `in_scope` as a convenience
    /// wrapper may be preferable.
    pub fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo)) {
        debug!("push_scope({:?})", region_scope);
        let vis_scope = self.visibility_scope;
        self.scopes.push(Scope {
            visibility_scope: vis_scope,
            region_scope: region_scope.0,
            region_scope_span: region_scope.1.span,
            needs_cleanup: false,
            drops: vec![],
            cached_generator_drop: None,
            cached_exits: FxHashMap()
        });
    }

    /// Pops a scope, which should have region scope `region_scope`,
    /// adding any drops onto the end of `block` that are needed.
    /// This must match 1-to-1 with `push_scope`.
    pub fn pop_scope(&mut self,
                     region_scope: (region::Scope, SourceInfo),
                     mut block: BasicBlock)
                     -> BlockAnd<()> {
        debug!("pop_scope({:?}, {:?})", region_scope, block);
        // If we are emitting a `drop` statement, we need to have the cached
        // diverge cleanup pads ready in case that drop panics.
        let may_panic =
            self.scopes.last().unwrap().drops.iter().any(|s| s.kind.may_panic());
        if may_panic {
            self.diverge_cleanup();
        }
        let scope = self.scopes.pop().unwrap();
        assert_eq!(scope.region_scope, region_scope.0);

        self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope);
        unpack!(block = build_scope_drops(&mut self.cfg,
                                          &scope,
                                          &self.scopes,
                                          block,
                                          self.arg_count,
                                          false));

        block.unit()
    }


    /// Branch out of `block` to `target`, exiting all scopes up to
    /// and including `region_scope`.  This will insert whatever drops are
    /// needed, as well as tracking this exit for the SEME region. See
    /// module comment for details.
    pub fn exit_scope(&mut self,
                      span: Span,
                      region_scope: (region::Scope, SourceInfo),
                      mut block: BasicBlock,
                      target: BasicBlock) {
        debug!("exit_scope(region_scope={:?}, block={:?}, target={:?})",
               region_scope, block, target);
        let scope_count = 1 + self.scopes.iter().rev()
            .position(|scope| scope.region_scope == region_scope.0)
            .unwrap_or_else(|| {
                span_bug!(span, "region_scope {:?} does not enclose", region_scope)
            });
        let len = self.scopes.len();
        assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes");

        // If we are emitting a `drop` statement, we need to have the cached
        // diverge cleanup pads ready in case that drop panics.
        let may_panic = self.scopes[(len - scope_count)..].iter()
            .any(|s| s.drops.iter().any(|s| s.kind.may_panic()));
        if may_panic {
            self.diverge_cleanup();
        }

        {
        let mut rest = &mut self.scopes[(len - scope_count)..];
        while let Some((scope, rest_)) = {rest}.split_last_mut() {
            rest = rest_;
            block = if let Some(&e) = scope.cached_exits.get(&(target, region_scope.0)) {
                self.cfg.terminate(block, scope.source_info(span),
                                   TerminatorKind::Goto { target: e });
                return;
            } else {
                let b = self.cfg.start_new_block();
                self.cfg.terminate(block, scope.source_info(span),
                                   TerminatorKind::Goto { target: b });
                scope.cached_exits.insert((target, region_scope.0), b);
                b
            };

            // End all regions for scopes out of which we are breaking.
            self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope);

            unpack!(block = build_scope_drops(&mut self.cfg,
                                              scope,
                                              rest,
                                              block,
                                              self.arg_count,
                                              false));
        }
        }
        let scope = &self.scopes[len - scope_count];
        self.cfg.terminate(block, scope.source_info(span),
                           TerminatorKind::Goto { target: target });
    }

    /// Creates a path that performs all required cleanup for dropping a generator.
    ///
    /// This path terminates in GeneratorDrop. Returns the start of the path.
    /// None indicates there’s no cleanup to do at this point.
    pub fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> {
        if !self.scopes.iter().any(|scope| scope.needs_cleanup) {
            return None;
        }

        // Fill in the cache
        self.diverge_cleanup_gen(true);

        let src_info = self.scopes[0].source_info(self.fn_span);
        let mut block = self.cfg.start_new_block();
        let result = block;
        let mut rest = &mut self.scopes[..];

        while let Some((scope, rest_)) = {rest}.split_last_mut() {
            rest = rest_;
            if !scope.needs_cleanup {
                continue;
            }
            block = if let Some(b) = scope.cached_generator_drop {
                self.cfg.terminate(block, src_info,
                                   TerminatorKind::Goto { target: b });
                return Some(result);
            } else {
                let b = self.cfg.start_new_block();
                scope.cached_generator_drop = Some(b);
                self.cfg.terminate(block, src_info,
                                   TerminatorKind::Goto { target: b });
                b
            };
            unpack!(block = build_scope_drops(&mut self.cfg,
                                              scope,
                                              rest,
                                              block,
                                              self.arg_count,
                                              true));

            // End all regions for scopes out of which we are breaking.
            self.cfg.push_end_region(self.hir.tcx(), block, src_info, scope.region_scope);
        }

        self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop);

        Some(result)
    }

    /// Creates a new visibility scope, nested in the current one.
    pub fn new_visibility_scope(&mut self,
                                span: Span,
                                lint_level: LintLevel,
                                safety: Option<Safety>) -> VisibilityScope {
        let parent = self.visibility_scope;
        debug!("new_visibility_scope({:?}, {:?}, {:?}) - parent({:?})={:?}",
               span, lint_level, safety,
               parent, self.visibility_scope_info.get(parent));
        let scope = self.visibility_scopes.push(VisibilityScopeData {
            span,
            parent_scope: Some(parent),
        });
        let scope_info = VisibilityScopeInfo {
            lint_root: if let LintLevel::Explicit(lint_root) = lint_level {
                lint_root
            } else {
                self.visibility_scope_info[parent].lint_root
            },
            safety: safety.unwrap_or_else(|| {
                self.visibility_scope_info[parent].safety
            })
        };
        self.visibility_scope_info.push(scope_info);
        scope
    }

    // Finding scopes
    // ==============
    /// Finds the breakable scope for a given label. This is used for
    /// resolving `break` and `continue`.
    pub fn find_breakable_scope(&mut self,
                                span: Span,
                                label: region::Scope)
                                -> &mut BreakableScope<'tcx> {
        // find the loop-scope with the correct id
        self.breakable_scopes.iter_mut()
            .rev()
            .filter(|breakable_scope| breakable_scope.region_scope == label)
            .next()
            .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found"))
    }

    /// Given a span and the current visibility scope, make a SourceInfo.
    pub fn source_info(&self, span: Span) -> SourceInfo {
        SourceInfo {
            span,
            scope: self.visibility_scope
        }
    }

    /// Returns the `region::Scope` of the scope which should be exited by a
    /// return.
    pub fn region_scope_of_return_scope(&self) -> region::Scope {
        // The outermost scope (`scopes[0]`) will be the `CallSiteScope`.
        // We want `scopes[1]`, which is the `ParameterScope`.
        assert!(self.scopes.len() >= 2);
        assert!(match self.scopes[1].region_scope.data() {
            region::ScopeData::Arguments(_) => true,
            _ => false,
        });
        self.scopes[1].region_scope
    }

    /// Returns the topmost active scope, which is known to be alive until
    /// the next scope expression.
    pub fn topmost_scope(&self) -> region::Scope {
        self.scopes.last().expect("topmost_scope: no scopes present").region_scope
    }

    /// Returns the scope that we should use as the lifetime of an
    /// operand. Basically, an operand must live until it is consumed.
    /// This is similar to, but not quite the same as, the temporary
    /// scope (which can be larger or smaller).
    ///
    /// Consider:
    ///
    ///     let x = foo(bar(X, Y));
    ///
    /// We wish to pop the storage for X and Y after `bar()` is
    /// called, not after the whole `let` is completed.
    ///
    /// As another example, if the second argument diverges:
    ///
    ///     foo(Box::new(2), panic!())
    ///
    /// We would allocate the box but then free it on the unwinding
    /// path; we would also emit a free on the 'success' path from
    /// panic, but that will turn out to be removed as dead-code.
    ///
    /// When building statics/constants, returns `None` since
    /// intermediate values do not have to be dropped in that case.
    pub fn local_scope(&self) -> Option<region::Scope> {
        match self.hir.src {
            MirSource::Const(_) |
            MirSource::Static(..) =>
                // No need to free storage in this context.
                None,
            MirSource::Fn(_) =>
                Some(self.topmost_scope()),
            MirSource::Promoted(..) |
            MirSource::GeneratorDrop(..) =>
                bug!(),
        }
    }

    // Scheduling drops
    // ================
    /// Indicates that `lvalue` should be dropped on exit from
    /// `region_scope`.
    pub fn schedule_drop(&mut self,
                         span: Span,
                         region_scope: region::Scope,
                         lvalue: &Lvalue<'tcx>,
                         lvalue_ty: Ty<'tcx>) {
        let needs_drop = self.hir.needs_drop(lvalue_ty);
        let drop_kind = if needs_drop {
            DropKind::Value { cached_block: CachedBlock::default() }
        } else {
            // Only temps and vars need their storage dead.
            match *lvalue {
                Lvalue::Local(index) if index.index() > self.arg_count => DropKind::Storage,
                _ => return
            }
        };

        for scope in self.scopes.iter_mut().rev() {
            let this_scope = scope.region_scope == region_scope;
            // When building drops, we try to cache chains of drops in such a way that these drops
            // could be reused by the drops which would branch into the cached (already built)
            // blocks.  This, however, means that whenever we add a drop into a scope which already
            // had some blocks built (and thus, cached) for it, we must invalidate all caches which
            // might branch into the scope which had a drop just added to it. This is necessary,
            // because otherwise some other code might use the cache to branch into already built
            // chain of drops, essentially ignoring the newly added drop.
            //
            // For example, consider there are two scopes with a drop in each. These are built and
            // thus the caches are filled:
            //
            // +--------------------------------------------------------+
            // | +---------------------------------+                    |
            // | | +--------+     +-------------+  |  +---------------+ |
            // | | | return | <-+ | drop(outer) | <-+ |  drop(middle) | |
            // | | +--------+     +-------------+  |  +---------------+ |
            // | +------------|outer_scope cache|--+                    |
            // +------------------------------|middle_scope cache|------+
            //
            // Now, a new, inner-most scope is added along with a new drop into both inner-most and
            // outer-most scopes:
            //
            // +------------------------------------------------------------+
            // | +----------------------------------+                       |
            // | | +--------+      +-------------+  |   +---------------+   | +-------------+
            // | | | return | <+   | drop(new)   | <-+  |  drop(middle) | <--+| drop(inner) |
            // | | +--------+  |   | drop(outer) |  |   +---------------+   | +-------------+
            // | |             +-+ +-------------+  |                       |
            // | +---|invalid outer_scope cache|----+                       |
            // +---------------------|invalid middle_scope cache|-----------+
            //
            // If, when adding `drop(new)` we do not invalidate the cached blocks for both
            // outer_scope and middle_scope, then, when building drops for the inner (right-most)
            // scope, the old, cached blocks, without `drop(new)` will get used, producing the
            // wrong results.
            //
            // The cache and its invalidation for unwind branch is somewhat special. The cache is
            // per-drop, rather than per scope, which has several different implications. Adding
            // a new drop into a scope will not invalidate cached blocks of the prior drops in the
            // scope. That is true, because none of the already existing drops will have an edge
            // into a block with the newly added drop.
            //
            // Note that this code iterates scopes from the inner-most to the outer-most,
            // invalidating caches of each scope visited. This way the bare minimum of the
            // caches gets invalidated, i.e. if a new drop is added into the middle scope, the
            // cache of the outer scope stays intact.
            let invalidate_unwind = needs_drop && !this_scope;
            scope.invalidate_cache(invalidate_unwind);
            if this_scope {
                if let DropKind::Value { .. } = drop_kind {
                    scope.needs_cleanup = true;
                }
                let region_scope_span = region_scope.span(self.hir.tcx(),
                                                          &self.hir.region_scope_tree);
                // Attribute scope exit drops to scope's closing brace
                let scope_end = region_scope_span.with_lo(region_scope_span.hi());
                scope.drops.push(DropData {
                    span: scope_end,
                    location: lvalue.clone(),
                    kind: drop_kind
                });
                return;
            }
        }
        span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, lvalue);
    }

    // Other
    // =====
    /// Creates a path that performs all required cleanup for unwinding.
    ///
    /// This path terminates in Resume. Returns the start of the path.
    /// See module comment for more details. None indicates there’s no
    /// cleanup to do at this point.
    pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> {
        self.diverge_cleanup_gen(false)
    }

    fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> Option<BasicBlock> {
        if !self.scopes.iter().any(|scope| scope.needs_cleanup) {
            return None;
        }
        assert!(!self.scopes.is_empty()); // or `any` above would be false

        let Builder { ref mut cfg, ref mut scopes,
                      ref mut cached_resume_block, .. } = *self;

        // Build up the drops in **reverse** order. The end result will
        // look like:
        //
        //    scopes[n] -> scopes[n-1] -> ... -> scopes[0]
        //
        // However, we build this in **reverse order**. That is, we
        // process scopes[0], then scopes[1], etc, pointing each one at
        // the result generated from the one before. Along the way, we
        // store caches. If everything is cached, we'll just walk right
        // to left reading the cached results but never create anything.

        // To start, create the resume terminator.
        let mut target = if let Some(target) = *cached_resume_block {
            target
        } else {
            let resumeblk = cfg.start_new_cleanup_block();
            cfg.terminate(resumeblk,
                          scopes[0].source_info(self.fn_span),
                          TerminatorKind::Resume);
            *cached_resume_block = Some(resumeblk);
            resumeblk
        };

        for scope in scopes.iter_mut() {
            target = build_diverge_scope(self.hir.tcx(), cfg, scope.region_scope_span,
                                         scope, target, generator_drop);
        }
        Some(target)
    }

    /// Utility function for *non*-scope code to build their own drops
    pub fn build_drop(&mut self,
                      block: BasicBlock,
                      span: Span,
                      location: Lvalue<'tcx>,
                      ty: Ty<'tcx>) -> BlockAnd<()> {
        if !self.hir.needs_drop(ty) {
            return block.unit();
        }
        let source_info = self.source_info(span);
        let next_target = self.cfg.start_new_block();
        let diverge_target = self.diverge_cleanup();
        self.cfg.terminate(block, source_info,
                           TerminatorKind::Drop {
                               location,
                               target: next_target,
                               unwind: diverge_target,
                           });
        next_target.unit()
    }

    /// Utility function for *non*-scope code to build their own drops
    pub fn build_drop_and_replace(&mut self,
                                  block: BasicBlock,
                                  span: Span,
                                  location: Lvalue<'tcx>,
                                  value: Operand<'tcx>) -> BlockAnd<()> {
        let source_info = self.source_info(span);
        let next_target = self.cfg.start_new_block();
        let diverge_target = self.diverge_cleanup();
        self.cfg.terminate(block, source_info,
                           TerminatorKind::DropAndReplace {
                               location,
                               value,
                               target: next_target,
                               unwind: diverge_target,
                           });
        next_target.unit()
    }

    /// Create an Assert terminator and return the success block.
    /// If the boolean condition operand is not the expected value,
    /// a runtime panic will be caused with the given message.
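    ///
    /// For intuition only, surface code like the bounds-checked indexing
    /// below is the kind of thing that gets lowered to an `Assert` terminator
    /// guarding the actual access (an illustrative sketch, not builder code):
    ///
    /// ```
    /// fn get(xs: &[u32], i: usize) -> u32 {
    ///     // in MIR: an Assert checks `i < xs.len()` (expected: true) and
    ///     // panics with a bounds-check message; its success block does the load
    ///     xs[i]
    /// }
    /// ```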
    pub fn assert(&mut self, block: BasicBlock,
                  cond: Operand<'tcx>,
                  expected: bool,
                  msg: AssertMessage<'tcx>,
                  span: Span)
                  -> BasicBlock {
        let source_info = self.source_info(span);

        let success_block = self.cfg.start_new_block();
        let cleanup = self.diverge_cleanup();

        self.cfg.terminate(block, source_info,
                           TerminatorKind::Assert {
                               cond,
                               expected,
                               msg,
                               target: success_block,
                               cleanup,
                           });

        success_block
    }
}

/// Builds drops for pop_scope and exit_scope.
fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
                           scope: &Scope<'tcx>,
                           earlier_scopes: &[Scope<'tcx>],
                           mut block: BasicBlock,
                           arg_count: usize,
                           generator_drop: bool)
                           -> BlockAnd<()> {
    debug!("build_scope_drops({:?} -> {:?})", block, scope);
    let mut iter = scope.drops.iter().rev().peekable();
    while let Some(drop_data) = iter.next() {
        let source_info = scope.source_info(drop_data.span);
        match drop_data.kind {
            DropKind::Value { .. } => {
                // Try to find the next block with its cached block
                // for us to diverge into in case the drop panics.
                let on_diverge = iter.peek().iter().filter_map(|dd| {
                    match dd.kind {
                        DropKind::Value { cached_block } => {
                            let result = cached_block.get(generator_drop);
                            if result.is_none() {
                                span_bug!(drop_data.span, "cached block not present?")
                            }
                            result
                        },
                        DropKind::Storage => None
                    }
                }).next();
                // If there are no `cached_block`s within the current scope,
                // we must look for one in the enclosing scope.
                let on_diverge = on_diverge.or_else(|| {
                    earlier_scopes.iter().rev().flat_map(|s| s.cached_block(generator_drop)).next()
                });
                let next = cfg.start_new_block();
                cfg.terminate(block, source_info, TerminatorKind::Drop {
                    location: drop_data.location.clone(),
                    target: next,
                    unwind: on_diverge
                });
                block = next;
            }
            DropKind::Storage => {}
        }

        // We do not need to emit StorageDead for generator drops
        if generator_drop {
            continue
        }

        // Drop the storage for both value and storage drops.
        // Only temps and vars need their storage dead.
        match drop_data.location {
            Lvalue::Local(index) if index.index() > arg_count => {
                cfg.push(block, Statement {
                    source_info,
                    kind: StatementKind::StorageDead(index)
                });
            }
            _ => continue
        }
    }
    block.unit()
}

fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
                                       cfg: &mut CFG<'tcx>,
                                       span: Span,
                                       scope: &mut Scope<'tcx>,
                                       mut target: BasicBlock,
                                       generator_drop: bool)
                                       -> BasicBlock
{
    // Build up the drops in **reverse** order. The end result will
    // look like:
    //
    //    [EndRegion Block] -> [drops[n]] -...-> [drops[0]] -> [Free] -> [target]
    //    |                                                         |
    //    +---------------------------------------------------------+
    //     code for scope
    //
    // The code in this function reads from right to left. At each
    // point, we check for cached blocks representing the
    // remainder. If everything is cached, we'll just walk right to
    // left reading the cached results but never create anything.

    let visibility_scope = scope.visibility_scope;
    let source_info = |span| SourceInfo {
        span,
        scope: visibility_scope
    };

    // Next, build up the drops. Here we iterate the vector in
    // *forward* order, so that we generate drops[0] first (right to
    // left in diagram above).
    for (j, drop_data) in scope.drops.iter_mut().enumerate() {
        debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data);
        // Only full value drops are emitted in the diverging path,
        // not StorageDead.
        //
        // Note: This may not actually be what we desire (are we
        // "freeing" stack storage as we unwind, or merely observing a
        // frozen stack)? In particular, the intent may have been to
        // match the behavior of clang, but on inspection eddyb says
        // this is not what clang does.
        let cached_block = match drop_data.kind {
            DropKind::Value { ref mut cached_block } => cached_block.ref_mut(generator_drop),
            DropKind::Storage => continue
        };
        target = if let Some(cached_block) = *cached_block {
            cached_block
        } else {
            let block = cfg.start_new_cleanup_block();
            cfg.terminate(block, source_info(drop_data.span),
                          TerminatorKind::Drop {
                              location: drop_data.location.clone(),
                              target,
                              unwind: None
                          });
            *cached_block = Some(block);
            block
        };
    }

    // Finally, push the EndRegion block, used by mir-borrowck. (Block
    // becomes trivial goto after pass that removes all EndRegions.)
    {
        let block = cfg.start_new_cleanup_block();
        cfg.push_end_region(tcx, block, source_info(span), scope.region_scope);
        cfg.terminate(block, source_info(span), TerminatorKind::Goto { target: target });
        target = block
    }

    target
}