src/librustc_mir/build/scope.rs
1 /*!
2 Managing the scope stack. The scopes are tied to lexical scopes, so as
3 we descend the HAIR, we push a scope on the stack, build its
4 contents, and then pop it off. Every scope is named by a
5 `region::Scope`.
6
7 ### SEME Regions
8
9 When pushing a new scope, we record the current point in the graph (a
10 basic block); this marks the entry to the scope. We then generate more
11 stuff in the control-flow graph. Whenever the scope is exited, either
12 via a `break` or `return` or just by fallthrough, that marks an exit
13 from the scope. Each lexical scope thus corresponds to a single-entry,
14 multiple-exit (SEME) region in the control-flow graph.
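
For example, in this illustrative snippet (not code from the compiler itself),
the scope of the closure body is entered at one point but can be left along
two different edges, each of which counts as an exit from the scope:

```
# let cond = true;
let _result = (|| {
    let s = String::new();  // entry: the body scope begins here
    if cond { return 1; }   // one exit: an early `return` leaves the scope
    drop(s);
    2                       // another exit: normal fallthrough out of the scope
})();
```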
15
16 For now, we keep a mapping from each `region::Scope` to its
17 corresponding SEME region for later reference (see caveat in next
18 paragraph). This is because (lexical) lifetimes are tied to these
19 region scopes. Eventually, when we shift to non-lexical lifetimes,
20 there should be no need to remember this mapping.
21
22 ### Not so SEME Regions
23
24 In the course of building matches, it sometimes happens that certain code
25 (namely guards) gets executed multiple times. This means that a single lexical
26 scope may in fact correspond to multiple, disjoint SEME regions. So in fact our
27 mapping is from one scope to a vector of SEME regions.
28
29 Also in matches, the scopes assigned to arms are not even SEME regions! Each
30 arm has a single region with one entry for each pattern. We manually
31 manipulate the scheduled drops in this scope to avoid dropping things multiple
32 times, although drop elaboration would clean this up for value drops.
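
For example, in this illustrative match (not taken from any particular test),
the arm scope is entered once per pattern alternative but has a single exit:

```
# fn guard() -> bool { true }
# let s = Some(1);
match s {
    Some(1) | Some(2) if guard() => {} // two entries into the arm scope
    _ => {}                            // (one per pattern), but one exit
}
```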
33
34 ### Drops
35
36 The primary purpose for scopes is to insert drops: while building
37 the contents, we also accumulate places that need to be dropped upon
38 exit from each scope. This is done by calling `schedule_drop`. Once a
39 drop is scheduled, whenever we branch out we will insert drops of all
40 those places onto the outgoing edge. Note that we don't know the full
41 set of scheduled drops up front, and so whenever we exit from the
42 scope we only drop the values scheduled thus far. For example, consider
43 the scope S corresponding to this loop:
44
45 ```
46 # let cond = true;
47 loop {
48     let x = ..;
49     if cond { break; }
50     let y = ..;
51 }
52 ```
53
54 When processing the `let x`, we will add one drop to the scope for
55 `x`.  The break will then insert a drop for `x`. When we process `let
56 y`, we will add another drop (in fact, to a subscope, but let's ignore
57 that for now); any later exits would also drop `y`.
58
59 ### Early exit
60
61 There are numerous "normal" ways to early exit a scope: `break`,
62 `continue`, `return` (panics are handled separately). Whenever an
63 early exit occurs, the method `exit_scope` is called. It is given the
64 current point in execution where the early exit occurs, as well as the
65 scope you want to branch to (note that all early exits go to some
66 other enclosing scope). `exit_scope` will record this exit point and
67 also add all drops.
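
For instance, in this illustrative snippet, the `break` exits both the inner
block scope and the loop body scope, so drops for `inner` and then `outer`
are inserted on the outgoing edge before branching past the loop:

```
# let cond = true;
loop {
    let outer = String::new();
    {
        let inner = String::new();
        if cond {
            break;        // early exit through two scopes at once
        }
        drop(inner);
    }
    drop(outer);
}
```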
68
69 Panics are handled in a similar fashion, except that a panic always
70 unwinds out through the cleanup path to a block that resumes
71 unwinding. To set this up, call `diverge_cleanup`, which produces a
72 cleanup block that performs the scheduled drops and then continues
73 unwinding (the path terminates in `Resume`). Terminators that may
74 panic, such as `Drop` and `Assert`, use this block as their `unwind`
75 or `cleanup` target.
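
For instance, in this illustrative snippet, if the `panic!` is reached, `s`
is dropped along the cleanup path rather than on the normal exit edge:

```
# let cond = false;
let s = String::new();
if cond {
    panic!("boom"); // unwinding drops `s` via the cleanup blocks
}
drop(s);
```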
76
77 ### Loop scopes
78
79 In addition to the normal scope stack, we track a loop scope stack
80 that contains only loops. It tracks where a `break` or `continue`
81 should branch to.
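
For example (an illustrative snippet), this stack is what a labeled `break`
consults to find the loop it targets, while a bare `continue` resolves to the
innermost enclosing loop:

```
# let mut n = 0;
'outer: loop {
    loop {
        n += 1;
        if n > 3 {
            break 'outer;  // targets the outer loop's breakable scope
        }
        continue;          // targets the innermost loop's breakable scope
    }
}
```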
82
83 */
84
85 use crate::build::{BlockAnd, BlockAndExtension, Builder, CFG};
86 use crate::hair::LintLevel;
87 use rustc::middle::region;
88 use rustc::ty::Ty;
89 use rustc::hir;
90 use rustc::mir::*;
91 use syntax_pos::{Span, DUMMY_SP};
92 use rustc_data_structures::fx::FxHashMap;
93 use std::collections::hash_map::Entry;
94 use std::mem;
95
96 #[derive(Debug)]
97 pub struct Scope<'tcx> {
98     /// The source scope this scope was created in.
99     source_scope: SourceScope,
100
101     /// The region scope (lexical scope) that this scope corresponds to.
102     region_scope: region::Scope,
103
104     /// The span of that `region_scope` within the source code.
105     region_scope_span: Span,
106
107     /// Whether there's anything to do for the cleanup path, that is,
108     /// when unwinding through this scope. This includes destructors,
109     /// but not StorageDead statements, which don't get emitted at all
110     /// for unwinding, for several reasons:
111     ///  * clang doesn't emit llvm.lifetime.end for C++ unwinding
112     ///  * LLVM's memory dependency analysis can't handle it atm
113     ///  * polluting the cleanup MIR with StorageDead creates
114     ///    landing pads even though there are no actual destructors
115     ///  * freeing up stack space has no effect during unwinding
116     /// Note that for generators we do emit StorageDead statements,
117     /// for use by optimizations in the MIR generator transform.
118     needs_cleanup: bool,
119
120     /// set of places to drop when exiting this scope. This starts
121     /// out empty but grows as variables are declared during the
122     /// building process. This is a stack, so we always drop from the
123     /// end of the vector (top of the stack) first.
124     drops: Vec<DropData<'tcx>>,
125
126     /// The cache for drop chain on “normal” exit into a particular BasicBlock.
127     cached_exits: FxHashMap<(BasicBlock, region::Scope), BasicBlock>,
128
129     /// The cache for drop chain on "generator drop" exit.
130     cached_generator_drop: Option<BasicBlock>,
131
132     /// The cache for drop chain on "unwind" exit.
133     cached_unwind: CachedBlock,
134 }
135
136 #[derive(Debug)]
137 struct DropData<'tcx> {
138     /// span where drop obligation was incurred (typically where place was declared)
139     span: Span,
140
141     /// place to drop
142     location: Place<'tcx>,
143
144     /// Whether this is a value Drop or a StorageDead.
145     kind: DropKind,
146 }
147
148 #[derive(Debug, Default, Clone, Copy)]
149 pub(crate) struct CachedBlock {
150     /// The cached block for the cleanups-on-diverge path. This block
151     /// contains code to run the current drop and all the preceding
152     /// drops (i.e., those having a lower index in the scope's `drops`
153     /// array).
154     unwind: Option<BasicBlock>,
155
156     /// The cached block for unwinds during cleanups-on-generator-drop path
157     ///
158     /// This is split from the standard unwind path here to prevent drop
159     /// elaboration from creating drop flags that would have to be captured
160     /// by the generator. I'm not sure how important this optimization is,
161     /// but it is here.
162     generator_drop: Option<BasicBlock>,
163 }
164
165 #[derive(Debug)]
166 pub(crate) enum DropKind {
167     Value {
168         cached_block: CachedBlock,
169     },
170     Storage
171 }
172
173 #[derive(Clone, Debug)]
174 pub struct BreakableScope<'tcx> {
175     /// Region scope of the loop or block.
176     pub region_scope: region::Scope,
177     /// Where the body of the loop begins; `None` if this is a block rather than a loop.
178     pub continue_block: Option<BasicBlock>,
179     /// Block to branch into when the loop or block terminates (either by being
180     /// `break`-en out from, or by its condition becoming false).
181     pub break_block: BasicBlock,
182     /// The destination of the loop/block expression itself (i.e., where to put the result of a
183     /// `break` expression)
184     pub break_destination: Place<'tcx>,
185 }
186
187 impl CachedBlock {
188     fn invalidate(&mut self) {
189         self.generator_drop = None;
190         self.unwind = None;
191     }
192
193     fn get(&self, generator_drop: bool) -> Option<BasicBlock> {
194         if generator_drop {
195             self.generator_drop
196         } else {
197             self.unwind
198         }
199     }
200
201     fn ref_mut(&mut self, generator_drop: bool) -> &mut Option<BasicBlock> {
202         if generator_drop {
203             &mut self.generator_drop
204         } else {
205             &mut self.unwind
206         }
207     }
208 }
209
210 impl DropKind {
211     fn may_panic(&self) -> bool {
212         match *self {
213             DropKind::Value { .. } => true,
214             DropKind::Storage => false
215         }
216     }
217 }
218
219 impl<'tcx> Scope<'tcx> {
220     /// Invalidates all the cached blocks in the scope.
221     ///
222     /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
223     /// larger extent of code.
224     ///
225     /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`.
226     /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
227     /// top-of-scope (as opposed to dependent scopes).
228     fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
229         // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
230         // with lots of `try!`?
231
232         // cached exits drop storage and refer to the top-of-scope
233         self.cached_exits.clear();
234
235         if !storage_only {
236             // the current generator drop and unwind ignore
237             // storage but refer to top-of-scope
238             self.cached_generator_drop = None;
239             self.cached_unwind.invalidate();
240         }
241
242         if !storage_only && !this_scope_only {
243             for drop_data in &mut self.drops {
244                 if let DropKind::Value { ref mut cached_block } = drop_data.kind {
245                     cached_block.invalidate();
246                 }
247             }
248         }
249     }
250
251     /// Given a span and this scope's source scope, make a SourceInfo.
252     fn source_info(&self, span: Span) -> SourceInfo {
253         SourceInfo {
254             span,
255             scope: self.source_scope
256         }
257     }
258 }
259
260 impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
261     // Adding and removing scopes
262     // ==========================
263     /// Start a breakable scope, which tracks where `continue` and `break`
264     /// should branch to. See module comment for more details.
265     ///
266     /// Returns the result of the closure `f`.
267     pub fn in_breakable_scope<F, R>(&mut self,
268                                     loop_block: Option<BasicBlock>,
269                                     break_block: BasicBlock,
270                                     break_destination: Place<'tcx>,
271                                     f: F) -> R
272         where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> R
273     {
274         let region_scope = self.topmost_scope();
275         let scope = BreakableScope {
276             region_scope,
277             continue_block: loop_block,
278             break_block,
279             break_destination,
280         };
281         self.breakable_scopes.push(scope);
282         let res = f(self);
283         let breakable_scope = self.breakable_scopes.pop().unwrap();
284         assert!(breakable_scope.region_scope == region_scope);
285         res
286     }
287
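    /// Convenience wrapper around `push_scope`/`pop_scope`: pushes a scope
    /// only if `opt_scope` is `Some`, runs `f` to build the contents, and
    /// pops the scope again (if one was pushed) before returning.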
288     pub fn in_opt_scope<F, R>(&mut self,
289                               opt_scope: Option<(region::Scope, SourceInfo)>,
290                               f: F)
291                               -> BlockAnd<R>
292         where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
293     {
294         debug!("in_opt_scope(opt_scope={:?})", opt_scope);
295         if let Some(region_scope) = opt_scope { self.push_scope(region_scope); }
296         let mut block;
297         let rv = unpack!(block = f(self));
298         if let Some(region_scope) = opt_scope {
299             unpack!(block = self.pop_scope(region_scope, block));
300         }
301         debug!("in_scope: exiting opt_scope={:?} block={:?}", opt_scope, block);
302         block.and(rv)
303     }
304
305     /// Convenience wrapper that pushes a scope and then executes `f`
306     /// to build its contents, popping the scope afterwards.
307     pub fn in_scope<F, R>(&mut self,
308                           region_scope: (region::Scope, SourceInfo),
309                           lint_level: LintLevel,
310                           f: F)
311                           -> BlockAnd<R>
312         where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
313     {
314         debug!("in_scope(region_scope={:?})", region_scope);
315         let source_scope = self.source_scope;
316         let tcx = self.hir.tcx();
317         if let LintLevel::Explicit(current_hir_id) = lint_level {
318             // Use `maybe_lint_level_root_bounded` with `root_lint_level` as a bound
319             // to avoid adding HIR dependencies on our parents.
320             // We estimate the true lint roots here to avoid creating a lot of source scopes.
321
322             let parent_root = tcx.maybe_lint_level_root_bounded(
323                 self.source_scope_local_data[source_scope].lint_root,
324                 self.hir.root_lint_level,
325             );
326             let current_root = tcx.maybe_lint_level_root_bounded(
327                 current_hir_id,
328                 self.hir.root_lint_level
329             );
330
331             if parent_root != current_root {
332                 self.source_scope = self.new_source_scope(
333                     region_scope.1.span,
334                     LintLevel::Explicit(current_root),
335                     None
336                 );
337             }
338         }
339         self.push_scope(region_scope);
340         let mut block;
341         let rv = unpack!(block = f(self));
342         unpack!(block = self.pop_scope(region_scope, block));
343         self.source_scope = source_scope;
344         debug!("in_scope: exiting region_scope={:?} block={:?}", region_scope, block);
345         block.and(rv)
346     }
347
348     /// Push a scope onto the stack. You can then build code in this
349     /// scope and call `pop_scope` afterwards. Note that these two
350     /// calls must be paired; using `in_scope` as a convenience
351     /// wrapper may be preferable.
352     pub fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo)) {
353         debug!("push_scope({:?})", region_scope);
354         let vis_scope = self.source_scope;
355         self.scopes.push(Scope {
356             source_scope: vis_scope,
357             region_scope: region_scope.0,
358             region_scope_span: region_scope.1.span,
359             needs_cleanup: false,
360             drops: vec![],
361             cached_generator_drop: None,
362             cached_exits: Default::default(),
363             cached_unwind: CachedBlock::default(),
364         });
365     }
366
367     /// Pops a scope, which should have region scope `region_scope`,
368     /// adding any drops onto the end of `block` that are needed.
369     /// This must match 1-to-1 with `push_scope`.
370     pub fn pop_scope(&mut self,
371                      region_scope: (region::Scope, SourceInfo),
372                      mut block: BasicBlock)
373                      -> BlockAnd<()> {
374         debug!("pop_scope({:?}, {:?})", region_scope, block);
375         // If we are emitting a `drop` statement, we need to have the cached
376         // diverge cleanup pads ready in case that drop panics.
377         let may_panic =
378             self.scopes.last().unwrap().drops.iter().any(|s| s.kind.may_panic());
379         if may_panic {
380             self.diverge_cleanup();
381         }
382         let scope = self.scopes.pop().unwrap();
383         assert_eq!(scope.region_scope, region_scope.0);
384
385         let unwind_to = self.scopes.last().and_then(|next_scope| {
386             next_scope.cached_unwind.get(false)
387         }).unwrap_or_else(|| self.resume_block());
388
389         unpack!(block = build_scope_drops(
390             &mut self.cfg,
391             &scope,
392             block,
393             unwind_to,
394             self.arg_count,
395             false,
396         ));
397
398         block.unit()
399     }
400
401
402     /// Branch out of `block` to `target`, exiting all scopes up to
403     /// and including `region_scope`. This will insert whatever drops are
404     /// needed. See module comment for details.
405     pub fn exit_scope(&mut self,
406                       span: Span,
407                       region_scope: (region::Scope, SourceInfo),
408                       mut block: BasicBlock,
409                       target: BasicBlock) {
410         debug!("exit_scope(region_scope={:?}, block={:?}, target={:?})",
411                region_scope, block, target);
412         let scope_count = 1 + self.scopes.iter().rev()
413             .position(|scope| scope.region_scope == region_scope.0)
414             .unwrap_or_else(|| {
415                 span_bug!(span, "region_scope {:?} does not enclose", region_scope)
416             });
417         let len = self.scopes.len();
418         assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes");
419
420         // If we are emitting a `drop` statement, we need to have the cached
421         // diverge cleanup pads ready in case that drop panics.
422         let may_panic = self.scopes[(len - scope_count)..].iter().any(|s| s.needs_cleanup);
423         if may_panic {
424             self.diverge_cleanup();
425         }
426
427         let mut scopes = self.scopes[(len - scope_count - 1)..].iter_mut().rev();
428         let mut scope = scopes.next().unwrap();
429         for next_scope in scopes {
430             if scope.drops.is_empty() {
431                 scope = next_scope;
432                 continue;
433             }
434             let source_info = scope.source_info(span);
435             block = match scope.cached_exits.entry((target, region_scope.0)) {
436                 Entry::Occupied(e) => {
437                     self.cfg.terminate(block, source_info,
438                                     TerminatorKind::Goto { target: *e.get() });
439                     return;
440                 }
441                 Entry::Vacant(v) => {
442                     let b = self.cfg.start_new_block();
443                     self.cfg.terminate(block, source_info,
444                                     TerminatorKind::Goto { target: b });
445                     v.insert(b);
446                     b
447                 }
448             };
449
450             let unwind_to = next_scope.cached_unwind.get(false).unwrap_or_else(|| {
451                 debug_assert!(!may_panic, "cached block not present?");
452                 START_BLOCK
453             });
454
455             unpack!(block = build_scope_drops(
456                 &mut self.cfg,
457                 scope,
458                 block,
459                 unwind_to,
460                 self.arg_count,
461                 false,
462             ));
463
464             scope = next_scope;
465         }
466
467         let scope = &self.scopes[len - scope_count];
468         self.cfg.terminate(block, scope.source_info(span),
469                            TerminatorKind::Goto { target });
470     }
471
472     /// Creates a path that performs all required cleanup for dropping a generator.
473     ///
474     /// This path terminates in GeneratorDrop. Returns the start of the path.
475     /// None indicates there’s no cleanup to do at this point.
476     pub fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> {
477         // Fill in the cache for unwinds
478         self.diverge_cleanup_gen(true);
479
480         let src_info = self.scopes[0].source_info(self.fn_span);
481         let resume_block = self.resume_block();
482         let mut scopes = self.scopes.iter_mut().rev().peekable();
483         let mut block = self.cfg.start_new_block();
484         let result = block;
485
486         while let Some(scope) = scopes.next() {
487             if !scope.needs_cleanup && !self.is_generator {
488                 continue;
489             }
490
491             block = if let Some(b) = scope.cached_generator_drop {
492                 self.cfg.terminate(block, src_info,
493                                    TerminatorKind::Goto { target: b });
494                 return Some(result);
495             } else {
496                 let b = self.cfg.start_new_block();
497                 scope.cached_generator_drop = Some(b);
498                 self.cfg.terminate(block, src_info,
499                                    TerminatorKind::Goto { target: b });
500                 b
501             };
502
503             let unwind_to = scopes.peek().as_ref().map(|scope| {
504                 scope.cached_unwind.get(true).unwrap_or_else(|| {
505                     span_bug!(src_info.span, "cached block not present?")
506                 })
507             }).unwrap_or(resume_block);
508
509             unpack!(block = build_scope_drops(
510                 &mut self.cfg,
511                 scope,
512                 block,
513                 unwind_to,
514                 self.arg_count,
515                 true,
516             ));
517         }
518
519         self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop);
520
521         Some(result)
522     }
523
524     /// Creates a new source scope, nested in the current one.
525     pub fn new_source_scope(&mut self,
526                             span: Span,
527                             lint_level: LintLevel,
528                             safety: Option<Safety>) -> SourceScope {
529         let parent = self.source_scope;
530         debug!("new_source_scope({:?}, {:?}, {:?}) - parent({:?})={:?}",
531                span, lint_level, safety,
532                parent, self.source_scope_local_data.get(parent));
533         let scope = self.source_scopes.push(SourceScopeData {
534             span,
535             parent_scope: Some(parent),
536         });
537         let scope_local_data = SourceScopeLocalData {
538             lint_root: if let LintLevel::Explicit(lint_root) = lint_level {
539                 lint_root
540             } else {
541                 self.source_scope_local_data[parent].lint_root
542             },
543             safety: safety.unwrap_or_else(|| {
544                 self.source_scope_local_data[parent].safety
545             })
546         };
547         self.source_scope_local_data.push(scope_local_data);
548         scope
549     }
550
551     // Finding scopes
552     // ==============
553     /// Finds the breakable scope for a given label. This is used for
554     /// resolving `break` and `continue`.
555     pub fn find_breakable_scope(&self,
556                                 span: Span,
557                                 label: region::Scope)
558                                 -> &BreakableScope<'tcx> {
559         // find the loop-scope with the correct id
560         self.breakable_scopes.iter()
561             .rev()
562             .filter(|breakable_scope| breakable_scope.region_scope == label)
563             .next()
564             .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found"))
565     }
566
567     /// Given a span and the current source scope, make a SourceInfo.
568     pub fn source_info(&self, span: Span) -> SourceInfo {
569         SourceInfo {
570             span,
571             scope: self.source_scope
572         }
573     }
574
575     /// Returns the `region::Scope` of the scope which should be exited by a
576     /// return.
577     pub fn region_scope_of_return_scope(&self) -> region::Scope {
578         // The outermost scope (`scopes[0]`) will be the `CallSite` scope.
579         // We want `scopes[1]`, which is the `Arguments` scope.
580         assert!(self.scopes.len() >= 2);
581         assert!(match self.scopes[1].region_scope.data {
582             region::ScopeData::Arguments => true,
583             _ => false,
584         });
585         self.scopes[1].region_scope
586     }
587
588     /// Returns the topmost active scope, which is known to be alive until
589     /// the next scope expression.
590     pub fn topmost_scope(&self) -> region::Scope {
591         self.scopes.last().expect("topmost_scope: no scopes present").region_scope
592     }
593
594     /// Returns the scope that we should use as the lifetime of an
595     /// operand. Basically, an operand must live until it is consumed.
596     /// This is similar to, but not quite the same as, the temporary
597     /// scope (which can be larger or smaller).
598     ///
599     /// Consider:
600     ///
601     ///     let x = foo(bar(X, Y));
602     ///
603     /// We wish to pop the storage for X and Y after `bar()` is
604     /// called, not after the whole `let` is completed.
605     ///
606     /// As another example, if the second argument diverges:
607     ///
608     ///     foo(Box::new(2), panic!())
609     ///
610     /// We would allocate the box but then free it on the unwinding
611     /// path; we would also emit a free on the 'success' path from
612     /// panic, but that will turn out to be removed as dead-code.
613     ///
614     /// When building statics/constants, returns `None` since
615     /// intermediate values do not have to be dropped in that case.
616     pub fn local_scope(&self) -> Option<region::Scope> {
617         match self.hir.body_owner_kind {
618             hir::BodyOwnerKind::Const |
619             hir::BodyOwnerKind::Static(_) =>
620                 // No need to free storage in this context.
621                 None,
622             hir::BodyOwnerKind::Closure |
623             hir::BodyOwnerKind::Fn =>
624                 Some(self.topmost_scope()),
625         }
626     }
627
628     // Schedule an abort block - this is used for some ABIs that cannot unwind
629     pub fn schedule_abort(&mut self) -> BasicBlock {
630         self.scopes[0].needs_cleanup = true;
631         let abortblk = self.cfg.start_new_cleanup_block();
632         let source_info = self.scopes[0].source_info(self.fn_span);
633         self.cfg.terminate(abortblk, source_info, TerminatorKind::Abort);
634         self.cached_resume_block = Some(abortblk);
635         abortblk
636     }
637
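    /// Convenience method that schedules both the `StorageDead` and the value
    /// drop for `place` on exit from `region_scope`.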
638     pub fn schedule_drop_storage_and_value(
639         &mut self,
640         span: Span,
641         region_scope: region::Scope,
642         place: &Place<'tcx>,
643         place_ty: Ty<'tcx>,
644     ) {
645         self.schedule_drop(
646             span, region_scope, place, place_ty,
647             DropKind::Storage,
648         );
649         self.schedule_drop(
650             span, region_scope, place, place_ty,
651             DropKind::Value {
652                 cached_block: CachedBlock::default(),
653             },
654         );
655     }
656
657     // Scheduling drops
658     // ================
659     /// Indicates that `place` should be dropped on exit from
660     /// `region_scope`.
661     ///
662     /// When called with `DropKind::Storage`, `place` should be a local
663     /// with an index higher than the current `self.arg_count`.
664     pub fn schedule_drop(
665         &mut self,
666         span: Span,
667         region_scope: region::Scope,
668         place: &Place<'tcx>,
669         place_ty: Ty<'tcx>,
670         drop_kind: DropKind,
671     ) {
672         let needs_drop = self.hir.needs_drop(place_ty);
673         match drop_kind {
674             DropKind::Value { .. } => if !needs_drop { return },
675             DropKind::Storage => {
676                 match *place {
677                     Place::Base(PlaceBase::Local(index)) => if index.index() <= self.arg_count {
678                         span_bug!(
679                             span, "`schedule_drop` called with index {} and arg_count {}",
680                             index.index(),
681                             self.arg_count,
682                         )
683                     },
684                     _ => span_bug!(
685                         span, "`schedule_drop` called with non-`Local` place {:?}", place
686                     ),
687                 }
688             }
689         }
690
691         for scope in self.scopes.iter_mut().rev() {
692             let this_scope = scope.region_scope == region_scope;
693             // When building drops, we try to cache chains of drops so that they can be
694             // reused by later drops which branch into the cached (already built)
695             // blocks.  This, however, means that whenever we add a drop into a scope which already
696             // had some blocks built (and thus, cached) for it, we must invalidate all caches which
697             // might branch into the scope which had a drop just added to it. This is necessary,
698             // because otherwise some other code might use the cache to branch into already built
699             // chain of drops, essentially ignoring the newly added drop.
700             //
701             // For example, consider two scopes with a drop in each. These are built and
702             // thus the caches are filled:
703             //
704             // +--------------------------------------------------------+
705             // | +---------------------------------+                    |
706             // | | +--------+     +-------------+  |  +---------------+ |
707             // | | | return | <-+ | drop(outer) | <-+ |  drop(middle) | |
708             // | | +--------+     +-------------+  |  +---------------+ |
709             // | +------------|outer_scope cache|--+                    |
710             // +------------------------------|middle_scope cache|------+
711             //
712             // Now, a new, inner-most scope is added along with a new drop into both inner-most and
713             // outer-most scopes:
714             //
715             // +------------------------------------------------------------+
716             // | +----------------------------------+                       |
717             // | | +--------+      +-------------+  |   +---------------+   | +-------------+
718             // | | | return | <+   | drop(new)   | <-+  |  drop(middle) | <--+| drop(inner) |
719             // | | +--------+  |   | drop(outer) |  |   +---------------+   | +-------------+
720             // | |             +-+ +-------------+  |                       |
721             // | +---|invalid outer_scope cache|----+                       |
722             // +----=----------------|invalid middle_scope cache|-----------+
723             //
724             // If, when adding `drop(new)` we do not invalidate the cached blocks for both
725             // outer_scope and middle_scope, then, when building drops for the inner (right-most)
726             // scope, the old, cached blocks, without `drop(new)` will get used, producing the
727             // wrong results.
728             //
729             // The cache and its invalidation for the unwind branch are somewhat special.
730             // The cache is per-drop rather than per-scope, which has several implications. Adding
731             // a new drop into a scope will not invalidate cached blocks of the prior drops in the
732             // scope. That is true, because none of the already existing drops will have an edge
733             // into a block with the newly added drop.
734             //
735             // Note that this code iterates scopes from the inner-most to the outer-most,
736             // invalidating the caches of each scope visited. This way, the bare minimum of
737             // caches gets invalidated; i.e., if a new drop is added into the middle scope, the
738             // cache of the outer scope stays intact.
739             scope.invalidate_cache(!needs_drop, this_scope);
740             if this_scope {
741                 if let DropKind::Value { .. } = drop_kind {
742                     scope.needs_cleanup = true;
743                 }
744
745                 let region_scope_span = region_scope.span(self.hir.tcx(),
746                                                           &self.hir.region_scope_tree);
747                 // Attribute scope exit drops to scope's closing brace.
748                 let scope_end = self.hir.tcx().sess.source_map().end_point(region_scope_span);
749
750                 scope.drops.push(DropData {
751                     span: scope_end,
752                     location: place.clone(),
753                     kind: drop_kind
754                 });
755                 return;
756             }
757         }
758         span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, place);
759     }
760
761     // Other
762     // =====
763     /// Creates a path that performs all required cleanup for unwinding.
764     ///
765     /// This path terminates in Resume. Returns the start of the path.
766     /// See module comment for more details.
767     pub fn diverge_cleanup(&mut self) -> BasicBlock {
768         self.diverge_cleanup_gen(false)
769     }
770
771     fn resume_block(&mut self) -> BasicBlock {
772         if let Some(target) = self.cached_resume_block {
773             target
774         } else {
775             let resumeblk = self.cfg.start_new_cleanup_block();
776             self.cfg.terminate(resumeblk,
777                                SourceInfo {
778                                    scope: OUTERMOST_SOURCE_SCOPE,
779                                    span: self.fn_span
780                                },
781                                TerminatorKind::Resume);
782             self.cached_resume_block = Some(resumeblk);
783             resumeblk
784         }
785     }
786
787     fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> BasicBlock {
788         // Build up the drops in **reverse** order. The end result will
789         // look like:
790         //
791         //    scopes[n] -> scopes[n-1] -> ... -> scopes[0]
792         //
793         // However, we build this chain starting from the back: we process
794         // scopes[0] first, then scopes[1], etc., pointing each one at
795         // the result generated from the one before. Along the way, we
796         // store caches. If everything is cached, we'll just walk right
797         // to left reading the cached results but never create anything.
798
799         // Find the last cached block
800         let (mut target, first_uncached) = if let Some(cached_index) = self.scopes.iter()
801             .rposition(|scope| scope.cached_unwind.get(generator_drop).is_some()) {
802             (self.scopes[cached_index].cached_unwind.get(generator_drop).unwrap(), cached_index + 1)
803         } else {
804             (self.resume_block(), 0)
805         };
806
807         for scope in self.scopes[first_uncached..].iter_mut() {
808             target = build_diverge_scope(&mut self.cfg, scope.region_scope_span,
809                                          scope, target, generator_drop, self.is_generator);
810         }
811
812         target
813     }
814
815     /// Utility function for *non*-scope code to build their own drops
816     pub fn build_drop(&mut self,
817                       block: BasicBlock,
818                       span: Span,
819                       location: Place<'tcx>,
820                       ty: Ty<'tcx>) -> BlockAnd<()> {
821         if !self.hir.needs_drop(ty) {
822             return block.unit();
823         }
824         let source_info = self.source_info(span);
825         let next_target = self.cfg.start_new_block();
826         let diverge_target = self.diverge_cleanup();
827         self.cfg.terminate(block, source_info,
828                            TerminatorKind::Drop {
829                                location,
830                                target: next_target,
831                                unwind: Some(diverge_target),
832                            });
833         next_target.unit()
834     }
835
836     /// Utility function for *non*-scope code to build their own drops
837     pub fn build_drop_and_replace(&mut self,
838                                   block: BasicBlock,
839                                   span: Span,
840                                   location: Place<'tcx>,
841                                   value: Operand<'tcx>) -> BlockAnd<()> {
842         let source_info = self.source_info(span);
843         let next_target = self.cfg.start_new_block();
844         let diverge_target = self.diverge_cleanup();
845         self.cfg.terminate(block, source_info,
846                            TerminatorKind::DropAndReplace {
847                                location,
848                                value,
849                                target: next_target,
850                                unwind: Some(diverge_target),
851                            });
852         next_target.unit()
853     }
854
855     /// Creates an `Assert` terminator and returns the success block.
856     /// If the boolean condition operand is not the expected value,
857     /// a runtime panic will be caused with the given message.
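    ///
    /// For example (an illustrative case), array indexing is lowered using an
    /// assert of this kind: the index is checked against the length, and the
    /// actual access happens in the returned success block:
    ///
    ///     let a = [1, 2, 3];
    ///     let i = 2;
    ///     let x = a[i]; // assert that `i < 3`, then index in the success block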
858     pub fn assert(&mut self, block: BasicBlock,
859                   cond: Operand<'tcx>,
860                   expected: bool,
861                   msg: AssertMessage<'tcx>,
862                   span: Span)
863                   -> BasicBlock {
864         let source_info = self.source_info(span);
865
866         let success_block = self.cfg.start_new_block();
867         let cleanup = self.diverge_cleanup();
868
869         self.cfg.terminate(block, source_info,
870                            TerminatorKind::Assert {
871                                cond,
872                                expected,
873                                msg,
874                                target: success_block,
875                                cleanup: Some(cleanup),
876                            });
877
878         success_block
879     }
880
881     // `match` arm scopes
882     // ==================
883     /// Unschedules any drops in the top scope.
884     ///
885     /// This is only needed for `match` arm scopes, because they have one
886     /// entrance per pattern, but only one exit.
887     pub fn clear_top_scope(&mut self, region_scope: region::Scope) {
888         let top_scope = self.scopes.last_mut().unwrap();
889
890         assert_eq!(top_scope.region_scope, region_scope);
891
892         top_scope.drops.clear();
893         top_scope.invalidate_cache(false, true);
894     }
895
896     /// Pops the scheduled drop for the single variable provided and emits its `StorageDead`.
897     ///
898     /// * The scope must be the top scope.
899     /// * The variable must be in that scope.
900     /// * The variable must be at the top of that scope: it's the next thing
901     ///   scheduled to drop.
902     /// * The drop must be of `DropKind::Storage`.
903     ///
904     /// This is used for the boolean holding the result of the match guard. We
905     /// do this because:
906     ///
907     /// * The boolean is different for each pattern
908     /// * There is only one exit for the arm scope
909     /// * The guard expression scope is too short, it ends just before the
910     ///   boolean is tested.
911     pub fn pop_variable(
912         &mut self,
913         block: BasicBlock,
914         region_scope: region::Scope,
915         variable: Local,
916     ) {
917         let top_scope = self.scopes.last_mut().unwrap();
918
919         assert_eq!(top_scope.region_scope, region_scope);
920
921         let top_drop_data = top_scope.drops.pop().unwrap();
922
923         match top_drop_data.kind {
924             DropKind::Value { .. } => {
925                 bug!("Should not be calling pop_variable on non-copy type!")
926             }
927             DropKind::Storage => {
928                 // Drop the storage for both value and storage drops.
929                 // Only temps and vars need their storage dead.
930                 match top_drop_data.location {
931                     Place::Base(PlaceBase::Local(index)) => {
932                         let source_info = top_scope.source_info(top_drop_data.span);
933                         assert_eq!(index, variable);
934                         self.cfg.push(block, Statement {
935                             source_info,
936                             kind: StatementKind::StorageDead(index)
937                         });
938                     }
939                     _ => unreachable!(),
940                 }
941             }
942         }
943
944         top_scope.invalidate_cache(true, true);
945     }
946
947 }
948
949 /// Builds drops for pop_scope and exit_scope.
950 fn build_scope_drops<'tcx>(
951     cfg: &mut CFG<'tcx>,
952     scope: &Scope<'tcx>,
953     mut block: BasicBlock,
954     last_unwind_to: BasicBlock,
955     arg_count: usize,
956     generator_drop: bool,
957 ) -> BlockAnd<()> {
958     debug!("build_scope_drops({:?} -> {:?})", block, scope);
959
960     // Build up the drops in evaluation order. The end result will
961     // look like:
962     //
963     // [SDs, drops[n]] --..> [SDs, drop[1]] -> [SDs, drop[0]] -> [[SDs]]
964     //               |                    |                 |
965     //               :                    |                 |
966     //                                    V                 V
967     // [drop[n]] -...-> [drop[1]] ------> [drop[0]] ------> [last_unwind_to]
968     //
969     // The horizontal arrows represent the execution path when the drops return
970     // successfully. The downwards arrows represent the execution path when the
971     // drops panic (panicking while unwinding will abort, so there's no need for
972     // another set of arrows). The drops for the unwind path should have already
973     // been generated by `diverge_cleanup_gen`.
974
975     let mut unwind_blocks = scope.drops.iter().rev().filter_map(|drop_data| {
976         if let DropKind::Value { cached_block } = drop_data.kind {
977             Some(cached_block.get(generator_drop).unwrap_or_else(|| {
978                 span_bug!(drop_data.span, "cached block not present?")
979             }))
980         } else {
981             None
982         }
983     });
984
985     // When we unwind from a drop, we start cleaning up from the next one, so
986     // we don't need this block.
987     unwind_blocks.next();
988
989     for drop_data in scope.drops.iter().rev() {
990         let source_info = scope.source_info(drop_data.span);
991         match drop_data.kind {
992             DropKind::Value { .. } => {
993                 let unwind_to = unwind_blocks.next().unwrap_or(last_unwind_to);
994
995                 let next = cfg.start_new_block();
996                 cfg.terminate(block, source_info, TerminatorKind::Drop {
997                     location: drop_data.location.clone(),
998                     target: next,
999                     unwind: Some(unwind_to)
1000                 });
1001                 block = next;
1002             }
1003             DropKind::Storage => {
1004                 // Drop the storage for both value and storage drops.
1005                 // Only temps and vars need their storage dead.
1006                 match drop_data.location {
1007                     Place::Base(PlaceBase::Local(index)) if index.index() > arg_count => {
1008                         cfg.push(block, Statement {
1009                             source_info,
1010                             kind: StatementKind::StorageDead(index)
1011                         });
1012                     }
1013                     _ => unreachable!(),
1014                 }
1015             }
1016         }
1017     }
1018     block.unit()
1019 }
1020
1021 fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
1022                              span: Span,
1023                              scope: &mut Scope<'tcx>,
1024                              mut target: BasicBlock,
1025                              generator_drop: bool,
1026                              is_generator: bool)
1027                              -> BasicBlock
1028 {
1029     // Build up the drops in **reverse** order. The end result will
1030     // look like:
1031     //
1032     //    [drops[n]] -...-> [drops[0]] -> [target]
1033     //
1034     // The code in this function reads from right to left. At each
1035     // point, we check for cached blocks representing the
1036     // remainder. If everything is cached, we'll just walk right to
1037     // left reading the cached results but never create anything.
1038
1039     let source_scope = scope.source_scope;
1040     let source_info = |span| SourceInfo {
1041         span,
1042         scope: source_scope
1043     };
1044
1045     // We keep track of StorageDead statements to prepend to our current block
1046     // and store them here, in reverse order.
1047     let mut storage_deads = vec![];
1048
1049     let mut target_built_by_us = false;
1050
1051     // Build up the drops. Here we iterate the vector in
1052     // *forward* order, so that we generate drops[0] first (right to
1053     // left in diagram above).
1054     for (j, drop_data) in scope.drops.iter_mut().enumerate() {
1055         debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data);
1056         // Only full value drops are emitted in the diverging path,
1057         // not StorageDead, except in the case of generators.
1058         //
1059         // Note: This may not actually be what we desire (are we
1060         // "freeing" stack storage as we unwind, or merely observing a
1061         // frozen stack)? In particular, the intent may have been to
1062         // match the behavior of clang, but on inspection eddyb says
1063         // this is not what clang does.
1064         match drop_data.kind {
1065             DropKind::Storage if is_generator => {
1066                 // Only temps and vars need their storage dead.
1067                 match drop_data.location {
1068                     Place::Base(PlaceBase::Local(index)) => {
1069                         storage_deads.push(Statement {
1070                             source_info: source_info(drop_data.span),
1071                             kind: StatementKind::StorageDead(index)
1072                         });
1073                     }
1074                     _ => unreachable!(),
1075                 };
1076             }
1077             DropKind::Storage => {}
1078             DropKind::Value { ref mut cached_block } => {
1079                 let cached_block = cached_block.ref_mut(generator_drop);
1080                 target = if let Some(cached_block) = *cached_block {
1081                     storage_deads.clear();
1082                     target_built_by_us = false;
1083                     cached_block
1084                 } else {
1085                     push_storage_deads(
1086                         cfg, &mut target, &mut storage_deads, target_built_by_us, source_scope);
1087                     let block = cfg.start_new_cleanup_block();
1088                     cfg.terminate(block, source_info(drop_data.span),
1089                                   TerminatorKind::Drop {
1090                                       location: drop_data.location.clone(),
1091                                       target,
1092                                       unwind: None
1093                                   });
1094                     *cached_block = Some(block);
1095                     target_built_by_us = true;
1096                     block
1097                 };
1098             }
1099         };
1100     }
1101     push_storage_deads(cfg, &mut target, &mut storage_deads, target_built_by_us, source_scope);
1102     *scope.cached_unwind.ref_mut(generator_drop) = Some(target);
1103
1104     assert!(storage_deads.is_empty());
1105     debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target);
1106
1107     target
1108 }
1109
1110 fn push_storage_deads(cfg: &mut CFG<'tcx>,
1111                       target: &mut BasicBlock,
1112                       storage_deads: &mut Vec<Statement<'tcx>>,
1113                       target_built_by_us: bool,
1114                       source_scope: SourceScope) {
1115     if storage_deads.is_empty() { return; }
1116     if !target_built_by_us {
1117         // We cannot add statements to an existing block, so we create a new
1118         // block for our StorageDead statements.
1119         let block = cfg.start_new_cleanup_block();
1120         let source_info = SourceInfo { span: DUMMY_SP, scope: source_scope };
1121         cfg.terminate(block, source_info, TerminatorKind::Goto { target: *target });
1122         *target = block;
1123     }
1124     let statements = &mut cfg.block_data_mut(*target).statements;
1125     storage_deads.reverse();
1126     debug!("push_storage_deads({:?}), storage_deads={:?}, statements={:?}",
1127            *target, storage_deads, statements);
1128     storage_deads.append(statements);
1129     mem::swap(statements, storage_deads);
1130     assert!(storage_deads.is_empty());
1131 }