src/librustc_mir/build/scope.rs
1 /*!
2 Managing the scope stack. The scopes are tied to lexical scopes, so as
3 we descend the HAIR, we push a scope on the stack, build its
4 contents, and then pop it off. Every scope is named by a
5 `region::Scope`.
6
7 ### SEME Regions
8
9 When pushing a new scope, we record the current point in the graph (a
10 basic block); this marks the entry to the scope. We then generate more
11 stuff in the control-flow graph. Whenever the scope is exited, either
12 via a `break` or `return` or just by fallthrough, that marks an exit
13 from the scope. Each lexical scope thus corresponds to a single-entry,
14 multiple-exit (SEME) region in the control-flow graph.
15
16 For now, we keep a mapping from each `region::Scope` to its
17 corresponding SEME region for later reference (see caveat in next
18 paragraph). This is because lexical regions (lifetimes) are tied to
19 these scopes. Eventually, when we shift to non-lexical lifetimes, there
20 should be no need to remember this mapping.
21
22 There is one additional wrinkle, actually, that I wanted to hide from
23 you but duty compels me to mention. In the course of building
24 matches, it sometimes happens that certain code (namely guards) gets
25 executed multiple times. This means that a single lexical scope may
26 in fact correspond to multiple, disjoint SEME regions. So our
27 mapping is from one scope to a vector of SEME regions.
28
29 ### Drops
30
31 The primary purpose for scopes is to insert drops: while building
32 the contents, we also accumulate places that need to be dropped upon
33 exit from each scope. This is done by calling `schedule_drop`. Once a
34 drop is scheduled, whenever we branch out we will insert drops of all
35 those places onto the outgoing edge. Note that we don't know the full
36 set of scheduled drops up front, and so whenever we exit from the
37 scope we only drop the values scheduled thus far. For example, consider
38 the scope S corresponding to this loop:
39
40 ```
41 # let cond = true;
42 loop {
43     let x = ..;
44     if cond { break; }
45     let y = ..;
46 }
47 ```
48
49 When processing the `let x`, we will add one drop to the scope for
50 `x`.  The break will then insert a drop for `x`. When we process `let
51 y`, we will add another drop (in fact, to a subscope, but let's ignore
52 that for now); any later drops would also drop `y`.
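
To make that ordering concrete, here is a plain-Rust sketch of the
behavior described above (this is ordinary source code, not the MIR this
module emits; `Noisy` is a made-up type for the illustration):

```
struct Noisy(&'static str);
impl Drop for Noisy {
    fn drop(&mut self) { println!("dropping {}", self.0); }
}

# let cond = true;
loop {
    let x = Noisy("x");
    if cond { break; }   // this exit only drops `x` (all that is scheduled so far)
    let y = Noisy("y");
    // falling through to the end of the body drops `y`, then `x`
}
```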
53
54 ### Early exit
55
56 There are numerous "normal" ways to early exit a scope: `break`,
57 `continue`, `return` (panics are handled separately). Whenever an
58 early exit occurs, the method `exit_scope` is called. It is given the
59 current point in execution where the early exit occurs, as well as the
60 scope you want to branch to (note that all early exits go to some
61 enclosing scope). `exit_scope` will record this exit point and
62 also add all drops.
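
As a surface-level illustration of what such exits mean for drops
(ordinary Rust, not the MIR built here; the function and its locals are
made up), an early `return` leaves every enclosing scope at once, and the
values scheduled in each scope are dropped, innermost scope first:

```
fn early(flag: bool) -> i32 {
    let a = String::from("a");
    {
        let b = String::from("b");
        if flag {
            return 0;   // exits both scopes: `b` is dropped, then `a`
        }
    }                   // fallthrough exit of the inner scope drops `b` here
    1                   // `a` is dropped when the function's scope is exited
}
# early(true);
```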
63
64 Panics are handled in a similar fashion, except that a panic always
65 unwinds out to a dedicated resume block. To obtain the cleanup path
66 for panics, call `diverge_cleanup`, which produces (and caches) a
67 block that performs the appropriate cleanup and then diverges.
68 Terminators that may panic (see e.g. `build_drop` and `assert` below)
69 point their unwind edge at the block returned by `diverge_cleanup`,
70 so unwinding runs the scheduled drops before resuming.
71
72 ### Loop scopes
73
74 In addition to the normal scope stack, we track a loop scope stack
75 that contains only loops. It tracks where a `break` and `continue`
76 should go to.
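
For example (plain Rust, only to illustrate which scope each labeled exit
targets; the labels and loop bounds are arbitrary):

```
'outer: for i in 0..3 {
    for j in 0..3 {
        if j == 1 { continue 'outer; } // targets the outer loop's scope
        if i == 2 { break 'outer; }    // exits both loop scopes at once
        let _ = (i, j);
    }
}
```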
77
78 */
79
80 use crate::build::{BlockAnd, BlockAndExtension, Builder, CFG};
81 use crate::hair::LintLevel;
82 use rustc::middle::region;
83 use rustc::ty::Ty;
84 use rustc::hir;
85 use rustc::hir::def_id::LOCAL_CRATE;
86 use rustc::mir::*;
87 use syntax_pos::{Span};
88 use rustc_data_structures::fx::FxHashMap;
89 use std::collections::hash_map::Entry;
90
91 #[derive(Debug)]
92 pub struct Scope<'tcx> {
93     /// The source scope this scope was created in.
94     source_scope: SourceScope,
95
96     /// the region scope this scope corresponds to.
97     region_scope: region::Scope,
98
99     /// the span of that region_scope
100     region_scope_span: Span,
101
102     /// Whether there's anything to do for the cleanup path, that is,
103     /// when unwinding through this scope. This includes destructors,
104     /// but not StorageDead statements, which don't get emitted at all
105     /// for unwinding, for several reasons:
106     ///  * clang doesn't emit llvm.lifetime.end for C++ unwinding
107     ///  * LLVM's memory dependency analysis can't handle it atm
108     ///  * polluting the cleanup MIR with StorageDead creates
109     ///    landing pads even though there are no actual destructors
110     ///  * freeing up stack space has no effect during unwinding
111     needs_cleanup: bool,
112
113     /// set of places to drop when exiting this scope. This starts
114     /// out empty but grows as variables are declared during the
115     /// building process. This is a stack, so we always drop from the
116     /// end of the vector (top of the stack) first.
117     drops: Vec<DropData<'tcx>>,
118
119     /// The cache for drop chain on “normal” exit into a particular BasicBlock.
120     cached_exits: FxHashMap<(BasicBlock, region::Scope), BasicBlock>,
121
122     /// The cache for drop chain on "generator drop" exit.
123     cached_generator_drop: Option<BasicBlock>,
124
125     /// The cache for drop chain on "unwind" exit.
126     cached_unwind: CachedBlock,
127 }
128
129 #[derive(Debug)]
130 struct DropData<'tcx> {
131     /// span where drop obligation was incurred (typically where place was declared)
132     span: Span,
133
134     /// place to drop
135     location: Place<'tcx>,
136
137     /// Whether this is a value Drop or a StorageDead.
138     kind: DropKind,
139 }
140
141 #[derive(Debug, Default, Clone, Copy)]
142 pub(crate) struct CachedBlock {
143     /// The cached block for the cleanups-on-diverge path. This block
144     /// contains code to run the current drop and all the preceding
145     /// drops (i.e., those with a lower index in the scope's `drops`
146     /// array).
147     unwind: Option<BasicBlock>,
148
149     /// The cached block for unwinds during the cleanups-on-generator-drop path
150     ///
151     /// This is split from the standard unwind path here to prevent drop
152     /// elaboration from creating drop flags that would have to be captured
153     /// by the generator. I'm not sure how important this optimization is,
154     /// but it is here.
155     generator_drop: Option<BasicBlock>,
156 }
157
158 #[derive(Debug)]
159 pub(crate) enum DropKind {
160     Value {
161         cached_block: CachedBlock,
162     },
163     Storage
164 }
165
166 #[derive(Clone, Debug)]
167 pub struct BreakableScope<'tcx> {
168     /// Region scope of the loop
169     pub region_scope: region::Scope,
170     /// Where the body of the loop begins. `None` if this is a block rather than a loop.
171     pub continue_block: Option<BasicBlock>,
172     /// Block to branch into when the loop or block terminates (either by being `break`-en out
173     /// from, or by having its condition become false)
174     pub break_block: BasicBlock,
175     /// The destination of the loop/block expression itself (i.e., where to put the result of a
176     /// `break` expression)
177     pub break_destination: Place<'tcx>,
178 }
179
180 impl CachedBlock {
181     fn invalidate(&mut self) {
182         self.generator_drop = None;
183         self.unwind = None;
184     }
185
186     fn get(&self, generator_drop: bool) -> Option<BasicBlock> {
187         if generator_drop {
188             self.generator_drop
189         } else {
190             self.unwind
191         }
192     }
193
194     fn ref_mut(&mut self, generator_drop: bool) -> &mut Option<BasicBlock> {
195         if generator_drop {
196             &mut self.generator_drop
197         } else {
198             &mut self.unwind
199         }
200     }
201 }
202
203 impl DropKind {
204     fn may_panic(&self) -> bool {
205         match *self {
206             DropKind::Value { .. } => true,
207             DropKind::Storage => false
208         }
209     }
210 }
211
212 impl<'tcx> Scope<'tcx> {
213     /// Invalidates all the cached blocks in the scope.
214     ///
215     /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
216     /// larger extent of code.
217     ///
218     /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`.
219     /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
220     /// top-of-scope (as opposed to dependent scopes).
221     fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
222         // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
223         // with lots of `try!`?
224
225         // cached exits drop storage and refer to the top-of-scope
226         self.cached_exits.clear();
227
228         if !storage_only {
229             // the current generator drop and unwind ignore
230             // storage but refer to top-of-scope
231             self.cached_generator_drop = None;
232             self.cached_unwind.invalidate();
233         }
234
235         if !storage_only && !this_scope_only {
236             for drop_data in &mut self.drops {
237                 if let DropKind::Value { ref mut cached_block } = drop_data.kind {
238                     cached_block.invalidate();
239                 }
240             }
241         }
242     }
243
244     /// Given a span and this scope's source scope, make a SourceInfo.
245     fn source_info(&self, span: Span) -> SourceInfo {
246         SourceInfo {
247             span,
248             scope: self.source_scope
249         }
250     }
251 }
252
253 impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
254     // Adding and removing scopes
255     // ==========================
256     /// Start a breakable scope, which tracks where `continue` and `break`
257     /// should branch to. See module comment for more details.
258     ///
259     /// Returns the result of evaluating `f` inside the new breakable scope.
260     pub fn in_breakable_scope<F, R>(&mut self,
261                                     loop_block: Option<BasicBlock>,
262                                     break_block: BasicBlock,
263                                     break_destination: Place<'tcx>,
264                                     f: F) -> R
265         where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> R
266     {
267         let region_scope = self.topmost_scope();
268         let scope = BreakableScope {
269             region_scope,
270             continue_block: loop_block,
271             break_block,
272             break_destination,
273         };
274         self.breakable_scopes.push(scope);
275         let res = f(self);
276         let breakable_scope = self.breakable_scopes.pop().unwrap();
277         assert!(breakable_scope.region_scope == region_scope);
278         res
279     }
280
281     pub fn in_opt_scope<F, R>(&mut self,
282                               opt_scope: Option<(region::Scope, SourceInfo)>,
283                               mut block: BasicBlock,
284                               f: F)
285                               -> BlockAnd<R>
286         where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
287     {
288         debug!("in_opt_scope(opt_scope={:?}, block={:?})", opt_scope, block);
289         if let Some(region_scope) = opt_scope { self.push_scope(region_scope); }
290         let rv = unpack!(block = f(self));
291         if let Some(region_scope) = opt_scope {
292             unpack!(block = self.pop_scope(region_scope, block));
293         }
294         debug!("in_scope: exiting opt_scope={:?} block={:?}", opt_scope, block);
295         block.and(rv)
296     }
297
298     /// Convenience wrapper that pushes a scope and then executes `f`
299     /// to build its contents, popping the scope afterwards.
300     pub fn in_scope<F, R>(&mut self,
301                           region_scope: (region::Scope, SourceInfo),
302                           lint_level: LintLevel,
303                           mut block: BasicBlock,
304                           f: F)
305                           -> BlockAnd<R>
306         where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
307     {
308         debug!("in_scope(region_scope={:?}, block={:?})", region_scope, block);
309         let source_scope = self.source_scope;
310         let tcx = self.hir.tcx();
311         if let LintLevel::Explicit(node_id) = lint_level {
312             let same_lint_scopes = tcx.dep_graph.with_ignore(|| {
313                 let sets = tcx.lint_levels(LOCAL_CRATE);
314                 let parent_hir_id =
315                     tcx.hir().definitions().node_to_hir_id(
316                         self.source_scope_local_data[source_scope].lint_root
317                     );
318                 let current_hir_id =
319                     tcx.hir().definitions().node_to_hir_id(node_id);
320                 sets.lint_level_set(parent_hir_id) ==
321                     sets.lint_level_set(current_hir_id)
322             });
323
324             if !same_lint_scopes {
325                 self.source_scope =
326                     self.new_source_scope(region_scope.1.span, lint_level,
327                                           None);
328             }
329         }
330         self.push_scope(region_scope);
331         let rv = unpack!(block = f(self));
332         unpack!(block = self.pop_scope(region_scope, block));
333         self.source_scope = source_scope;
334         debug!("in_scope: exiting region_scope={:?} block={:?}", region_scope, block);
335         block.and(rv)
336     }
337
338     /// Push a scope onto the stack. You can then build code in this
339     /// scope and call `pop_scope` afterwards. Note that these two
340     /// calls must be paired; using `in_scope` as a convenience
341     /// wrapper may be preferable.
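    ///
    /// A rough sketch of the expected pairing (`builder` and `build_contents`
    /// are placeholders for the caller and whatever it builds inside the scope):
    ///
    /// ```ignore
    /// builder.push_scope(region_scope);
    /// let rv = unpack!(block = build_contents(builder, block));
    /// unpack!(block = builder.pop_scope(region_scope, block));
    /// ```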
342     pub fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo)) {
343         debug!("push_scope({:?})", region_scope);
344         let vis_scope = self.source_scope;
345         self.scopes.push(Scope {
346             source_scope: vis_scope,
347             region_scope: region_scope.0,
348             region_scope_span: region_scope.1.span,
349             needs_cleanup: false,
350             drops: vec![],
351             cached_generator_drop: None,
352             cached_exits: Default::default(),
353             cached_unwind: CachedBlock::default(),
354         });
355     }
356
357     /// Pops a scope, which should have region scope `region_scope`,
358     /// adding any drops onto the end of `block` that are needed.
359     /// This must match 1-to-1 with `push_scope`.
360     pub fn pop_scope(&mut self,
361                      region_scope: (region::Scope, SourceInfo),
362                      mut block: BasicBlock)
363                      -> BlockAnd<()> {
364         debug!("pop_scope({:?}, {:?})", region_scope, block);
365         // If we are emitting a `drop` statement, we need to have the cached
366         // diverge cleanup pads ready in case that drop panics.
367         let may_panic =
368             self.scopes.last().unwrap().drops.iter().any(|s| s.kind.may_panic());
369         if may_panic {
370             self.diverge_cleanup();
371         }
372         let scope = self.scopes.pop().unwrap();
373         assert_eq!(scope.region_scope, region_scope.0);
374
375         let unwind_to = self.scopes.last().and_then(|next_scope| {
376             next_scope.cached_unwind.get(false)
377         }).unwrap_or_else(|| self.resume_block());
378
379         unpack!(block = build_scope_drops(
380             &mut self.cfg,
381             &scope,
382             block,
383             unwind_to,
384             self.arg_count,
385             false,
386         ));
387
388         block.unit()
389     }
390
391
392     /// Branch out of `block` to `target`, exiting all scopes up to
393     /// and including `region_scope`. This will insert whatever drops are
394     /// needed. See module comment for details.
395     pub fn exit_scope(&mut self,
396                       span: Span,
397                       region_scope: (region::Scope, SourceInfo),
398                       mut block: BasicBlock,
399                       target: BasicBlock) {
400         debug!("exit_scope(region_scope={:?}, block={:?}, target={:?})",
401                region_scope, block, target);
402         let scope_count = 1 + self.scopes.iter().rev()
403             .position(|scope| scope.region_scope == region_scope.0)
404             .unwrap_or_else(|| {
405                 span_bug!(span, "region_scope {:?} does not enclose", region_scope)
406             });
407         let len = self.scopes.len();
408         assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes");
409
410         // If we are emitting a `drop` statement, we need to have the cached
411         // diverge cleanup pads ready in case that drop panics.
412         let may_panic = self.scopes[(len - scope_count)..].iter().any(|s| s.needs_cleanup);
413         if may_panic {
414             self.diverge_cleanup();
415         }
416
417         let mut scopes = self.scopes[(len - scope_count - 1)..].iter_mut().rev();
418         let mut scope = scopes.next().unwrap();
419         for next_scope in scopes {
420             if scope.drops.is_empty() {
421                 scope = next_scope;
422                 continue;
423             }
424             let source_info = scope.source_info(span);
425             block = match scope.cached_exits.entry((target, region_scope.0)) {
426                 Entry::Occupied(e) => {
427                     self.cfg.terminate(block, source_info,
428                                     TerminatorKind::Goto { target: *e.get() });
429                     return;
430                 }
431                 Entry::Vacant(v) => {
432                     let b = self.cfg.start_new_block();
433                     self.cfg.terminate(block, source_info,
434                                     TerminatorKind::Goto { target: b });
435                     v.insert(b);
436                     b
437                 }
438             };
439
440             let unwind_to = next_scope.cached_unwind.get(false).unwrap_or_else(|| {
441                 debug_assert!(!may_panic, "cached block not present?");
442                 START_BLOCK
443             });
444
445             unpack!(block = build_scope_drops(
446                 &mut self.cfg,
447                 scope,
448                 block,
449                 unwind_to,
450                 self.arg_count,
451                 false,
452             ));
453
454             scope = next_scope;
455         }
456
457         let scope = &self.scopes[len - scope_count];
458         self.cfg.terminate(block, scope.source_info(span),
459                            TerminatorKind::Goto { target });
460     }
461
462     /// Creates a path that performs all required cleanup for dropping a generator.
463     ///
464     /// This path terminates in GeneratorDrop. Returns the start of the path.
465     /// None indicates there’s no cleanup to do at this point.
466     pub fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> {
467         if !self.scopes.iter().any(|scope| scope.needs_cleanup) {
468             return None;
469         }
470
471         // Fill in the cache for unwinds
472         self.diverge_cleanup_gen(true);
473
474         let src_info = self.scopes[0].source_info(self.fn_span);
475         let resume_block = self.resume_block();
476         let mut scopes = self.scopes.iter_mut().rev().peekable();
477         let mut block = self.cfg.start_new_block();
478         let result = block;
479
480         while let Some(scope) = scopes.next() {
481             if !scope.needs_cleanup {
482                 continue;
483             }
484
485             block = if let Some(b) = scope.cached_generator_drop {
486                 self.cfg.terminate(block, src_info,
487                                    TerminatorKind::Goto { target: b });
488                 return Some(result);
489             } else {
490                 let b = self.cfg.start_new_block();
491                 scope.cached_generator_drop = Some(b);
492                 self.cfg.terminate(block, src_info,
493                                    TerminatorKind::Goto { target: b });
494                 b
495             };
496
497             let unwind_to = scopes.peek().as_ref().map(|scope| {
498                 scope.cached_unwind.get(true).unwrap_or_else(|| {
499                     span_bug!(src_info.span, "cached block not present?")
500                 })
501             }).unwrap_or(resume_block);
502
503             unpack!(block = build_scope_drops(
504                 &mut self.cfg,
505                 scope,
506                 block,
507                 unwind_to,
508                 self.arg_count,
509                 true,
510             ));
511         }
512
513         self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop);
514
515         Some(result)
516     }
517
518     /// Creates a new source scope, nested in the current one.
519     pub fn new_source_scope(&mut self,
520                             span: Span,
521                             lint_level: LintLevel,
522                             safety: Option<Safety>) -> SourceScope {
523         let parent = self.source_scope;
524         debug!("new_source_scope({:?}, {:?}, {:?}) - parent({:?})={:?}",
525                span, lint_level, safety,
526                parent, self.source_scope_local_data.get(parent));
527         let scope = self.source_scopes.push(SourceScopeData {
528             span,
529             parent_scope: Some(parent),
530         });
531         let scope_local_data = SourceScopeLocalData {
532             lint_root: if let LintLevel::Explicit(lint_root) = lint_level {
533                 lint_root
534             } else {
535                 self.source_scope_local_data[parent].lint_root
536             },
537             safety: safety.unwrap_or_else(|| {
538                 self.source_scope_local_data[parent].safety
539             })
540         };
541         self.source_scope_local_data.push(scope_local_data);
542         scope
543     }
544
545     // Finding scopes
546     // ==============
547     /// Finds the breakable scope for a given label. This is used for
548     /// resolving `break` and `continue`.
549     pub fn find_breakable_scope(&self,
550                                 span: Span,
551                                 label: region::Scope)
552                                 -> &BreakableScope<'tcx> {
553         // find the loop-scope with the correct id
554         self.breakable_scopes.iter()
555             .rev()
556             .find(|breakable_scope| breakable_scope.region_scope == label)
558             .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found"))
559     }
560
561     /// Given a span and the current source scope, make a SourceInfo.
562     pub fn source_info(&self, span: Span) -> SourceInfo {
563         SourceInfo {
564             span,
565             scope: self.source_scope
566         }
567     }
568
569     /// Returns the `region::Scope` of the scope which should be exited by a
570     /// return.
571     pub fn region_scope_of_return_scope(&self) -> region::Scope {
572         // The outermost scope (`scopes[0]`) will be the `CallSiteScope`.
573         // We want `scopes[1]`, which is the `ParameterScope`.
574         assert!(self.scopes.len() >= 2);
575         assert!(match self.scopes[1].region_scope.data {
576             region::ScopeData::Arguments => true,
577             _ => false,
578         });
579         self.scopes[1].region_scope
580     }
581
582     /// Returns the topmost active scope, which is known to be alive until
583     /// the next scope expression.
584     pub fn topmost_scope(&self) -> region::Scope {
585         self.scopes.last().expect("topmost_scope: no scopes present").region_scope
586     }
587
588     /// Returns the scope that we should use as the lifetime of an
589     /// operand. Basically, an operand must live until it is consumed.
590     /// This is similar to, but not quite the same as, the temporary
591     /// scope (which can be larger or smaller).
592     ///
593     /// Consider:
594     ///
595     ///     let x = foo(bar(X, Y));
596     ///
597     /// We wish to pop the storage for X and Y after `bar()` is
598     /// called, not after the whole `let` is completed.
599     ///
600     /// As another example, if the second argument diverges:
601     ///
602     ///     foo(Box::new(2), panic!())
603     ///
604     /// We would allocate the box but then free it on the unwinding
605     /// path; we would also emit a free on the 'success' path from
606     /// panic, but that will turn out to be removed as dead-code.
607     ///
608     /// When building statics/constants, returns `None` since
609     /// intermediate values do not have to be dropped in that case.
610     pub fn local_scope(&self) -> Option<region::Scope> {
611         match self.hir.body_owner_kind {
612             hir::BodyOwnerKind::Const |
613             hir::BodyOwnerKind::Static(_) =>
614                 // No need to free storage in this context.
615                 None,
616             hir::BodyOwnerKind::Closure |
617             hir::BodyOwnerKind::Fn =>
618                 Some(self.topmost_scope()),
619         }
620     }
621
622     /// Schedule an abort block - this is used for some ABIs that cannot unwind
623     pub fn schedule_abort(&mut self) -> BasicBlock {
624         self.scopes[0].needs_cleanup = true;
625         let abortblk = self.cfg.start_new_cleanup_block();
626         let source_info = self.scopes[0].source_info(self.fn_span);
627         self.cfg.terminate(abortblk, source_info, TerminatorKind::Abort);
628         self.cached_resume_block = Some(abortblk);
629         abortblk
630     }
631
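    /// Schedules both the `StorageDead` and the value `Drop` of `place` on
    /// exit from `region_scope`, by calling `schedule_drop` twice.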
632     pub fn schedule_drop_storage_and_value(
633         &mut self,
634         span: Span,
635         region_scope: region::Scope,
636         place: &Place<'tcx>,
637         place_ty: Ty<'tcx>,
638     ) {
639         self.schedule_drop(
640             span, region_scope, place, place_ty,
641             DropKind::Storage,
642         );
643         self.schedule_drop(
644             span, region_scope, place, place_ty,
645             DropKind::Value {
646                 cached_block: CachedBlock::default(),
647             },
648         );
649     }
650
651     // Scheduling drops
652     // ================
653     /// Indicates that `place` should be dropped on exit from
654     /// `region_scope`.
655     ///
656     /// When called with `DropKind::Storage`, `place` should be a local
657     /// with an index higher than the current `self.arg_count`.
658     pub fn schedule_drop(
659         &mut self,
660         span: Span,
661         region_scope: region::Scope,
662         place: &Place<'tcx>,
663         place_ty: Ty<'tcx>,
664         drop_kind: DropKind,
665     ) {
666         let needs_drop = self.hir.needs_drop(place_ty);
667         match drop_kind {
668             DropKind::Value { .. } => if !needs_drop { return },
669             DropKind::Storage => {
670                 match *place {
671                     Place::Local(index) => if index.index() <= self.arg_count {
672                         span_bug!(
673                             span, "`schedule_drop` called with index {} and arg_count {}",
674                             index.index(),
675                             self.arg_count,
676                         )
677                     },
678                     _ => span_bug!(
679                         span, "`schedule_drop` called with non-`Local` place {:?}", place
680                     ),
681                 }
682             }
683         }
684
685         for scope in self.scopes.iter_mut().rev() {
686             let this_scope = scope.region_scope == region_scope;
687             // When building drops, we try to cache chains of drops in such a way that these drops
688             // can be reused by later drops which branch into the cached (already built)
689             // blocks.  This, however, means that whenever we add a drop into a scope which already
690             // had some blocks built (and thus, cached) for it, we must invalidate all caches which
691             // might branch into the scope which had a drop just added to it. This is necessary,
692             // because otherwise some other code might use the cache to branch into an already built
693             // chain of drops, essentially ignoring the newly added drop.
694             //
695             // For example, consider two scopes with a drop in each. These are built and
696             // thus the caches are filled:
697             //
698             // +--------------------------------------------------------+
699             // | +---------------------------------+                    |
700             // | | +--------+     +-------------+  |  +---------------+ |
701             // | | | return | <-+ | drop(outer) | <-+ |  drop(middle) | |
702             // | | +--------+     +-------------+  |  +---------------+ |
703             // | +------------|outer_scope cache|--+                    |
704             // +------------------------------|middle_scope cache|------+
705             //
706             // Now, a new, inner-most scope is added along with a new drop into both inner-most and
707             // outer-most scopes:
708             //
709             // +------------------------------------------------------------+
710             // | +----------------------------------+                       |
711             // | | +--------+      +-------------+  |   +---------------+   | +-------------+
712             // | | | return | <+   | drop(new)   | <-+  |  drop(middle) | <--+| drop(inner) |
713             // | | +--------+  |   | drop(outer) |  |   +---------------+   | +-------------+
714             // | |             +-+ +-------------+  |                       |
715             // | +---|invalid outer_scope cache|----+                       |
716             // +---------------------|invalid middle_scope cache|-----------+
717             //
718             // If, when adding `drop(new)` we do not invalidate the cached blocks for both
719             // outer_scope and middle_scope, then, when building drops for the inner (right-most)
720             // scope, the old, cached blocks, without `drop(new)` will get used, producing the
721             // wrong results.
722             //
723             // The cache and its invalidation for the unwind branch is somewhat special. The cache is
724             // per-drop, rather than per-scope, which has several different implications. Adding
725             // a new drop into a scope will not invalidate cached blocks of the prior drops in the
726             // scope. That is true, because none of the already existing drops will have an edge
727             // into a block with the newly added drop.
728             //
729             // Note that this code iterates scopes from the inner-most to the outer-most,
730             // invalidating caches of each scope visited. This way the bare minimum of
731             // caches gets invalidated, i.e., if a new drop is added into the middle scope, the
732             // cache of the outer scope stays intact.
733             scope.invalidate_cache(!needs_drop, this_scope);
734             if this_scope {
735                 if let DropKind::Value { .. } = drop_kind {
736                     scope.needs_cleanup = true;
737                 }
738
739                 let region_scope_span = region_scope.span(self.hir.tcx(),
740                                                           &self.hir.region_scope_tree);
741                 // Attribute scope exit drops to scope's closing brace.
742                 let scope_end = self.hir.tcx().sess.source_map().end_point(region_scope_span);
743
744                 scope.drops.push(DropData {
745                     span: scope_end,
746                     location: place.clone(),
747                     kind: drop_kind
748                 });
749                 return;
750             }
751         }
752         span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, place);
753     }
754
755     // Other
756     // =====
757     /// Creates a path that performs all required cleanup for unwinding.
758     ///
759     /// This path terminates in Resume. Returns the start of the path.
760     /// See module comment for more details.
761     pub fn diverge_cleanup(&mut self) -> BasicBlock {
762         self.diverge_cleanup_gen(false)
763     }
764
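    /// Returns the cached "unwind to" block for this function: a cleanup block
    /// terminated by `Resume`, created on first use (but see `schedule_abort`,
    /// which installs an `Abort` block instead for ABIs that cannot unwind).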
765     fn resume_block(&mut self) -> BasicBlock {
766         if let Some(target) = self.cached_resume_block {
767             target
768         } else {
769             let resumeblk = self.cfg.start_new_cleanup_block();
770             self.cfg.terminate(resumeblk,
771                                SourceInfo {
772                                    scope: OUTERMOST_SOURCE_SCOPE,
773                                    span: self.fn_span
774                                },
775                                TerminatorKind::Resume);
776             self.cached_resume_block = Some(resumeblk);
777             resumeblk
778         }
779     }
780
781     fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> BasicBlock {
782         // Build up the drops in **reverse** order. The end result will
783         // look like:
784         //
785         //    scopes[n] -> scopes[n-1] -> ... -> scopes[0]
786         //
787         // However, we build this in **reverse order**. That is, we
788         // process scopes[0], then scopes[1], etc, pointing each one at
789         // the result generated from the one before. Along the way, we
790         // store caches. If everything is cached, we'll just walk right
791         // to left reading the cached results but never create anything.
792
793         // Find the last cached block
794         let (mut target, first_uncached) = if let Some(cached_index) = self.scopes.iter()
795             .rposition(|scope| scope.cached_unwind.get(generator_drop).is_some()) {
796             (self.scopes[cached_index].cached_unwind.get(generator_drop).unwrap(), cached_index + 1)
797         } else {
798             (self.resume_block(), 0)
799         };
800
801         for scope in self.scopes[first_uncached..].iter_mut() {
802             target = build_diverge_scope(&mut self.cfg, scope.region_scope_span,
803                                          scope, target, generator_drop);
804         }
805
806         target
807     }
808
809     /// Utility function for *non*-scope code to build its own drops
810     pub fn build_drop(&mut self,
811                       block: BasicBlock,
812                       span: Span,
813                       location: Place<'tcx>,
814                       ty: Ty<'tcx>) -> BlockAnd<()> {
815         if !self.hir.needs_drop(ty) {
816             return block.unit();
817         }
818         let source_info = self.source_info(span);
819         let next_target = self.cfg.start_new_block();
820         let diverge_target = self.diverge_cleanup();
821         self.cfg.terminate(block, source_info,
822                            TerminatorKind::Drop {
823                                location,
824                                target: next_target,
825                                unwind: Some(diverge_target),
826                            });
827         next_target.unit()
828     }
829
830     /// Utility function for *non*-scope code to build its own drops
831     pub fn build_drop_and_replace(&mut self,
832                                   block: BasicBlock,
833                                   span: Span,
834                                   location: Place<'tcx>,
835                                   value: Operand<'tcx>) -> BlockAnd<()> {
836         let source_info = self.source_info(span);
837         let next_target = self.cfg.start_new_block();
838         let diverge_target = self.diverge_cleanup();
839         self.cfg.terminate(block, source_info,
840                            TerminatorKind::DropAndReplace {
841                                location,
842                                value,
843                                target: next_target,
844                                unwind: Some(diverge_target),
845                            });
846         next_target.unit()
847     }
848
849     /// Creates an Assert terminator and returns the success block.
850     /// If the boolean condition operand is not the expected value,
851     /// a runtime panic will be caused with the given message.
852     pub fn assert(&mut self, block: BasicBlock,
853                   cond: Operand<'tcx>,
854                   expected: bool,
855                   msg: AssertMessage<'tcx>,
856                   span: Span)
857                   -> BasicBlock {
858         let source_info = self.source_info(span);
859
860         let success_block = self.cfg.start_new_block();
861         let cleanup = self.diverge_cleanup();
862
863         self.cfg.terminate(block, source_info,
864                            TerminatorKind::Assert {
865                                cond,
866                                expected,
867                                msg,
868                                target: success_block,
869                                cleanup: Some(cleanup),
870                            });
871
872         success_block
873     }
874 }
875
876 /// Builds drops for pop_scope and exit_scope.
877 fn build_scope_drops<'tcx>(
878     cfg: &mut CFG<'tcx>,
879     scope: &Scope<'tcx>,
880     mut block: BasicBlock,
881     last_unwind_to: BasicBlock,
882     arg_count: usize,
883     generator_drop: bool,
884 ) -> BlockAnd<()> {
885     debug!("build_scope_drops({:?} -> {:?})", block, scope);
886
887     // Build up the drops in evaluation order. The end result will
888     // look like:
889     //
890     // [SDs, drops[n]] --..> [SDs, drop[1]] -> [SDs, drop[0]] -> [[SDs]]
891     //               |                    |                 |
892     //               :                    |                 |
893     //                                    V                 V
894     // [drop[n]] -...-> [drop[1]] ------> [drop[0]] ------> [last_unwind_to]
895     //
896     // The horizontal arrows represent the execution path when the drops return
897     // successfully. The downwards arrows represent the execution path when the
898     // drops panic (panicking while unwinding will abort, so there's no need for
899     // another set of arrows). The drops for the unwind path should have already
900     // been generated by `diverge_cleanup_gen`.
901     //
902     // The code in this function reads from right to left.
903     // We walk the drops in reverse declaration order: each value drop becomes
904     // a `Drop` terminator in a fresh block, while each storage drop is pushed
905     // as a `StorageDead` statement onto the current block (generator drops
906     // skip the `StorageDead` statements entirely).
907
908     let mut unwind_blocks = scope.drops.iter().rev().filter_map(|drop_data| {
909         if let DropKind::Value { cached_block } = drop_data.kind {
910             Some(cached_block.get(generator_drop).unwrap_or_else(|| {
911                 span_bug!(drop_data.span, "cached block not present?")
912             }))
913         } else {
914             None
915         }
916     });
917
918     // When we unwind from a drop, we start cleaning up from the next one, so
919     // we don't need this block.
920     unwind_blocks.next();
921
922     for drop_data in scope.drops.iter().rev() {
923         let source_info = scope.source_info(drop_data.span);
924         match drop_data.kind {
925             DropKind::Value { .. } => {
926                 let unwind_to = unwind_blocks.next().unwrap_or(last_unwind_to);
927
928                 let next = cfg.start_new_block();
929                 cfg.terminate(block, source_info, TerminatorKind::Drop {
930                     location: drop_data.location.clone(),
931                     target: next,
932                     unwind: Some(unwind_to)
933                 });
934                 block = next;
935             }
936             DropKind::Storage => {
937                 // We do not need to emit StorageDead for generator drops
938                 if generator_drop {
939                     continue
940                 }
941
942                 // Only temps and vars need their storage dead; other places
943                 // should never have a storage drop scheduled (see `schedule_drop`).
944                 match drop_data.location {
945                     Place::Local(index) if index.index() > arg_count => {
946                         cfg.push(block, Statement {
947                             source_info,
948                             kind: StatementKind::StorageDead(index)
949                         });
950                     }
951                     _ => unreachable!(),
952                 }
953             }
954         }
955     }
956     block.unit()
957 }
958
959 fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
960                              span: Span,
961                              scope: &mut Scope<'tcx>,
962                              mut target: BasicBlock,
963                              generator_drop: bool)
964                              -> BasicBlock
965 {
966     // Build up the drops in **reverse** order. The end result will
967     // look like:
968     //
969     //    [drops[n]] -...-> [drops[0]] -> [target]
970     //
971     // The code in this function reads from right to left. At each
972     // point, we check for cached blocks representing the
973     // remainder. If everything is cached, we'll just walk right to
974     // left reading the cached results but never create anything.
975
976     let source_scope = scope.source_scope;
977     let source_info = |span| SourceInfo {
978         span,
979         scope: source_scope
980     };
981
982     // Next, build up the drops. Here we iterate the vector in
983     // *forward* order, so that we generate drops[0] first (right to
984     // left in diagram above).
985     for (j, drop_data) in scope.drops.iter_mut().enumerate() {
986         debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data);
987         // Only full value drops are emitted in the diverging path,
988         // not StorageDead.
989         //
990         // Note: This may not actually be what we desire (are we
991         // "freeing" stack storage as we unwind, or merely observing a
992         // frozen stack)? In particular, the intent may have been to
993         // match the behavior of clang, but on inspection eddyb says
994         // this is not what clang does.
995         let cached_block = match drop_data.kind {
996             DropKind::Value { ref mut cached_block } => cached_block.ref_mut(generator_drop),
997             DropKind::Storage => continue
998         };
999         target = if let Some(cached_block) = *cached_block {
1000             cached_block
1001         } else {
1002             let block = cfg.start_new_cleanup_block();
1003             cfg.terminate(block, source_info(drop_data.span),
1004                           TerminatorKind::Drop {
1005                               location: drop_data.location.clone(),
1006                               target,
1007                               unwind: None
1008                           });
1009             *cached_block = Some(block);
1010             block
1011         };
1012     }
1013
1014     *scope.cached_unwind.ref_mut(generator_drop) = Some(target);
1015
1016     debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target);
1017
1018     target
1019 }