// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/*!
Managing the scope stack. The scopes are tied to lexical scopes, so as
we descend the HAIR, we push a scope on the stack, build its
contents, and then pop it off. Every scope is named by a
`region::Scope`.

### SEME Regions

When pushing a new scope, we record the current point in the graph (a
basic block); this marks the entry to the scope. We then generate more
stuff in the control-flow graph. Whenever the scope is exited, either
via a `break` or `return` or just by fallthrough, that marks an exit
from the scope. Each lexical scope thus corresponds to a single-entry,
multiple-exit (SEME) region in the control-flow graph.

For now, we keep a mapping from each `region::Scope` to its
corresponding SEME region for later reference (see caveat in next
paragraph). This is because region scopes are tied to these SEME
regions. Eventually, when we shift to non-lexical lifetimes, there
should be no need to remember this mapping.

There is one additional wrinkle, actually, that I wanted to hide from
you but duty compels me to mention. In the course of building
matches, it sometimes happens that certain code (namely guards) gets
executed multiple times. This means that a single lexical scope may
in fact correspond to multiple, disjoint SEME regions. So in fact our
mapping is from one scope to a vector of SEME regions.

### Drops

The primary purpose for scopes is to insert drops: while building
the contents, we also accumulate places that need to be dropped upon
exit from each scope. This is done by calling `schedule_drop`. Once a
drop is scheduled, whenever we branch out we will insert drops of all
those places onto the outgoing edge. Note that we don't know the full
set of scheduled drops up front, and so whenever we exit from the
scope we only drop the values scheduled thus far. For example, consider
the scope S corresponding to this loop:

```
# let cond = true;
loop {
    let x = ..;
    if cond { break; }
    let y = ..;
}
```

When processing the `let x`, we will add one drop to the scope for
`x`. The break will then insert a drop for `x`. When we process `let
y`, we will add another drop (in fact, to a subscope, but let's ignore
that for now); any later drops would also drop `y`.
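
To make those emitted drops explicit, here is the same loop annotated
with the drops each exit edge receives (a schematic, not actual
compiler output):

```
# let cond = true;
loop {
    let x = ..;
    // `break` exits scope S while only `x` is scheduled, so only
    // `drop(x)` is emitted on the break edge.
    if cond { break; }
    let y = ..;
    // Fallthrough to the next iteration exits the body scope after
    // both drops are scheduled, so `drop(y)` and then `drop(x)` are
    // emitted (in reverse declaration order).
}
```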

### Early exit

There are numerous "normal" ways to exit a scope early: `break`,
`continue`, `return` (panics are handled separately). Whenever an
early exit occurs, the method `exit_scope` is called. It is given the
current point in execution where the early exit occurs, as well as the
scope you want to branch to (note that all early exits branch to some
other enclosing scope). `exit_scope` will record this exit point and
also add all drops.
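
For instance, in a function like the following, the comments describe
the edges `exit_scope` produces (a schematic):

```
fn f(cond: bool) -> i32 {
    let s = String::from("hello");
    if cond {
        // Early exit: `exit_scope` is called at the current block with
        // the return scope as target; `drop(s)` is inserted on this
        // edge before it branches to the return block.
        return 1;
    }
    // The fallthrough exit likewise gets `drop(s)` when the scope is
    // popped.
    2
}
```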

Panics are handled in a similar fashion, except that a panic always
returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call
`panic(p)` with the current point `p`. Or else you can call
`diverge_cleanup`, which will produce a block that you can branch to
which does the appropriate cleanup and then diverges. `panic(p)`
simply calls `diverge_cleanup()` and adds an edge from `p` to the
result.

### Loop scopes

In addition to the normal scope stack, we track a loop scope stack
that contains only loops. It tracks where `break` and `continue`
statements should branch to.
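
With nested loops, `break` and `continue` consult this stack, and
labels select an outer entry (a schematic, mirroring the example
above):

```
# let cond = true;
'outer: loop {
    loop {
        // `continue 'outer` exits the inner loop scope (inserting its
        // drops) and branches to the continue block recorded for the
        // outer entry in the loop scope stack.
        if cond { continue 'outer; }
        // An unlabeled `break` targets the innermost breakable scope.
        break;
    }
}
```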

*/

use build::{BlockAnd, BlockAndExtension, Builder, CFG};
use hair::LintLevel;
use rustc::middle::region;
use rustc::ty::{Ty, TyCtxt};
use rustc::hir;
use rustc::hir::def_id::LOCAL_CRATE;
use rustc::mir::*;
use syntax_pos::{Span};
use rustc_data_structures::fx::FxHashMap;

#[derive(Debug)]
pub struct Scope<'tcx> {
    /// The source scope this scope was created in.
    source_scope: SourceScope,

    /// The region span of this scope within source code.
    region_scope: region::Scope,

    /// The span of that `region_scope`.
    region_scope_span: Span,

    /// Whether there's anything to do for the cleanup path, that is,
    /// when unwinding through this scope. This includes destructors,
    /// but not StorageDead statements, which don't get emitted at all
    /// for unwinding, for several reasons:
    ///  * clang doesn't emit llvm.lifetime.end for C++ unwinding
    ///  * LLVM's memory dependency analysis can't handle it atm
    ///  * polluting the cleanup MIR with StorageDead creates
    ///    landing pads even though there are no actual destructors
    ///  * freeing up stack space has no effect during unwinding
    needs_cleanup: bool,

    /// Set of places to drop when exiting this scope. This starts
    /// out empty but grows as variables are declared during the
    /// building process. This is a stack, so we always drop from the
    /// end of the vector (top of the stack) first.
    drops: Vec<DropData<'tcx>>,

    /// The cache for the drop chain on “normal” exit into a particular BasicBlock.
    cached_exits: FxHashMap<(BasicBlock, region::Scope), BasicBlock>,

    /// The cache for the drop chain on "generator drop" exit.
    cached_generator_drop: Option<BasicBlock>,

    /// The cache for the drop chain on "unwind" exit.
    cached_unwind: CachedBlock,
}

#[derive(Debug)]
struct DropData<'tcx> {
    /// The span where the drop obligation was incurred (typically where the place was declared).
    span: Span,

    /// The place to drop.
    location: Place<'tcx>,

    /// Whether this is a value Drop or a StorageDead.
    kind: DropKind,
}

#[derive(Debug, Default, Clone, Copy)]
pub(crate) struct CachedBlock {
    /// The cached block for the cleanups-on-diverge path. This block
    /// contains code to run the current drop and all the preceding
    /// drops (i.e. those having lower index in Drop’s Scope drop
    /// array)
    unwind: Option<BasicBlock>,

    /// The cached block for unwinds during the cleanups-on-generator-drop path
    ///
    /// This is split from the standard unwind path here to prevent drop
    /// elaboration from creating drop flags that would have to be captured
    /// by the generator. I'm not sure how important this optimization is,
    /// but it is here.
    generator_drop: Option<BasicBlock>,
}

#[derive(Debug)]
pub(crate) enum DropKind {
    Value {
        cached_block: CachedBlock,
    },
    Storage
}

#[derive(Clone, Debug)]
pub struct BreakableScope<'tcx> {
    /// Region scope of the loop
    pub region_scope: region::Scope,
    /// Where the body of the loop begins; `None` if this breakable
    /// scope is a block rather than a loop.
    pub continue_block: Option<BasicBlock>,
    /// Block to branch into when the loop or block terminates (either by being
    /// broken out of with `break`, or by its condition becoming false)
    pub break_block: BasicBlock,
    /// The destination of the loop/block expression itself (i.e. where to put the result of a
    /// `break` expression)
    pub break_destination: Place<'tcx>,
}

impl CachedBlock {
    fn invalidate(&mut self) {
        self.generator_drop = None;
        self.unwind = None;
    }

    fn get(&self, generator_drop: bool) -> Option<BasicBlock> {
        if generator_drop {
            self.generator_drop
        } else {
            self.unwind
        }
    }

    fn ref_mut(&mut self, generator_drop: bool) -> &mut Option<BasicBlock> {
        if generator_drop {
            &mut self.generator_drop
        } else {
            &mut self.unwind
        }
    }
}

impl DropKind {
    fn may_panic(&self) -> bool {
        match *self {
            DropKind::Value { .. } => true,
            DropKind::Storage => false
        }
    }
}

impl<'tcx> Scope<'tcx> {
    /// Invalidate all the cached blocks in the scope.
    ///
    /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
    /// larger extent of code.
    ///
    /// `storage_only` controls whether to invalidate only the drop paths that run `StorageDead`.
    /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
    /// top-of-scope (as opposed to dependent scopes).
    fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
        // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
        // with lots of `try!`?

        // cached exits drop storage and refer to the top-of-scope
        self.cached_exits.clear();

        if !storage_only {
            // the current generator drop and unwind ignore
            // storage but refer to top-of-scope
            self.cached_generator_drop = None;
            self.cached_unwind.invalidate();
        }

        if !storage_only && !this_scope_only {
            for dropdata in &mut self.drops {
                if let DropKind::Value { ref mut cached_block } = dropdata.kind {
                    cached_block.invalidate();
                }
            }
        }
    }

    /// Given a span and this scope's source scope, make a SourceInfo.
    fn source_info(&self, span: Span) -> SourceInfo {
        SourceInfo {
            span,
            scope: self.source_scope
        }
    }
}

impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
    // Adding and removing scopes
    // ==========================
    /// Start a breakable scope, which tracks where `continue` and `break`
    /// should branch to. See module comment for more details.
    ///
    /// Returns the result of the closure `f`.
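    ///
    /// For example, lowering a `loop` might use it roughly like this (a
    /// hypothetical sketch, not code from this crate; `this`, `destination`
    /// and the block names are stand-ins):
    ///
    /// ```ignore
    /// let loop_block = this.cfg.start_new_block();
    /// let exit_block = this.cfg.start_new_block();
    /// this.in_breakable_scope(Some(loop_block), exit_block, destination, |this| {
    ///     // Build the loop body here; `break` and `continue` expressions
    ///     // find their targets through the pushed `BreakableScope`.
    /// });
    /// ```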
    pub fn in_breakable_scope<F, R>(&mut self,
                                    loop_block: Option<BasicBlock>,
                                    break_block: BasicBlock,
                                    break_destination: Place<'tcx>,
                                    f: F) -> R
        where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> R
    {
        let region_scope = self.topmost_scope();
        let scope = BreakableScope {
            region_scope,
            continue_block: loop_block,
            break_block,
            break_destination,
        };
        self.breakable_scopes.push(scope);
        let res = f(self);
        let breakable_scope = self.breakable_scopes.pop().unwrap();
        assert!(breakable_scope.region_scope == region_scope);
        res
    }

    /// Convenience wrapper that pushes a scope if `opt_scope` is `Some`,
    /// executes `f` to build its contents, and pops the scope afterwards.
    pub fn in_opt_scope<F, R>(&mut self,
                              opt_scope: Option<(region::Scope, SourceInfo)>,
                              mut block: BasicBlock,
                              f: F)
                              -> BlockAnd<R>
        where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
    {
        debug!("in_opt_scope(opt_scope={:?}, block={:?})", opt_scope, block);
        if let Some(region_scope) = opt_scope { self.push_scope(region_scope); }
        let rv = unpack!(block = f(self));
        if let Some(region_scope) = opt_scope {
            unpack!(block = self.pop_scope(region_scope, block));
        }
        debug!("in_scope: exiting opt_scope={:?} block={:?}", opt_scope, block);
        block.and(rv)
    }

    /// Convenience wrapper that pushes a scope and then executes `f`
    /// to build its contents, popping the scope afterwards.
    pub fn in_scope<F, R>(&mut self,
                          region_scope: (region::Scope, SourceInfo),
                          lint_level: LintLevel,
                          mut block: BasicBlock,
                          f: F)
                          -> BlockAnd<R>
        where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
    {
        debug!("in_scope(region_scope={:?}, block={:?})", region_scope, block);
        let source_scope = self.source_scope;
        let tcx = self.hir.tcx();
        if let LintLevel::Explicit(node_id) = lint_level {
            let same_lint_scopes = tcx.dep_graph.with_ignore(|| {
                let sets = tcx.lint_levels(LOCAL_CRATE);
                let parent_hir_id =
                    tcx.hir.definitions().node_to_hir_id(
                        self.source_scope_local_data[source_scope].lint_root
                    );
                let current_hir_id =
                    tcx.hir.definitions().node_to_hir_id(node_id);
                sets.lint_level_set(parent_hir_id) ==
                    sets.lint_level_set(current_hir_id)
            });

            if !same_lint_scopes {
                self.source_scope =
                    self.new_source_scope(region_scope.1.span, lint_level,
                                          None);
            }
        }
        self.push_scope(region_scope);
        let rv = unpack!(block = f(self));
        unpack!(block = self.pop_scope(region_scope, block));
        self.source_scope = source_scope;
        debug!("in_scope: exiting region_scope={:?} block={:?}", region_scope, block);
        block.and(rv)
    }

    /// Push a scope onto the stack. You can then build code in this
    /// scope and call `pop_scope` afterwards. Note that these two
    /// calls must be paired; using `in_scope` as a convenience
    /// wrapper may be preferable.
    pub fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo)) {
        debug!("push_scope({:?})", region_scope);
        let vis_scope = self.source_scope;
        self.scopes.push(Scope {
            source_scope: vis_scope,
            region_scope: region_scope.0,
            region_scope_span: region_scope.1.span,
            needs_cleanup: false,
            drops: vec![],
            cached_generator_drop: None,
            cached_exits: Default::default(),
            cached_unwind: CachedBlock::default(),
        });
    }

    /// Pops a scope, which should have region scope `region_scope`,
    /// adding any drops onto the end of `block` that are needed.
    /// This must match 1-to-1 with `push_scope`.
    pub fn pop_scope(&mut self,
                     region_scope: (region::Scope, SourceInfo),
                     mut block: BasicBlock)
                     -> BlockAnd<()> {
        debug!("pop_scope({:?}, {:?})", region_scope, block);
        // If we are emitting a `drop` statement, we need to have the cached
        // diverge cleanup pads ready in case that drop panics.
        let may_panic =
            self.scopes.last().unwrap().drops.iter().any(|s| s.kind.may_panic());
        if may_panic {
            self.diverge_cleanup();
        }
        let scope = self.scopes.pop().unwrap();
        assert_eq!(scope.region_scope, region_scope.0);

        self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope);
        let resume_block = self.resume_block();
        unpack!(block = build_scope_drops(&mut self.cfg,
                                          resume_block,
                                          &scope,
                                          &self.scopes,
                                          block,
                                          self.arg_count,
                                          false));

        block.unit()
    }

    /// Branch out of `block` to `target`, exiting all scopes up to
    /// and including `region_scope`. This will insert whatever drops are
    /// needed, as well as tracking this exit for the SEME region. See
    /// module comment for details.
    pub fn exit_scope(&mut self,
                      span: Span,
                      region_scope: (region::Scope, SourceInfo),
                      mut block: BasicBlock,
                      target: BasicBlock) {
        debug!("exit_scope(region_scope={:?}, block={:?}, target={:?})",
               region_scope, block, target);
        let scope_count = 1 + self.scopes.iter().rev()
            .position(|scope| scope.region_scope == region_scope.0)
            .unwrap_or_else(|| {
                span_bug!(span, "region_scope {:?} does not enclose", region_scope)
            });
        let len = self.scopes.len();
        assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes");

        // If we are emitting a `drop` statement, we need to have the cached
        // diverge cleanup pads ready in case that drop panics.
        let may_panic = self.scopes[(len - scope_count)..].iter()
            .any(|s| s.drops.iter().any(|s| s.kind.may_panic()));
        if may_panic {
            self.diverge_cleanup();
        }

        {
            let resume_block = self.resume_block();
            let mut rest = &mut self.scopes[(len - scope_count)..];
            while let Some((scope, rest_)) = {rest}.split_last_mut() {
                rest = rest_;
                block = if let Some(&e) = scope.cached_exits.get(&(target, region_scope.0)) {
                    self.cfg.terminate(block, scope.source_info(span),
                                       TerminatorKind::Goto { target: e });
                    return;
                } else {
                    let b = self.cfg.start_new_block();
                    self.cfg.terminate(block, scope.source_info(span),
                                       TerminatorKind::Goto { target: b });
                    scope.cached_exits.insert((target, region_scope.0), b);
                    b
                };

                // End all regions for scopes out of which we are breaking.
                self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope);

                unpack!(block = build_scope_drops(&mut self.cfg,
                                                  resume_block,
                                                  scope,
                                                  rest,
                                                  block,
                                                  self.arg_count,
                                                  false));
            }
        }
        let scope = &self.scopes[len - scope_count];
        self.cfg.terminate(block, scope.source_info(span),
                           TerminatorKind::Goto { target });
    }

    /// Creates a path that performs all required cleanup for dropping a generator.
    ///
    /// This path terminates in GeneratorDrop. Returns the start of the path.
    /// None indicates there’s no cleanup to do at this point.
    pub fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> {
        if !self.scopes.iter().any(|scope| scope.needs_cleanup) {
            return None;
        }

        // Fill in the cache
        self.diverge_cleanup_gen(true);

        let src_info = self.scopes[0].source_info(self.fn_span);
        let mut block = self.cfg.start_new_block();
        let result = block;
        let resume_block = self.resume_block();
        let mut rest = &mut self.scopes[..];

        while let Some((scope, rest_)) = {rest}.split_last_mut() {
            rest = rest_;
            if !scope.needs_cleanup {
                continue;
            }
            block = if let Some(b) = scope.cached_generator_drop {
                self.cfg.terminate(block, src_info,
                                   TerminatorKind::Goto { target: b });
                return Some(result);
            } else {
                let b = self.cfg.start_new_block();
                scope.cached_generator_drop = Some(b);
                self.cfg.terminate(block, src_info,
                                   TerminatorKind::Goto { target: b });
                b
            };

            // End all regions for scopes out of which we are breaking.
            self.cfg.push_end_region(self.hir.tcx(), block, src_info, scope.region_scope);

            unpack!(block = build_scope_drops(&mut self.cfg,
                                              resume_block,
                                              scope,
                                              rest,
                                              block,
                                              self.arg_count,
                                              true));
        }

        self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop);

        Some(result)
    }

    /// Creates a new source scope, nested in the current one.
    pub fn new_source_scope(&mut self,
                            span: Span,
                            lint_level: LintLevel,
                            safety: Option<Safety>) -> SourceScope {
        let parent = self.source_scope;
        debug!("new_source_scope({:?}, {:?}, {:?}) - parent({:?})={:?}",
               span, lint_level, safety,
               parent, self.source_scope_local_data.get(parent));
        let scope = self.source_scopes.push(SourceScopeData {
            span,
            parent_scope: Some(parent),
        });
        let scope_local_data = SourceScopeLocalData {
            lint_root: if let LintLevel::Explicit(lint_root) = lint_level {
                lint_root
            } else {
                self.source_scope_local_data[parent].lint_root
            },
            safety: safety.unwrap_or_else(|| {
                self.source_scope_local_data[parent].safety
            })
        };
        self.source_scope_local_data.push(scope_local_data);
        scope
    }

    // Finding scopes
    // ==============
    /// Finds the breakable scope for a given label. This is used for
    /// resolving `break` and `continue`.
    pub fn find_breakable_scope(&self,
                                span: Span,
                                label: region::Scope)
                                -> &BreakableScope<'tcx> {
        // find the loop-scope with the correct id
        self.breakable_scopes.iter()
            .rev()
            .find(|breakable_scope| breakable_scope.region_scope == label)
            .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found"))
    }

    /// Given a span and the current source scope, make a SourceInfo.
    pub fn source_info(&self, span: Span) -> SourceInfo {
        SourceInfo {
            span,
            scope: self.source_scope
        }
    }

    /// Returns the `region::Scope` of the scope which should be exited by a
    /// return.
    pub fn region_scope_of_return_scope(&self) -> region::Scope {
        // The outermost scope (`scopes[0]`) will be the `CallSiteScope`.
        // We want `scopes[1]`, which is the `ParameterScope`.
        assert!(self.scopes.len() >= 2);
        assert!(match self.scopes[1].region_scope.data {
            region::ScopeData::Arguments => true,
            _ => false,
        });
        self.scopes[1].region_scope
    }

    /// Returns the topmost active scope, which is known to be alive until
    /// the next scope expression.
    pub fn topmost_scope(&self) -> region::Scope {
        self.scopes.last().expect("topmost_scope: no scopes present").region_scope
    }

    /// Returns the scope that we should use as the lifetime of an
    /// operand. Basically, an operand must live until it is consumed.
    /// This is similar to, but not quite the same as, the temporary
    /// scope (which can be larger or smaller).
    ///
    /// Consider:
    ///
    ///     let x = foo(bar(X, Y));
    ///
    /// We wish to pop the storage for X and Y after `bar()` is
    /// called, not after the whole `let` is completed.
    ///
    /// As another example, if the second argument diverges:
    ///
    ///     foo(Box::new(2), panic!())
    ///
    /// We would allocate the box but then free it on the unwinding
    /// path; we would also emit a free on the 'success' path from
    /// panic, but that will turn out to be removed as dead-code.
    ///
    /// When building statics/constants, returns `None` since
    /// intermediate values do not have to be dropped in that case.
    pub fn local_scope(&self) -> Option<region::Scope> {
        match self.hir.body_owner_kind {
            hir::BodyOwnerKind::Const |
            hir::BodyOwnerKind::Static(_) =>
                // No need to free storage in this context.
                None,
            hir::BodyOwnerKind::Fn =>
                Some(self.topmost_scope()),
        }
    }

    // Schedule an abort block - this is used for some ABIs that cannot unwind
    pub fn schedule_abort(&mut self) -> BasicBlock {
        self.scopes[0].needs_cleanup = true;
        let abortblk = self.cfg.start_new_cleanup_block();
        let source_info = self.scopes[0].source_info(self.fn_span);
        self.cfg.terminate(abortblk, source_info, TerminatorKind::Abort);
        self.cached_resume_block = Some(abortblk);
        abortblk
    }

    /// Schedules both a `StorageDead` and a value `Drop` for `place`,
    /// to be emitted on exit from `region_scope`.
    pub fn schedule_drop_storage_and_value(
        &mut self,
        span: Span,
        region_scope: region::Scope,
        place: &Place<'tcx>,
        place_ty: Ty<'tcx>,
    ) {
        self.schedule_drop(
            span, region_scope, place, place_ty,
            DropKind::Storage,
        );
        self.schedule_drop(
            span, region_scope, place, place_ty,
            DropKind::Value {
                cached_block: CachedBlock::default(),
            },
        );
    }

    // Scheduling drops
    // ================
    /// Indicates that `place` should be dropped on exit from
    /// `region_scope`.
    ///
    /// When called with `DropKind::Storage`, `place` should be a local
    /// with an index higher than the current `self.arg_count`.
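    ///
    /// For example (a hypothetical sketch; `this`, `scope`, `local` and
    /// `place_ty` are stand-ins for values available during building):
    ///
    /// ```ignore
    /// // Schedule dropping the value of `local` when `scope` is exited.
    /// // Caches of any scopes that could branch into `scope` are
    /// // invalidated as needed.
    /// this.schedule_drop(
    ///     span, scope, &Place::Local(local), place_ty,
    ///     DropKind::Value { cached_block: CachedBlock::default() },
    /// );
    /// ```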
    pub fn schedule_drop(
        &mut self,
        span: Span,
        region_scope: region::Scope,
        place: &Place<'tcx>,
        place_ty: Ty<'tcx>,
        drop_kind: DropKind,
    ) {
        let needs_drop = self.hir.needs_drop(place_ty);
        match drop_kind {
            DropKind::Value { .. } => if !needs_drop { return },
            DropKind::Storage => {
                match *place {
                    Place::Local(index) => if index.index() <= self.arg_count {
                        span_bug!(
                            span, "`schedule_drop` called with index {} and arg_count {}",
                            index.index(),
                            self.arg_count,
                        )
                    },
                    _ => span_bug!(
                        span, "`schedule_drop` called with non-`Local` place {:?}", place
                    ),
                }
            }
        }

        for scope in self.scopes.iter_mut().rev() {
            let this_scope = scope.region_scope == region_scope;
            // When building drops, we try to cache chains of drops in such a way that these
            // drops can be reused by drops which branch into the cached (already built)
            // blocks. This, however, means that whenever we add a drop into a scope which
            // already had some blocks built (and thus, cached) for it, we must invalidate all
            // caches which might branch into the scope which had a drop just added to it. This
            // is necessary, because otherwise some other code might use the cache to branch
            // into an already built chain of drops, essentially ignoring the newly added drop.
            //
            // For example, consider there are two scopes with a drop in each. These are built
            // and thus the caches are filled:
            //
            // +--------------------------------------------------------+
            // | +---------------------------------+                    |
            // | | +--------+     +-------------+  |  +---------------+ |
            // | | | return | <-+ | drop(outer) | <-+ |  drop(middle) | |
            // | | +--------+     +-------------+  |  +---------------+ |
            // | +------------|outer_scope cache|--+                    |
            // +------------------------------|middle_scope cache|------+
            //
            // Now, a new, inner-most scope is added along with a new drop into both inner-most and
            // outer-most scopes:
            //
            // +------------------------------------------------------------+
            // | +----------------------------------+                       |
            // | | +--------+      +-------------+  |   +---------------+   | +-------------+
            // | | | return | <+   | drop(new)   | <-+  |  drop(middle) | <--+| drop(inner) |
            // | | +--------+  |   | drop(outer) |  |   +---------------+   | +-------------+
            // | |             +-+ +-------------+  |                       |
            // | +---|invalid outer_scope cache|----+                       |
            // +---------------------|invalid middle_scope cache|-----------+
            //
            // If, when adding `drop(new)`, we do not invalidate the cached blocks for both
            // outer_scope and middle_scope, then, when building drops for the inner (right-most)
            // scope, the old, cached blocks, without `drop(new)`, will get used, producing the
            // wrong results.
            //
            // The cache and its invalidation for the unwind branch is somewhat special. The cache
            // is per-drop, rather than per-scope, which has several different implications. Adding
            // a new drop into a scope will not invalidate cached blocks of the prior drops in the
            // scope. That is true, because none of the already existing drops will have an edge
            // into a block with the newly added drop.
            //
            // Note that this code iterates scopes from the inner-most to the outer-most,
            // invalidating caches of each scope visited. This way the bare minimum of the
            // caches gets invalidated; i.e., if a new drop is added into the middle scope, the
            // cache of the outer scope stays intact.
            scope.invalidate_cache(!needs_drop, this_scope);
            if this_scope {
                if let DropKind::Value { .. } = drop_kind {
                    scope.needs_cleanup = true;
                }

                let region_scope_span = region_scope.span(self.hir.tcx(),
                                                          &self.hir.region_scope_tree);
                // Attribute scope exit drops to scope's closing brace.
                let scope_end = self.hir.tcx().sess.source_map().end_point(region_scope_span);

                scope.drops.push(DropData {
                    span: scope_end,
                    location: place.clone(),
                    kind: drop_kind
                });
                return;
            }
        }
        span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, place);
    }

    // Other
    // =====
    /// Creates a path that performs all required cleanup for unwinding.
    ///
    /// This path terminates in Resume. Returns the start of the path.
    /// See module comment for more details. None indicates there’s no
    /// cleanup to do at this point.
    pub fn diverge_cleanup(&mut self) -> BasicBlock {
        self.diverge_cleanup_gen(false)
    }

    fn resume_block(&mut self) -> BasicBlock {
        if let Some(target) = self.cached_resume_block {
            target
        } else {
            let resumeblk = self.cfg.start_new_cleanup_block();
            self.cfg.terminate(resumeblk,
                               SourceInfo {
                                   scope: OUTERMOST_SOURCE_SCOPE,
                                   span: self.fn_span
                               },
                               TerminatorKind::Resume);
            self.cached_resume_block = Some(resumeblk);
            resumeblk
        }
    }

    fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> BasicBlock {
        // To start, create the resume terminator.
        let mut target = self.resume_block();

        let Builder { ref mut cfg, ref mut scopes, .. } = *self;

        // Build up the drops in **reverse** order. The end result will
        // look like:
        //
        //    scopes[n] -> scopes[n-1] -> ... -> scopes[0]
        //
        // However, we build this in **reverse order**. That is, we
        // process scopes[0], then scopes[1], etc, pointing each one at
        // the result generated from the one before. Along the way, we
        // store caches. If everything is cached, we'll just walk right
        // to left reading the cached results but never create anything.

        if scopes.iter().any(|scope| scope.needs_cleanup) {
            for scope in scopes.iter_mut() {
                target = build_diverge_scope(self.hir.tcx(), cfg, scope.region_scope_span,
                                             scope, target, generator_drop);
            }
        }

        target
    }

    /// Utility function for *non*-scope code to build its own drops
    pub fn build_drop(&mut self,
                      block: BasicBlock,
                      span: Span,
                      location: Place<'tcx>,
                      ty: Ty<'tcx>) -> BlockAnd<()> {
        if !self.hir.needs_drop(ty) {
            return block.unit();
        }
        let source_info = self.source_info(span);
        let next_target = self.cfg.start_new_block();
        let diverge_target = self.diverge_cleanup();
        self.cfg.terminate(block, source_info,
                           TerminatorKind::Drop {
                               location,
                               target: next_target,
                               unwind: Some(diverge_target),
                           });
        next_target.unit()
    }

    /// Utility function for *non*-scope code to build its own drops
    pub fn build_drop_and_replace(&mut self,
                                  block: BasicBlock,
                                  span: Span,
                                  location: Place<'tcx>,
                                  value: Operand<'tcx>) -> BlockAnd<()> {
        let source_info = self.source_info(span);
        let next_target = self.cfg.start_new_block();
        let diverge_target = self.diverge_cleanup();
        self.cfg.terminate(block, source_info,
                           TerminatorKind::DropAndReplace {
                               location,
                               value,
                               target: next_target,
                               unwind: Some(diverge_target),
                           });
        next_target.unit()
    }

    /// Create an Assert terminator and return the success block.
    /// If the boolean condition operand is not the expected value,
    /// a runtime panic will be caused with the given message.
    pub fn assert(&mut self, block: BasicBlock,
                  cond: Operand<'tcx>,
                  expected: bool,
                  msg: AssertMessage<'tcx>,
                  span: Span)
                  -> BasicBlock {
        let source_info = self.source_info(span);

        let success_block = self.cfg.start_new_block();
        let cleanup = self.diverge_cleanup();

        self.cfg.terminate(block, source_info,
                           TerminatorKind::Assert {
                               cond,
                               expected,
                               msg,
                               target: success_block,
                               cleanup: Some(cleanup),
                           });

        success_block
    }
}

/// Builds drops for `pop_scope` and `exit_scope`.
fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
                           resume_block: BasicBlock,
                           scope: &Scope<'tcx>,
                           earlier_scopes: &[Scope<'tcx>],
                           mut block: BasicBlock,
                           arg_count: usize,
                           generator_drop: bool)
                           -> BlockAnd<()> {
    debug!("build_scope_drops({:?} -> {:?})", block, scope);
    let mut iter = scope.drops.iter().rev();
    while let Some(drop_data) = iter.next() {
        let source_info = scope.source_info(drop_data.span);
        match drop_data.kind {
            DropKind::Value { .. } => {
                // Try to find the next block with its cached block for us to
                // diverge into, either a previous block in this current scope or
                // the top of the previous scope.
                //
                // If it wasn't for EndRegion, we could just chain all the DropData
                // together and pick the first DropKind::Value. Please do that
                // when we replace EndRegion with NLL.
                let on_diverge = iter.clone().filter_map(|dd| {
                    match dd.kind {
                        DropKind::Value { cached_block } => Some(cached_block),
                        DropKind::Storage => None
                    }
                }).next().or_else(|| {
                    if earlier_scopes.iter().any(|scope| scope.needs_cleanup) {
                        // If *any* scope requires cleanup code to be run,
                        // we must use the cached unwind from the *topmost*
                        // scope, to ensure all EndRegions from surrounding
                        // scopes are executed before the drop code runs.
                        Some(earlier_scopes.last().unwrap().cached_unwind)
                    } else {
                        // We don't need any further cleanup, so return None
                        // to avoid creating a landing pad. We can skip
                        // EndRegions because all local regions end anyway
                        // when the function unwinds.
                        //
                        // This is an important optimization because LLVM is
                        // terrible at optimizing landing pads. FIXME: I think
                        // it would be cleaner and better to do this optimization
                        // in SimplifyCfg instead of here.
                        None
                    }
                });

                let on_diverge = on_diverge.map(|cached_block| {
                    cached_block.get(generator_drop).unwrap_or_else(|| {
                        span_bug!(drop_data.span, "cached block not present?")
                    })
                });

                let next = cfg.start_new_block();
                cfg.terminate(block, source_info, TerminatorKind::Drop {
                    location: drop_data.location.clone(),
                    target: next,
                    unwind: Some(on_diverge.unwrap_or(resume_block))
                });
                block = next;
            }
            DropKind::Storage => {
                // We do not need to emit StorageDead for generator drops
                if generator_drop {
                    continue
                }

                // Drop the storage for both value and storage drops.
                // Only temps and vars need their storage dead.
                match drop_data.location {
                    Place::Local(index) if index.index() > arg_count => {
                        cfg.push(block, Statement {
                            source_info,
                            kind: StatementKind::StorageDead(index)
                        });
                    }
                    _ => unreachable!(),
                }
            }
        }
    }
    block.unit()
}

fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
                                       cfg: &mut CFG<'tcx>,
                                       span: Span,
                                       scope: &mut Scope<'tcx>,
                                       mut target: BasicBlock,
                                       generator_drop: bool)
                                       -> BasicBlock
{
    // Build up the drops in **reverse** order. The end result will
    // look like:
    //
    //    [EndRegion Block] -> [drops[n]] -...-> [drops[0]] -> [Free] -> [target]
    //    |                                                         |
    //    +---------------------------------------------------------+
    //     code for scope
    //
    // The code in this function reads from right to left. At each
    // point, we check for cached blocks representing the
    // remainder. If everything is cached, we'll just walk right to
    // left reading the cached results but never create anything.

    let source_scope = scope.source_scope;
    let source_info = |span| SourceInfo {
        span,
        scope: source_scope
    };

    // Next, build up the drops. Here we iterate the vector in
    // *forward* order, so that we generate drops[0] first (right to
    // left in diagram above).
    for (j, drop_data) in scope.drops.iter_mut().enumerate() {
        debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data);
        // Only full value drops are emitted in the diverging path,
        // not StorageDead.
        //
        // Note: This may not actually be what we desire (are we
        // "freeing" stack storage as we unwind, or merely observing a
        // frozen stack)? In particular, the intent may have been to
        // match the behavior of clang, but on inspection eddyb says
        // this is not what clang does.
        let cached_block = match drop_data.kind {
            DropKind::Value { ref mut cached_block } => cached_block.ref_mut(generator_drop),
            DropKind::Storage => continue
        };
        target = if let Some(cached_block) = *cached_block {
            cached_block
        } else {
            let block = cfg.start_new_cleanup_block();
            cfg.terminate(block, source_info(drop_data.span),
                          TerminatorKind::Drop {
                              location: drop_data.location.clone(),
                              target,
                              unwind: None
                          });
            *cached_block = Some(block);
            block
        };
    }

    // Finally, push the EndRegion block, used by mir-borrowck, and set
    // `cached_unwind` to point to it (the block becomes a trivial goto
    // after the pass that removes all EndRegions).
    target = {
        let cached_block = scope.cached_unwind.ref_mut(generator_drop);
        if let Some(cached_block) = *cached_block {
            cached_block
        } else {
            let block = cfg.start_new_cleanup_block();
            cfg.push_end_region(tcx, block, source_info(span), scope.region_scope);
            cfg.terminate(block, source_info(span), TerminatorKind::Goto { target });
            *cached_block = Some(block);
            block
        }
    };

    debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target);

    target
}