/*!
Managing the scope stack. The scopes are tied to lexical scopes, so as
we descend the HAIR, we push a scope on the stack, build its
contents, and then pop it off. Every scope is named by a
`region::Scope`.

### SEME Regions

When pushing a new scope, we record the current point in the graph (a
basic block); this marks the entry to the scope. We then generate more
stuff in the control-flow graph. Whenever the scope is exited, either
via a `break` or `return` or just by fallthrough, that marks an exit
from the scope. Each lexical scope thus corresponds to a single-entry,
multiple-exit (SEME) region in the control-flow graph.
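
For example, in an illustrative sketch like the following, the scope of
the loop body is entered in one place but exited along several distinct
edges:

```
fn f(v: &[i32]) -> i32 {
    for &x in v {
        if x < 0 { return x; } // exit 1: `return` leaves the scope
        if x == 0 { break; }   // exit 2: `break` leaves the scope
    }                          // exit 3: fallthrough at the end of the body
    0
}
```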

For now, we keep a mapping from each `region::Scope` to its
corresponding SEME region for later reference (see caveat in next
paragraph). This is because the regions used in type checking are tied
to these SEME regions. Eventually, when we shift to non-lexical
lifetimes, there should be no need to remember this mapping.

### Not so SEME Regions

In the course of building matches, it sometimes happens that certain code
(namely guards) gets executed multiple times. This means that the lexical
scope may in fact correspond to multiple, disjoint SEME regions. So in fact
our mapping is from one scope to a vector of SEME regions.

Also in matches, the scopes assigned to arms are not even SEME regions! Each
arm has a single region with one entry for each pattern. We manually
manipulate the scheduled drops in this scope to avoid dropping things multiple
times, although drop elaboration would clean this up for value drops.
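
For instance, in an illustrative sketch like the one below, the guard may
run once per pattern, so the guard's code belongs to several disjoint SEME
regions even though it is a single lexical scope:

```
# let x = Some(0);
# let c = true;
match x {
    Some(0) | None if c => {} // one arm, two entries, one shared guard
    _ => {}
}
```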

### Drops

The primary purpose for scopes is to insert drops: while building
the contents, we also accumulate places that need to be dropped upon
exit from each scope. This is done by calling `schedule_drop`. Once a
drop is scheduled, whenever we branch out we will insert drops of all
those places onto the outgoing edge. Note that we don't know the full
set of scheduled drops up front, and so whenever we exit from the
scope we only drop the values scheduled thus far. For example, consider
the scope S corresponding to this loop:

```
# let cond = true;
loop {
    let x = ..;
    if cond { break; }
    let y = ..;
}
```

When processing the `let x`, we will add one drop to the scope for
`x`. The `break` will then insert a drop for `x`. When we process `let
y`, we will add another drop (in fact, to a subscope, but let's ignore
that for now); any later drops would also drop `y`.
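
Concretely, with owned values so that the drops are real, the edges end up
as in this illustrative sketch:

```
# let cond = true;
loop {
    let x = Box::new(0);  // schedules drop(x) in S
    if cond { break; }    // the break edge drops only x
    let y = Box::new(1);  // schedules drop(y), in a subscope
    // fallthrough back to the loop head drops y, then x
}
```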

### Early exit

There are numerous "normal" ways to early exit a scope: `break`,
`continue`, `return` (panics are handled separately). Whenever an
early exit occurs, the method `exit_scope` is called. It is given the
current point in execution where the early exit occurs, as well as the
scope you want to branch to (note that all early exits are to some
enclosing scope). `exit_scope` will record this exit point and
also add all drops.

Panics are handled in a similar fashion, except that a panic always
returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call
`panic(p)` with the current point `p`. Or else you can call
`diverge_cleanup`, which will produce a block that you can branch to
which does the appropriate cleanup and then diverges. `panic(p)`
simply calls `diverge_cleanup()` and adds an edge from `p` to the
result.
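
For example, a `break` out of a nested block must add drops for every
scope between the point of the `break` and the loop it targets, as in
this illustrative sketch:

```
# let cond = true;
'outer: loop {
    let a = Box::new(0);
    {
        let b = Box::new(1);
        // exit_scope builds this outgoing edge: it drops b, then a
        if cond { break 'outer; }
    }
}
```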

### Loop scopes

In addition to the normal scope stack, we track a loop scope stack
that contains only loops. It tracks where a `break` and `continue`
should go to.
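
With labeled loops, several loop scopes can be live at once, and a labeled
`break` or `continue` selects one of them (an illustrative sketch):

```
'outer: for i in 0..10 {
    'inner: for j in 0..10 {
        if j > i { continue 'outer; }   // targets the outer loop's scope
        if i + j == 7 { break 'inner; } // targets the inner loop's scope
    }
}
```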

*/

use crate::build::{BlockAnd, BlockAndExtension, Builder, CFG};
use crate::hair::LintLevel;
use rustc::middle::region;
use rustc::ty::Ty;
use rustc::hir;
use rustc::mir::*;
use syntax_pos::{DUMMY_SP, Span};
use rustc_data_structures::fx::FxHashMap;
use std::collections::hash_map::Entry;
use std::mem;

#[derive(Debug)]
pub struct Scope<'tcx> {
    /// The source scope this scope was created in.
    source_scope: SourceScope,

    /// The region scope of this scope.
    region_scope: region::Scope,

    /// The span of that `region_scope`.
    region_scope_span: Span,

    /// Whether there's anything to do for the cleanup path, that is,
    /// when unwinding through this scope. This includes destructors,
    /// but not StorageDead statements, which don't get emitted at all
    /// for unwinding, for several reasons:
    ///  * clang doesn't emit llvm.lifetime.end for C++ unwinding
    ///  * LLVM's memory dependency analysis can't handle it atm
    ///  * polluting the cleanup MIR with StorageDead creates
    ///    landing pads even though there are no actual destructors
    ///  * freeing up stack space has no effect during unwinding
    /// Note that for generators we do emit StorageDeads, so that the
    /// generator transform of the MIR can make use of them in its
    /// optimizations.
    needs_cleanup: bool,

    /// Set of places to drop when exiting this scope. This starts
    /// out empty but grows as variables are declared during the
    /// building process. This is a stack, so we always drop from the
    /// end of the vector (top of the stack) first.
    drops: Vec<DropData<'tcx>>,

    /// The cache for the drop chain on “normal” exit into a particular BasicBlock.
    cached_exits: FxHashMap<(BasicBlock, region::Scope), BasicBlock>,

    /// The cache for the drop chain on "generator drop" exit.
    cached_generator_drop: Option<BasicBlock>,

    /// The cache for the drop chain on "unwind" exit.
    cached_unwind: CachedBlock,
}

#[derive(Debug)]
struct DropData<'tcx> {
    /// The span where the drop obligation was incurred (typically where the
    /// place was declared).
    span: Span,

    /// The place to drop.
    location: Place<'tcx>,

    /// Whether this is a value Drop or a StorageDead.
    kind: DropKind,

    /// The cached blocks for unwinds.
    cached_block: CachedBlock,
}

#[derive(Debug, Default, Clone, Copy)]
struct CachedBlock {
    /// The cached block for the cleanups-on-diverge path. This block
    /// contains code to run the current drop and all the preceding
    /// drops (i.e., those with a lower index in the scope's `drops`
    /// array).
    unwind: Option<BasicBlock>,

    /// The cached block for unwinds during the cleanups-on-generator-drop path.
    ///
    /// This is split from the standard unwind path here to prevent drop
    /// elaboration from creating drop flags that would have to be captured
    /// by the generator. I'm not sure how important this optimization is,
    /// but it is here.
    generator_drop: Option<BasicBlock>,
}

#[derive(Debug)]
pub(crate) enum DropKind {
    Value,
    Storage,
}

#[derive(Clone, Debug)]
pub struct BreakableScope<'tcx> {
    /// Region scope of the loop.
    pub region_scope: region::Scope,
    /// Where the body of the loop begins. `None` if this is a block rather
    /// than a loop.
    pub continue_block: Option<BasicBlock>,
    /// Block to branch into when the loop or block terminates (either by
    /// being `break`-en out from, or by its condition becoming false).
    pub break_block: BasicBlock,
    /// The destination of the loop/block expression itself (i.e., where to
    /// put the result of a `break` expression).
    pub break_destination: Place<'tcx>,
}

impl CachedBlock {
    fn invalidate(&mut self) {
        self.generator_drop = None;
        self.unwind = None;
    }

    fn get(&self, generator_drop: bool) -> Option<BasicBlock> {
        if generator_drop {
            self.generator_drop
        } else {
            self.unwind
        }
    }

    fn ref_mut(&mut self, generator_drop: bool) -> &mut Option<BasicBlock> {
        if generator_drop {
            &mut self.generator_drop
        } else {
            &mut self.unwind
        }
    }
}

impl DropKind {
    fn may_panic(&self) -> bool {
        match *self {
            DropKind::Value => true,
            DropKind::Storage => false
        }
    }
}

impl<'tcx> Scope<'tcx> {
    /// Invalidates all the cached blocks in the scope.
    ///
    /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
    /// larger extent of code.
    ///
    /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`.
    /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
    /// top-of-scope (as opposed to dependent scopes).
    fn invalidate_cache(&mut self, storage_only: bool, is_generator: bool, this_scope_only: bool) {
        // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
        // with lots of `try!`?

        // cached exits drop storage and refer to the top-of-scope
        self.cached_exits.clear();

        // the current generator drop and unwind refer to top-of-scope
        self.cached_generator_drop = None;

        let ignore_unwinds = storage_only && !is_generator;
        if !ignore_unwinds {
            self.cached_unwind.invalidate();
        }

        if !ignore_unwinds && !this_scope_only {
            for drop_data in &mut self.drops {
                drop_data.cached_block.invalidate();
            }
        }
    }

    /// Given a span and this scope's source scope, make a SourceInfo.
    fn source_info(&self, span: Span) -> SourceInfo {
        SourceInfo {
            span,
            scope: self.source_scope
        }
    }
}

impl<'a, 'tcx> Builder<'a, 'tcx> {
    // Adding and removing scopes
    // ==========================
    /// Start a breakable scope, which tracks where `continue` and `break`
    /// should branch to. See module comment for more details.
    ///
    /// Returns the result of the closure `f`.
    pub fn in_breakable_scope<F, R>(&mut self,
                                    loop_block: Option<BasicBlock>,
                                    break_block: BasicBlock,
                                    break_destination: Place<'tcx>,
                                    f: F) -> R
        where F: FnOnce(&mut Builder<'a, 'tcx>) -> R
    {
        let region_scope = self.topmost_scope();
        let scope = BreakableScope {
            region_scope,
            continue_block: loop_block,
            break_block,
            break_destination,
        };
        self.breakable_scopes.push(scope);
        let res = f(self);
        let breakable_scope = self.breakable_scopes.pop().unwrap();
        assert!(breakable_scope.region_scope == region_scope);
        res
    }

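    /// Convenience wrapper around `push_scope` and `pop_scope`: pushes
    /// `opt_scope` (if it is `Some`), executes `f` to build the contents,
    /// and then pops the scope again.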
    pub fn in_opt_scope<F, R>(&mut self,
                              opt_scope: Option<(region::Scope, SourceInfo)>,
                              f: F)
                              -> BlockAnd<R>
        where F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<R>
    {
        debug!("in_opt_scope(opt_scope={:?})", opt_scope);
        if let Some(region_scope) = opt_scope { self.push_scope(region_scope); }
        let mut block;
        let rv = unpack!(block = f(self));
        if let Some(region_scope) = opt_scope {
            unpack!(block = self.pop_scope(region_scope, block));
        }
        debug!("in_opt_scope: exiting opt_scope={:?} block={:?}", opt_scope, block);
        block.and(rv)
    }

    /// Convenience wrapper that pushes a scope and then executes `f`
    /// to build its contents, popping the scope afterwards.
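    ///
    /// An illustrative sketch of a call site (the names here are
    /// placeholders, not taken from this file):
    ///
    /// ```ignore (illustrative)
    /// let result = builder.in_scope((region_scope, source_info), lint_level, |this| {
    ///     // ... build the contents of the scope, yielding a BlockAnd<R> ...
    ///     block.unit()
    /// });
    /// ```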
    pub fn in_scope<F, R>(&mut self,
                          region_scope: (region::Scope, SourceInfo),
                          lint_level: LintLevel,
                          f: F)
                          -> BlockAnd<R>
        where F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<R>
    {
        debug!("in_scope(region_scope={:?})", region_scope);
        let source_scope = self.source_scope;
        let tcx = self.hir.tcx();
        if let LintLevel::Explicit(current_hir_id) = lint_level {
            // Use `maybe_lint_level_root_bounded` with `root_lint_level` as a bound
            // to avoid adding Hir dependencies on our parents.
            // We estimate the true lint roots here to avoid creating a lot of source scopes.

            let parent_root = tcx.maybe_lint_level_root_bounded(
                self.source_scope_local_data[source_scope].lint_root,
                self.hir.root_lint_level,
            );
            let current_root = tcx.maybe_lint_level_root_bounded(
                current_hir_id,
                self.hir.root_lint_level
            );

            if parent_root != current_root {
                self.source_scope = self.new_source_scope(
                    region_scope.1.span,
                    LintLevel::Explicit(current_root),
                    None
                );
            }
        }
        self.push_scope(region_scope);
        let mut block;
        let rv = unpack!(block = f(self));
        unpack!(block = self.pop_scope(region_scope, block));
        self.source_scope = source_scope;
        debug!("in_scope: exiting region_scope={:?} block={:?}", region_scope, block);
        block.and(rv)
    }

    /// Push a scope onto the stack. You can then build code in this
    /// scope and call `pop_scope` afterwards. Note that these two
    /// calls must be paired; using `in_scope` as a convenience
    /// wrapper may be preferable.
    pub fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo)) {
        debug!("push_scope({:?})", region_scope);
        let vis_scope = self.source_scope;
        self.scopes.push(Scope {
            source_scope: vis_scope,
            region_scope: region_scope.0,
            region_scope_span: region_scope.1.span,
            needs_cleanup: false,
            drops: vec![],
            cached_generator_drop: None,
            cached_exits: Default::default(),
            cached_unwind: CachedBlock::default(),
        });
    }

    /// Pops a scope, which should have region scope `region_scope`,
    /// adding any drops onto the end of `block` that are needed.
    /// This must match 1-to-1 with `push_scope`.
    pub fn pop_scope(&mut self,
                     region_scope: (region::Scope, SourceInfo),
                     mut block: BasicBlock)
                     -> BlockAnd<()> {
        debug!("pop_scope({:?}, {:?})", region_scope, block);
        // If we are emitting a `drop` statement, we need to have the cached
        // diverge cleanup pads ready in case that drop panics.
        let may_panic =
            self.scopes.last().unwrap().drops.iter().any(|s| s.kind.may_panic());
        if may_panic {
            self.diverge_cleanup();
        }
        let scope = self.scopes.pop().unwrap();
        assert_eq!(scope.region_scope, region_scope.0);

        let unwind_to = self.scopes.last().and_then(|next_scope| {
            next_scope.cached_unwind.get(false)
        }).unwrap_or_else(|| self.resume_block());

        unpack!(block = build_scope_drops(
            &mut self.cfg,
            self.is_generator,
            &scope,
            block,
            unwind_to,
            self.arg_count,
            false,
        ));

        block.unit()
    }

    /// Branch out of `block` to `target`, exiting all scopes up to
    /// and including `region_scope`. This will insert whatever drops are
    /// needed. See module comment for details.
    pub fn exit_scope(&mut self,
                      span: Span,
                      region_scope: (region::Scope, SourceInfo),
                      mut block: BasicBlock,
                      target: BasicBlock) {
        debug!("exit_scope(region_scope={:?}, block={:?}, target={:?})",
               region_scope, block, target);
        let scope_count = 1 + self.scopes.iter().rev()
            .position(|scope| scope.region_scope == region_scope.0)
            .unwrap_or_else(|| {
                span_bug!(span, "region_scope {:?} does not enclose", region_scope)
            });
        let len = self.scopes.len();
        assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes");

        // If we are emitting a `drop` statement, we need to have the cached
        // diverge cleanup pads ready in case that drop panics.
        let may_panic = self.scopes[(len - scope_count)..].iter().any(|s| s.needs_cleanup);
        if may_panic {
            self.diverge_cleanup();
        }

        let mut scopes = self.scopes[(len - scope_count - 1)..].iter_mut().rev();
        let mut scope = scopes.next().unwrap();
        for next_scope in scopes {
            if scope.drops.is_empty() {
                scope = next_scope;
                continue;
            }
            let source_info = scope.source_info(span);
            block = match scope.cached_exits.entry((target, region_scope.0)) {
                Entry::Occupied(e) => {
                    self.cfg.terminate(block, source_info,
                                       TerminatorKind::Goto { target: *e.get() });
                    return;
                }
                Entry::Vacant(v) => {
                    let b = self.cfg.start_new_block();
                    self.cfg.terminate(block, source_info,
                                       TerminatorKind::Goto { target: b });
                    v.insert(b);
                    b
                }
            };

            // If none of the scheduled drops can panic, the unwind target is
            // never used; `START_BLOCK` is merely a placeholder here.
            let unwind_to = next_scope.cached_unwind.get(false).unwrap_or_else(|| {
                debug_assert!(!may_panic, "cached block not present?");
                START_BLOCK
            });

            unpack!(block = build_scope_drops(
                &mut self.cfg,
                self.is_generator,
                scope,
                block,
                unwind_to,
                self.arg_count,
                false,
            ));

            scope = next_scope;
        }

        let scope = &self.scopes[len - scope_count];
        self.cfg.terminate(block, scope.source_info(span),
                           TerminatorKind::Goto { target });
    }

    /// Creates a path that performs all required cleanup for dropping a generator.
    ///
    /// This path terminates in `GeneratorDrop`. Returns the start of the path.
    /// `None` indicates there’s no cleanup to do at this point.
    pub fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> {
        // Fill in the cache for unwinds
        self.diverge_cleanup_gen(true);

        let src_info = self.scopes[0].source_info(self.fn_span);
        let resume_block = self.resume_block();
        let mut scopes = self.scopes.iter_mut().rev().peekable();
        let mut block = self.cfg.start_new_block();
        let result = block;

        while let Some(scope) = scopes.next() {
            block = if let Some(b) = scope.cached_generator_drop {
                self.cfg.terminate(block, src_info,
                                   TerminatorKind::Goto { target: b });
                return Some(result);
            } else {
                let b = self.cfg.start_new_block();
                scope.cached_generator_drop = Some(b);
                self.cfg.terminate(block, src_info,
                                   TerminatorKind::Goto { target: b });
                b
            };

            let unwind_to = scopes.peek().as_ref().map(|scope| {
                scope.cached_unwind.get(true).unwrap_or_else(|| {
                    span_bug!(src_info.span, "cached block not present?")
                })
            }).unwrap_or(resume_block);

            unpack!(block = build_scope_drops(
                &mut self.cfg,
                self.is_generator,
                scope,
                block,
                unwind_to,
                self.arg_count,
                true,
            ));
        }

        self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop);

        Some(result)
    }

    /// Creates a new source scope, nested in the current one.
    pub fn new_source_scope(&mut self,
                            span: Span,
                            lint_level: LintLevel,
                            safety: Option<Safety>) -> SourceScope {
        let parent = self.source_scope;
        debug!("new_source_scope({:?}, {:?}, {:?}) - parent({:?})={:?}",
               span, lint_level, safety,
               parent, self.source_scope_local_data.get(parent));
        let scope = self.source_scopes.push(SourceScopeData {
            span,
            parent_scope: Some(parent),
        });
        let scope_local_data = SourceScopeLocalData {
            lint_root: if let LintLevel::Explicit(lint_root) = lint_level {
                lint_root
            } else {
                self.source_scope_local_data[parent].lint_root
            },
            safety: safety.unwrap_or_else(|| {
                self.source_scope_local_data[parent].safety
            })
        };
        self.source_scope_local_data.push(scope_local_data);
        scope
    }

    // Finding scopes
    // ==============
    /// Finds the breakable scope for a given label. This is used for
    /// resolving `break` and `continue`.
    pub fn find_breakable_scope(&self,
                                span: Span,
                                label: region::Scope)
                                -> &BreakableScope<'tcx> {
        // find the loop-scope with the correct id
        self.breakable_scopes.iter()
            .rev()
            .find(|breakable_scope| breakable_scope.region_scope == label)
            .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found"))
    }

    /// Given a span and the current source scope, make a SourceInfo.
    pub fn source_info(&self, span: Span) -> SourceInfo {
        SourceInfo {
            span,
            scope: self.source_scope
        }
    }

    /// Returns the `region::Scope` of the scope which should be exited by a
    /// return.
    pub fn region_scope_of_return_scope(&self) -> region::Scope {
        // The outermost scope (`scopes[0]`) will be the `CallSiteScope`.
        // We want `scopes[1]`, which is the `ParameterScope`.
        assert!(self.scopes.len() >= 2);
        assert!(match self.scopes[1].region_scope.data {
            region::ScopeData::Arguments => true,
            _ => false,
        });
        self.scopes[1].region_scope
    }

    /// Returns the topmost active scope, which is known to be alive until
    /// the next scope expression.
    pub fn topmost_scope(&self) -> region::Scope {
        self.scopes.last().expect("topmost_scope: no scopes present").region_scope
    }

    /// Returns the scope that we should use as the lifetime of an
    /// operand. Basically, an operand must live until it is consumed.
    /// This is similar to, but not quite the same as, the temporary
    /// scope (which can be larger or smaller).
    ///
    /// Consider:
    ///
    ///     let x = foo(bar(X, Y));
    ///
    /// We wish to pop the storage for X and Y after `bar()` is
    /// called, not after the whole `let` is completed.
    ///
    /// As another example, if the second argument diverges:
    ///
    ///     foo(Box::new(2), panic!())
    ///
    /// We would allocate the box but then free it on the unwinding
    /// path; we would also emit a free on the 'success' path from
    /// panic, but that will turn out to be removed as dead-code.
    ///
    /// When building statics/constants, returns `None` since
    /// intermediate values do not have to be dropped in that case.
    pub fn local_scope(&self) -> Option<region::Scope> {
        match self.hir.body_owner_kind {
            hir::BodyOwnerKind::Const |
            hir::BodyOwnerKind::Static(_) =>
                // No need to free storage in this context.
                None,
            hir::BodyOwnerKind::Closure |
            hir::BodyOwnerKind::Fn =>
                Some(self.topmost_scope()),
        }
    }

    /// Schedules an abort block - this is used for some ABIs that cannot unwind.
    pub fn schedule_abort(&mut self) -> BasicBlock {
        self.scopes[0].needs_cleanup = true;
        let abortblk = self.cfg.start_new_cleanup_block();
        let source_info = self.scopes[0].source_info(self.fn_span);
        self.cfg.terminate(abortblk, source_info, TerminatorKind::Abort);
        self.cached_resume_block = Some(abortblk);
        abortblk
    }

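    /// Schedules drops of both the storage and the value of `place` on exit
    /// from `region_scope`, by scheduling a `DropKind::Storage` drop followed
    /// by a `DropKind::Value` drop.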
    pub fn schedule_drop_storage_and_value(
        &mut self,
        span: Span,
        region_scope: region::Scope,
        place: &Place<'tcx>,
        place_ty: Ty<'tcx>,
    ) {
        self.schedule_drop(span, region_scope, place, place_ty, DropKind::Storage);
        self.schedule_drop(span, region_scope, place, place_ty, DropKind::Value);
    }

    // Scheduling drops
    // ================
    /// Indicates that `place` should be dropped on exit from
    /// `region_scope`.
    ///
    /// When called with `DropKind::Storage`, `place` should be a local
    /// with an index higher than the current `self.arg_count`.
    pub fn schedule_drop(
        &mut self,
        span: Span,
        region_scope: region::Scope,
        place: &Place<'tcx>,
        place_ty: Ty<'tcx>,
        drop_kind: DropKind,
    ) {
        let needs_drop = self.hir.needs_drop(place_ty);
        match drop_kind {
            DropKind::Value => if !needs_drop { return },
            DropKind::Storage => {
                match *place {
                    Place::Base(PlaceBase::Local(index)) => if index.index() <= self.arg_count {
                        span_bug!(
                            span, "`schedule_drop` called with index {} and arg_count {}",
                            index.index(),
                            self.arg_count,
                        )
                    },
                    _ => span_bug!(
                        span, "`schedule_drop` called with non-`Local` place {:?}", place
                    ),
                }
            }
        }

        for scope in self.scopes.iter_mut().rev() {
            let this_scope = scope.region_scope == region_scope;
            // When building drops, we try to cache chains of drops in such a way that these drops
            // can be reused by other drops which branch into the cached (already built)
            // blocks. This, however, means that whenever we add a drop into a scope which already
            // had some blocks built (and thus, cached) for it, we must invalidate all caches which
            // might branch into the scope which had a drop just added to it. This is necessary,
            // because otherwise some other code might use the cache to branch into an already
            // built chain of drops, essentially ignoring the newly added drop.
            //
            // For example, consider there are two scopes with a drop in each. These are built and
            // thus the caches are filled:
            //
            // +--------------------------------------------------------+
            // | +---------------------------------+                    |
            // | | +--------+     +-------------+  |  +---------------+ |
            // | | | return | <-+ | drop(outer) | <-+ |  drop(middle) | |
            // | | +--------+     +-------------+  |  +---------------+ |
            // | +------------|outer_scope cache|--+                    |
            // +------------------------------|middle_scope cache|------+
            //
            // Now, a new, inner-most scope is added along with a new drop into both inner-most and
            // outer-most scopes:
            //
            // +------------------------------------------------------------+
            // | +----------------------------------+                       |
            // | | +--------+      +-------------+  |   +---------------+   | +-------------+
            // | | | return | <+   | drop(new)   | <-+  |  drop(middle) | <--+| drop(inner) |
            // | | +--------+  |   | drop(outer) |  |   +---------------+   | +-------------+
            // | |             +-+ +-------------+  |                       |
            // | +---|invalid outer_scope cache|----+                       |
            // +---------------------|invalid middle_scope cache|-----------+
            //
            // If, when adding `drop(new)`, we do not invalidate the cached blocks for both
            // outer_scope and middle_scope, then, when building drops for the inner (right-most)
            // scope, the old, cached blocks, without `drop(new)`, will get used, producing the
            // wrong results.
            //
            // The cache and its invalidation for the unwind branch is somewhat special. The cache
            // is per-drop, rather than per-scope, which has several different implications. Adding
            // a new drop into a scope will not invalidate cached blocks of the prior drops in the
            // scope. That is true, because none of the already existing drops will have an edge
            // into a block with the newly added drop.
            //
            // Note that this code iterates scopes from the inner-most to the outer-most,
            // invalidating caches of each scope visited. This way the bare minimum of the
            // caches gets invalidated; i.e., if a new drop is added into the middle scope, the
            // cache of the outer scope stays intact.
            scope.invalidate_cache(!needs_drop, self.is_generator, this_scope);
            if this_scope {
                if let DropKind::Value = drop_kind {
                    scope.needs_cleanup = true;
                }

                let region_scope_span = region_scope.span(self.hir.tcx(),
                                                          &self.hir.region_scope_tree);
                // Attribute scope exit drops to scope's closing brace.
                let scope_end = self.hir.tcx().sess.source_map().end_point(region_scope_span);

                scope.drops.push(DropData {
                    span: scope_end,
                    location: place.clone(),
                    kind: drop_kind,
                    cached_block: CachedBlock::default(),
                });
                return;
            }
        }
        span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, place);
    }

    // Other
    // =====
    /// Creates a path that performs all required cleanup for unwinding.
    ///
    /// This path terminates in Resume. Returns the start of the path.
    /// See module comment for more details.
    pub fn diverge_cleanup(&mut self) -> BasicBlock {
        self.diverge_cleanup_gen(false)
    }

    fn resume_block(&mut self) -> BasicBlock {
        if let Some(target) = self.cached_resume_block {
            target
        } else {
            let resumeblk = self.cfg.start_new_cleanup_block();
            self.cfg.terminate(resumeblk,
                               SourceInfo {
                                   scope: OUTERMOST_SOURCE_SCOPE,
                                   span: self.fn_span
                               },
                               TerminatorKind::Resume);
            self.cached_resume_block = Some(resumeblk);
            resumeblk
        }
    }

    fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> BasicBlock {
        // Build up the drops in **reverse** order. The end result will
        // look like:
        //
        //    scopes[n] -> scopes[n-1] -> ... -> scopes[0]
        //
        // However, we build this in **reverse order**. That is, we
        // process scopes[0], then scopes[1], etc., pointing each one at
        // the result generated from the one before. Along the way, we
        // store caches. If everything is cached, we'll just walk right
        // to left reading the cached results but never create anything.

        // Find the last cached block
        debug!("diverge_cleanup_gen(self.scopes = {:?})", self.scopes);
        let (mut target, first_uncached) = if let Some(cached_index) = self.scopes.iter()
            .rposition(|scope| scope.cached_unwind.get(generator_drop).is_some()) {
            (self.scopes[cached_index].cached_unwind.get(generator_drop).unwrap(), cached_index + 1)
        } else {
            (self.resume_block(), 0)
        };

        for scope in self.scopes[first_uncached..].iter_mut() {
            target = build_diverge_scope(&mut self.cfg, scope.region_scope_span,
                                         scope, target, generator_drop, self.is_generator);
        }

        target
    }

    /// Utility function for *non*-scope code to build its own drops.
    pub fn build_drop(&mut self,
                      block: BasicBlock,
                      span: Span,
                      location: Place<'tcx>,
                      ty: Ty<'tcx>) -> BlockAnd<()> {
        if !self.hir.needs_drop(ty) {
            return block.unit();
        }
        let source_info = self.source_info(span);
        let next_target = self.cfg.start_new_block();
        let diverge_target = self.diverge_cleanup();
        self.cfg.terminate(block, source_info,
                           TerminatorKind::Drop {
                               location,
                               target: next_target,
                               unwind: Some(diverge_target),
                           });
        next_target.unit()
    }

    /// Utility function for *non*-scope code to build its own drops.
    pub fn build_drop_and_replace(&mut self,
                                  block: BasicBlock,
                                  span: Span,
                                  location: Place<'tcx>,
                                  value: Operand<'tcx>) -> BlockAnd<()> {
        let source_info = self.source_info(span);
        let next_target = self.cfg.start_new_block();
        let diverge_target = self.diverge_cleanup();
        self.cfg.terminate(block, source_info,
                           TerminatorKind::DropAndReplace {
                               location,
                               value,
                               target: next_target,
                               unwind: Some(diverge_target),
                           });
        next_target.unit()
    }

    /// Creates an Assert terminator and returns the success block.
    /// If the boolean condition operand is not the expected value,
    /// a runtime panic will be caused with the given message.
    pub fn assert(&mut self, block: BasicBlock,
                  cond: Operand<'tcx>,
                  expected: bool,
                  msg: AssertMessage<'tcx>,
                  span: Span)
                  -> BasicBlock {
        let source_info = self.source_info(span);

        let success_block = self.cfg.start_new_block();
        let cleanup = self.diverge_cleanup();

        self.cfg.terminate(block, source_info,
                           TerminatorKind::Assert {
                               cond,
                               expected,
                               msg,
                               target: success_block,
                               cleanup: Some(cleanup),
                           });

        success_block
    }

    // `match` arm scopes
    // ==================
    /// Unschedules any drops in the top scope.
    ///
    /// This is only needed for `match` arm scopes, because they have one
    /// entrance per pattern, but only one exit.
    pub fn clear_top_scope(&mut self, region_scope: region::Scope) {
        let top_scope = self.scopes.last_mut().unwrap();

        assert_eq!(top_scope.region_scope, region_scope);

        top_scope.drops.clear();
        top_scope.invalidate_cache(false, self.is_generator, true);
    }

    /// Drops the single variable provided.
    ///
    /// * The scope must be the top scope.
    /// * The variable must be in that scope.
    /// * The variable must be at the top of that scope: it's the next thing
    ///   scheduled to drop.
    /// * The drop must be of `DropKind::Storage`.
    ///
    /// This is used for the boolean holding the result of the match guard. We
    /// do this because:
    ///
    /// * The boolean is different for each pattern
    /// * There is only one exit for the arm scope
    /// * The guard expression scope is too short, it ends just before the
    ///   boolean is tested.
    pub fn pop_variable(
        &mut self,
        block: BasicBlock,
        region_scope: region::Scope,
        variable: Local,
    ) {
        let top_scope = self.scopes.last_mut().unwrap();

        assert_eq!(top_scope.region_scope, region_scope);

        let top_drop_data = top_scope.drops.pop().unwrap();

        match top_drop_data.kind {
            DropKind::Value => {
                bug!("Should not be calling `pop_variable` on a non-copy type!")
            }
            DropKind::Storage => {
                // Drop the storage for both value and storage drops.
                // Only temps and vars need their storage dead.
                match top_drop_data.location {
                    Place::Base(PlaceBase::Local(index)) => {
                        let source_info = top_scope.source_info(top_drop_data.span);
                        assert_eq!(index, variable);
                        self.cfg.push(block, Statement {
                            source_info,
                            kind: StatementKind::StorageDead(index)
                        });
                    }
                    _ => unreachable!(),
                }
            }
        }

        top_scope.invalidate_cache(true, self.is_generator, true);
    }

}

/// Builds drops for pop_scope and exit_scope.
fn build_scope_drops<'tcx>(
    cfg: &mut CFG<'tcx>,
    is_generator: bool,
    scope: &Scope<'tcx>,
    mut block: BasicBlock,
    last_unwind_to: BasicBlock,
    arg_count: usize,
    generator_drop: bool,
) -> BlockAnd<()> {
    debug!("build_scope_drops({:?} -> {:?})", block, scope);

    // Build up the drops in evaluation order. The end result will
    // look like:
    //
    // [SDs, drops[n]] --..> [SDs, drop[1]] -> [SDs, drop[0]] -> [[SDs]]
    //               |                    |                 |
    //               :                    |                 |
    //                                    V                 V
    // [drop[n]] -...-> [drop[1]] ------> [drop[0]] ------> [last_unwind_to]
    //
    // The horizontal arrows represent the execution path when the drops return
    // successfully. The downwards arrows represent the execution path when the
    // drops panic (panicking while unwinding will abort, so there's no need for
    // another set of arrows).
    //
    // For generators, we unwind from a drop on a local to its StorageDead
    // statement. For other functions we don't worry about StorageDead. The
    // drops for the unwind path should have already been generated by
    // `diverge_cleanup_gen`.

    for drop_idx in (0..scope.drops.len()).rev() {
        let drop_data = &scope.drops[drop_idx];
        let source_info = scope.source_info(drop_data.span);
        match drop_data.kind {
            DropKind::Value => {
                let unwind_to = get_unwind_to(scope, is_generator, drop_idx, generator_drop)
                    .unwrap_or(last_unwind_to);

                let next = cfg.start_new_block();
                cfg.terminate(block, source_info, TerminatorKind::Drop {
                    location: drop_data.location.clone(),
                    target: next,
                    unwind: Some(unwind_to)
                });
                block = next;
            }
            DropKind::Storage => {
                // Drop the storage for both value and storage drops.
                // Only temps and vars need their storage dead.
                match drop_data.location {
                    Place::Base(PlaceBase::Local(index)) if index.index() > arg_count => {
                        cfg.push(block, Statement {
                            source_info,
                            kind: StatementKind::StorageDead(index)
                        });
                    }
                    _ => unreachable!(),
                }
            }
        }
    }
    block.unit()
}

fn get_unwind_to<'tcx>(
    scope: &Scope<'tcx>,
    is_generator: bool,
    unwind_from: usize,
    generator_drop: bool,
) -> Option<BasicBlock> {
    // Walk backwards from the drop we are unwinding from, looking for the
    // nearest preceding drop with a cached unwind block: StorageDead drops
    // for generators, value drops for ordinary functions.
    for drop_idx in (0..unwind_from).rev() {
        let drop_data = &scope.drops[drop_idx];
        match (is_generator, &drop_data.kind) {
            (true, DropKind::Storage) => {
                return Some(drop_data.cached_block.get(generator_drop).unwrap_or_else(|| {
                    span_bug!(drop_data.span, "cached block not present for {:?}", drop_data)
                }));
            }
            (false, DropKind::Value) => {
                return Some(drop_data.cached_block.get(generator_drop).unwrap_or_else(|| {
                    span_bug!(drop_data.span, "cached block not present for {:?}", drop_data)
                }));
            }
            _ => (),
        }
    }
    None
}

fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
                             span: Span,
                             scope: &mut Scope<'tcx>,
                             mut target: BasicBlock,
                             generator_drop: bool,
                             is_generator: bool)
                             -> BasicBlock
{
    // Build up the drops in **reverse** order. The end result will
    // look like:
    //
    //    [drops[n]] -...-> [drops[0]] -> [target]
    //
    // The code in this function reads from right to left. At each
    // point, we check for cached blocks representing the
    // remainder. If everything is cached, we'll just walk right to
    // left reading the cached results but never create anything.

    let source_scope = scope.source_scope;
    let source_info = |span| SourceInfo {
        span,
        scope: source_scope
    };

    // We keep track of StorageDead statements to prepend to our current block
    // and store them here, in reverse order.
    let mut storage_deads = vec![];

    let mut target_built_by_us = false;

    // Build up the drops. Here we iterate the vector in
    // *forward* order, so that we generate drops[0] first (right to
    // left in diagram above).
    debug!("build_diverge_scope({:?})", scope.drops);
    for (j, drop_data) in scope.drops.iter_mut().enumerate() {
        debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data);
        // Only full value drops are emitted in the diverging path,
        // not StorageDead, except in the case of generators.
        //
        // Note: This may not actually be what we desire (are we
        // "freeing" stack storage as we unwind, or merely observing a
        // frozen stack)? In particular, the intent may have been to
        // match the behavior of clang, but on inspection eddyb says
        // this is not what clang does.
        match drop_data.kind {
            DropKind::Storage if is_generator => {
                // Only temps and vars need their storage dead.
                match drop_data.location {
                    Place::Base(PlaceBase::Local(index)) => {
                        storage_deads.push(Statement {
                            source_info: source_info(drop_data.span),
                            kind: StatementKind::StorageDead(index)
                        });
                        if !target_built_by_us {
                            // We cannot add statements to an existing block, so we create a new
                            // block for our StorageDead statements.
                            let block = cfg.start_new_cleanup_block();
                            let source_info = SourceInfo { span: DUMMY_SP, scope: source_scope };
                            cfg.terminate(block, source_info,
                                          TerminatorKind::Goto { target });
                            target = block;
                            target_built_by_us = true;
                        }
                    }
                    _ => unreachable!(),
                };
                *drop_data.cached_block.ref_mut(generator_drop) = Some(target);
            }
            DropKind::Storage => {}
            DropKind::Value => {
                let cached_block = drop_data.cached_block.ref_mut(generator_drop);
                target = if let Some(cached_block) = *cached_block {
                    storage_deads.clear();
                    target_built_by_us = false;
                    cached_block
                } else {
                    push_storage_deads(cfg, target, &mut storage_deads);
                    let block = cfg.start_new_cleanup_block();
                    cfg.terminate(block, source_info(drop_data.span),
                                  TerminatorKind::Drop {
                                      location: drop_data.location.clone(),
                                      target,
                                      unwind: None
                                  });
                    *cached_block = Some(block);
                    target_built_by_us = true;
                    block
                };
            }
        };
    }
    push_storage_deads(cfg, target, &mut storage_deads);
    *scope.cached_unwind.ref_mut(generator_drop) = Some(target);

    assert!(storage_deads.is_empty());
    debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target);

    target
}

fn push_storage_deads<'tcx>(cfg: &mut CFG<'tcx>,
                            target: BasicBlock,
                            storage_deads: &mut Vec<Statement<'tcx>>) {
    if storage_deads.is_empty() { return; }
    let statements = &mut cfg.block_data_mut(target).statements;
    storage_deads.reverse();
    debug!("push_storage_deads({:?}), storage_deads={:?}, statements={:?}",
           target, storage_deads, statements);
    // Prepend the accumulated StorageDead statements (restored to program
    // order by the `reverse` above) to the target block's statements.
    storage_deads.append(statements);
    mem::swap(statements, storage_deads);
    assert!(storage_deads.is_empty());
}