// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Managing the scope stack. The scopes are tied to lexical scopes, so as
we descend the HAIR, we push a scope on the stack, translate its
contents, and then pop it off. Every scope is named by a
`region::Scope`.
When pushing a new scope, we record the current point in the graph (a
basic block); this marks the entry to the scope. We then generate more
stuff in the control-flow graph. Whenever the scope is exited, either
via a `break` or `return` or just by fallthrough, that marks an exit
from the scope. Each lexical scope thus corresponds to a single-entry,
multiple-exit (SEME) region in the control-flow graph.
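
For example, in source like the following (an illustrative sketch, not
code from this module), the scope of `v` is entered once but exited in
two places, and each exit edge receives its own drop of `v`:

    fn f(v: Vec<i32>) {
        if v.is_empty() {
            return;            // exit 1: early return, `v` dropped here
        }
        println!("{}", v.len());
    }                          // exit 2: fallthrough, `v` dropped here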
For now, we keep a mapping from each `region::Scope` to its
corresponding SEME region for later reference (see caveat in next
paragraph). This is because region scopes are tied to these SEME
regions. Eventually, when we shift to non-lexical lifetimes, there
should be no need to remember this mapping.
There is one additional wrinkle, actually, that I wanted to hide from
you but duty compels me to mention. In the course of translating
matches, it sometimes happens that certain code (namely guards) gets
executed multiple times. This means that a single lexical scope may in
fact correspond to multiple, disjoint SEME regions. So in fact our
mapping is from one scope to a vector of SEME regions.
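
To illustrate (again only a sketch, with `guard` a stand-in), a guard
shared by several `|` candidates may be lowered once per candidate, so
its lexical scope shows up in several disjoint regions of the
control-flow graph:

    match (x, y) {
        (0, _) | (_, 0) if guard() => { /* ... */ }  // `guard()` may be
                                                     // emitted once per
                                                     // candidate
        _ => { /* ... */ }
    }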
The primary purpose for scopes is to insert drops: while translating
the contents, we also accumulate lvalues that need to be dropped upon
exit from each scope. This is done by calling `schedule_drop`. Once a
drop is scheduled, whenever we branch out we will insert drops of all
those lvalues onto the outgoing edge. Note that we don't know the full
set of scheduled drops up front, and so whenever we exit from the
scope we only drop the values scheduled thus far. For example, consider
the scope S corresponding to this loop:
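
    loop {
        let x = ..;
        if cond { break; }
        let y = ..;
    }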
When processing the `let x`, we will add one drop to the scope for
`x`. The break will then insert a drop for `x`. When we process `let
y`, we will add another drop (in fact, to a subscope, but let's ignore
that for now); any later drops would also drop `y`.
There are numerous "normal" ways to early exit a scope: `break`,
`continue`, `return` (panics are handled separately). Whenever an
early exit occurs, the method `exit_scope` is called. It is given the
current point in execution where the early exit occurs, as well as the
scope you want to branch to (note that all early exits go to some
other enclosing scope). `exit_scope` will record this exit point and
also add all drops.
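
For instance (an illustrative sketch; `done` is a stand-in), a labeled
`break` here exits two scopes at once, so the outgoing edge must drop
everything scheduled in both of them:

    'outer: loop {
        let a = String::from("a");
        loop {
            let b = String::from("b");
            if done() {
                break 'outer;  // `exit_scope` emits drop(b), then
                               // drop(a), on this edge
            }
        }
    }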
Panics are handled in a similar fashion, except that a panic always
returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call
`panic(p)` with the current point `p`. Or else you can call
`diverge_cleanup`, which will produce a block that you can branch to
which does the appropriate cleanup and then diverges. `panic(p)`
simply calls `diverge_cleanup()` and adds an edge from `p` to the
result.
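
As a sketch of the effect (with `h` a stand-in for any call that can
unwind), the unwind edge of the call branches into the cleanup chain,
which drops whatever has been scheduled so far and then continues
diverging:

    fn g() {
        let s = String::from("s");
        h();    // unwind edge: drop(s), then keep unwinding
    }           // normal exit: drop(s) on the fallthrough edge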
In addition to the normal scope stack, we track a loop scope stack
that contains only loops. It tracks where a `break` and `continue`
should branch to.
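
As a sketch, each `loop` being lowered pushes one entry onto this
stack; `continue` branches to that entry's `continue_block` and
`break` to its `break_block` (see the `BreakableScope` struct below):

    'a: loop {              // pushes a breakable scope for 'a
        loop {              // pushes another for the inner loop
            continue 'a;    // branches to the continue_block of 'a
        }
    }                       // `break 'a` would branch to the
                            // break_block of 'a

*/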
use build::{BlockAnd, BlockAndExtension, Builder, CFG};
use hair::LintLevel;
use rustc::middle::region;
use rustc::ty::{Ty, TyCtxt};
use rustc::hir::def_id::LOCAL_CRATE;
use rustc::mir::*;
use rustc::mir::transform::MirSource;
use syntax_pos::{Span};
use rustc_data_structures::indexed_vec::Idx;
use rustc_data_structures::fx::FxHashMap;
#[derive(Debug)]
pub struct Scope<'tcx> {
    /// The visibility scope this scope was created in.
    visibility_scope: VisibilityScope,

    /// the region span of this scope within source code.
    region_scope: region::Scope,

    /// the span of that region_scope
    region_scope_span: Span,

    /// Whether there's anything to do for the cleanup path, that is,
    /// when unwinding through this scope. This includes destructors,
    /// but not StorageDead statements, which don't get emitted at all
    /// for unwinding, for several reasons:
    ///  * clang doesn't emit llvm.lifetime.end for C++ unwinding
    ///  * LLVM's memory dependency analysis can't handle it atm
    ///  * polluting the cleanup MIR with StorageDead creates
    ///    landing pads even though there are no actual destructors
    ///  * freeing up stack space has no effect during unwinding
    needs_cleanup: bool,

    /// set of lvalues to drop when exiting this scope. This starts
    /// out empty but grows as variables are declared during the
    /// building process. This is a stack, so we always drop from the
    /// end of the vector (top of the stack) first.
    drops: Vec<DropData<'tcx>>,

    /// The cache for drop chain on “normal” exit into a particular BasicBlock.
    cached_exits: FxHashMap<(BasicBlock, region::Scope), BasicBlock>,

    /// The cache for drop chain on "generator drop" exit.
    cached_generator_drop: Option<BasicBlock>,
}
#[derive(Debug)]
struct DropData<'tcx> {
    /// span where drop obligation was incurred (typically where lvalue was declared)
    span: Span,

    /// lvalue to drop
    location: Lvalue<'tcx>,

    /// Whether this is a full value Drop, or just a StorageDead.
    kind: DropKind,
}
#[derive(Debug, Default, Clone, Copy)]
struct CachedBlock {
    /// The cached block for the cleanups-on-diverge path. This block
    /// contains code to run the current drop and all the preceding
    /// drops (i.e. those having lower index in Drop’s Scope drop
    /// array).
    unwind: Option<BasicBlock>,

    /// The cached block for unwinds during cleanups-on-generator-drop path
    generator_drop: Option<BasicBlock>,
}

#[derive(Debug)]
enum DropKind {
    Value {
        cached_block: CachedBlock,
    },
    Storage,
}
#[derive(Clone, Debug)]
pub struct BreakableScope<'tcx> {
    /// Region scope of the loop
    pub region_scope: region::Scope,
    /// Where the body of the loop begins. `None` if this breakable scope
    /// is a block rather than a loop (blocks can be broken out of, but
    /// not continued).
    pub continue_block: Option<BasicBlock>,
    /// Block to branch into when the loop or block terminates (either by being `break`-en out
    /// from, or by having its condition become false)
    pub break_block: BasicBlock,
    /// The destination of the loop/block expression itself (i.e. where to put the result of a
    /// `break` expression)
    pub break_destination: Lvalue<'tcx>,
}
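
// Illustrative sketch, not part of this module: when lowering a
// source-level expression such as
//
//     let x = loop { break 5; };
//
// the operand `5` is written to `break_destination` and control then
// branches to `break_block`. `continue_block` is `Some` only for loops,
// where a `continue` may target it.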
impl CachedBlock {
    fn invalidate(&mut self) {
        self.generator_drop = None;
        self.unwind = None;
    }

    fn get(&self, generator_drop: bool) -> Option<BasicBlock> {
        if generator_drop { self.generator_drop } else { self.unwind }
    }

    fn ref_mut(&mut self, generator_drop: bool) -> &mut Option<BasicBlock> {
        if generator_drop { &mut self.generator_drop } else { &mut self.unwind }
    }
}
impl DropKind {
    fn may_panic(&self) -> bool {
        match *self {
            DropKind::Value { .. } => true,
            DropKind::Storage => false
        }
    }
}
impl<'tcx> Scope<'tcx> {
    /// Invalidate all the cached blocks in the scope.
    ///
    /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
    /// larger extent of code.
    ///
    /// `unwind` controls whether caches for the unwind branch are also invalidated.
    fn invalidate_cache(&mut self, unwind: bool) {
        self.cached_exits.clear();
        if !unwind { return; }
        for dropdata in &mut self.drops {
            if let DropKind::Value { ref mut cached_block } = dropdata.kind {
                cached_block.invalidate();
            }
        }
    }
    /// Returns the cached entrypoint for diverging exit from this scope.
    ///
    /// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order for
    /// this method to work correctly.
    fn cached_block(&self, generator_drop: bool) -> Option<BasicBlock> {
        let mut drops = self.drops.iter().rev().filter_map(|data| {
            match data.kind {
                DropKind::Value { cached_block } => {
                    Some(cached_block.get(generator_drop))
                }
                DropKind::Storage => None
            }
        });
        if let Some(cached_block) = drops.next() {
            Some(cached_block.expect("drop cache is not filled"))
        } else {
            None
        }
    }

    /// Given a span and this scope's visibility scope, make a SourceInfo.
    fn source_info(&self, span: Span) -> SourceInfo {
        SourceInfo {
            span,
            scope: self.visibility_scope
        }
    }
}
impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
    // Adding and removing scopes
    // ==========================
    /// Start a breakable scope, which tracks where `continue` and `break`
    /// should branch to. See module comment for more details.
    ///
    /// Returns the result of the closure `f`.
    pub fn in_breakable_scope<F, R>(&mut self,
                                    loop_block: Option<BasicBlock>,
                                    break_block: BasicBlock,
                                    break_destination: Lvalue<'tcx>,
                                    f: F) -> R
        where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> R
    {
        let region_scope = self.topmost_scope();
        let scope = BreakableScope {
            region_scope,
            continue_block: loop_block,
            break_block,
            break_destination,
        };
        self.breakable_scopes.push(scope);
        let res = f(self);
        let breakable_scope = self.breakable_scopes.pop().unwrap();
        assert!(breakable_scope.region_scope == region_scope);
        res
    }
    /// Convenience wrapper that pushes a scope (if one is supplied) and
    /// then executes `f` to build its contents, popping the scope afterwards.
    pub fn in_opt_scope<F, R>(&mut self,
                              opt_scope: Option<(region::Scope, SourceInfo)>,
                              mut block: BasicBlock,
                              f: F) -> BlockAnd<R>
        where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
    {
        debug!("in_opt_scope(opt_scope={:?}, block={:?})", opt_scope, block);
        if let Some(region_scope) = opt_scope { self.push_scope(region_scope); }
        let rv = unpack!(block = f(self));
        if let Some(region_scope) = opt_scope {
            unpack!(block = self.pop_scope(region_scope, block));
        }
        debug!("in_scope: exiting opt_scope={:?} block={:?}", opt_scope, block);
        block.and(rv)
    }
    /// Convenience wrapper that pushes a scope and then executes `f`
    /// to build its contents, popping the scope afterwards.
    pub fn in_scope<F, R>(&mut self,
                          region_scope: (region::Scope, SourceInfo),
                          lint_level: LintLevel,
                          mut block: BasicBlock,
                          f: F) -> BlockAnd<R>
        where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
    {
        debug!("in_scope(region_scope={:?}, block={:?})", region_scope, block);
        let visibility_scope = self.visibility_scope;
        let tcx = self.hir.tcx();
        if let LintLevel::Explicit(node_id) = lint_level {
            let same_lint_scopes = tcx.dep_graph.with_ignore(|| {
                let sets = tcx.lint_levels(LOCAL_CRATE);
                let parent_hir_id =
                    tcx.hir.definitions().node_to_hir_id(
                        self.visibility_scope_info[visibility_scope].lint_root
                    );
                let current_hir_id =
                    tcx.hir.definitions().node_to_hir_id(node_id);
                sets.lint_level_set(parent_hir_id) ==
                    sets.lint_level_set(current_hir_id)
            });
            if !same_lint_scopes {
                self.visibility_scope =
                    self.new_visibility_scope(region_scope.1.span, lint_level,
                                              None);
            }
        }
        self.push_scope(region_scope);
        let rv = unpack!(block = f(self));
        unpack!(block = self.pop_scope(region_scope, block));
        self.visibility_scope = visibility_scope;
        debug!("in_scope: exiting region_scope={:?} block={:?}", region_scope, block);
        block.and(rv)
    }
    /// Push a scope onto the stack. You can then build code in this
    /// scope and call `pop_scope` afterwards. Note that these two
    /// calls must be paired; using `in_scope` as a convenience
    /// wrapper may be preferable.
    pub fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo)) {
        debug!("push_scope({:?})", region_scope);
        let vis_scope = self.visibility_scope;
        self.scopes.push(Scope {
            visibility_scope: vis_scope,
            region_scope: region_scope.0,
            region_scope_span: region_scope.1.span,
            needs_cleanup: false,
            drops: vec![],
            cached_generator_drop: None,
            cached_exits: FxHashMap()
        });
    }
    /// Pops a scope, which should have region scope `region_scope`,
    /// adding any drops onto the end of `block` that are needed.
    /// This must match 1-to-1 with `push_scope`.
    pub fn pop_scope(&mut self,
                     region_scope: (region::Scope, SourceInfo),
                     mut block: BasicBlock)
                     -> BlockAnd<()> {
        debug!("pop_scope({:?}, {:?})", region_scope, block);
        // If we are emitting a `drop` statement, we need to have the cached
        // diverge cleanup pads ready in case that drop panics.
        let may_panic =
            self.scopes.last().unwrap().drops.iter().any(|s| s.kind.may_panic());
        if may_panic {
            self.diverge_cleanup();
        }
        let scope = self.scopes.pop().unwrap();
        assert_eq!(scope.region_scope, region_scope.0);

        self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope);
        unpack!(block = build_scope_drops(&mut self.cfg,
                                          &scope,
                                          &self.scopes,
                                          block,
                                          self.arg_count,
                                          false));

        block.unit()
    }
    /// Branch out of `block` to `target`, exiting all scopes up to
    /// and including `region_scope`. This will insert whatever drops are
    /// needed, as well as tracking this exit for the SEME region. See
    /// module comment for details.
    pub fn exit_scope(&mut self,
                      span: Span,
                      region_scope: (region::Scope, SourceInfo),
                      mut block: BasicBlock,
                      target: BasicBlock) {
        debug!("exit_scope(region_scope={:?}, block={:?}, target={:?})",
               region_scope, block, target);
        let scope_count = 1 + self.scopes.iter().rev()
            .position(|scope| scope.region_scope == region_scope.0)
            .unwrap_or_else(|| {
                span_bug!(span, "region_scope {:?} does not enclose", region_scope)
            });
        let len = self.scopes.len();
        assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes");

        // If we are emitting a `drop` statement, we need to have the cached
        // diverge cleanup pads ready in case that drop panics.
        let may_panic = self.scopes[(len - scope_count)..].iter()
            .any(|s| s.drops.iter().any(|s| s.kind.may_panic()));
        if may_panic {
            self.diverge_cleanup();
        }

        {
        let mut rest = &mut self.scopes[(len - scope_count)..];
        while let Some((scope, rest_)) = {rest}.split_last_mut() {
            rest = rest_;
            block = if let Some(&e) = scope.cached_exits.get(&(target, region_scope.0)) {
                self.cfg.terminate(block, scope.source_info(span),
                                   TerminatorKind::Goto { target: e });
                return;
            } else {
                let b = self.cfg.start_new_block();
                self.cfg.terminate(block, scope.source_info(span),
                                   TerminatorKind::Goto { target: b });
                scope.cached_exits.insert((target, region_scope.0), b);
                b
            };

            // End all regions for scopes out of which we are breaking.
            self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope);

            unpack!(block = build_scope_drops(&mut self.cfg,
                                              scope,
                                              rest,
                                              block,
                                              self.arg_count,
                                              false));
        }
        }
        let scope = &self.scopes[len - scope_count];
        self.cfg.terminate(block, scope.source_info(span),
                           TerminatorKind::Goto { target: target });
    }
    /// Creates a path that performs all required cleanup for dropping a generator.
    ///
    /// This path terminates in GeneratorDrop. Returns the start of the path.
    /// None indicates there’s no cleanup to do at this point.
    pub fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> {
        if !self.scopes.iter().any(|scope| scope.needs_cleanup) {
            return None;
        }

        // Fill in the cache
        self.diverge_cleanup_gen(true);

        let src_info = self.scopes[0].source_info(self.fn_span);
        let mut block = self.cfg.start_new_block();
        let result = block;
        let mut rest = &mut self.scopes[..];

        while let Some((scope, rest_)) = {rest}.split_last_mut() {
            rest = rest_;
            if !scope.needs_cleanup {
                continue;
            }
            block = if let Some(b) = scope.cached_generator_drop {
                self.cfg.terminate(block, src_info,
                                   TerminatorKind::Goto { target: b });
                return Some(result);
            } else {
                let b = self.cfg.start_new_block();
                scope.cached_generator_drop = Some(b);
                self.cfg.terminate(block, src_info,
                                   TerminatorKind::Goto { target: b });
                b
            };
            unpack!(block = build_scope_drops(&mut self.cfg,
                                              scope,
                                              rest,
                                              block,
                                              self.arg_count,
                                              true));

            // End all regions for scopes out of which we are breaking.
            self.cfg.push_end_region(self.hir.tcx(), block, src_info, scope.region_scope);
        }

        self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop);

        Some(result)
    }
    /// Creates a new visibility scope, nested in the current one.
    pub fn new_visibility_scope(&mut self,
                                span: Span,
                                lint_level: LintLevel,
                                safety: Option<Safety>) -> VisibilityScope {
        let parent = self.visibility_scope;
        debug!("new_visibility_scope({:?}, {:?}, {:?}) - parent({:?})={:?}",
               span, lint_level, safety,
               parent, self.visibility_scope_info.get(parent));
        let scope = self.visibility_scopes.push(VisibilityScopeData {
            span,
            parent_scope: Some(parent),
        });
        let scope_info = VisibilityScopeInfo {
            lint_root: if let LintLevel::Explicit(lint_root) = lint_level {
                lint_root
            } else {
                self.visibility_scope_info[parent].lint_root
            },
            safety: safety.unwrap_or_else(|| {
                self.visibility_scope_info[parent].safety
            })
        };
        self.visibility_scope_info.push(scope_info);
        scope
    }

    // Finding scopes
    // ==============
    /// Finds the breakable scope for a given label. This is used for
    /// resolving `break` and `continue`.
    pub fn find_breakable_scope(&mut self,
                                span: Span,
                                label: region::Scope)
                                -> &mut BreakableScope<'tcx> {
        // find the loop-scope with the correct id
        self.breakable_scopes.iter_mut()
            .rev()
            .filter(|breakable_scope| breakable_scope.region_scope == label)
            .next()
            .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found"))
    }
    /// Given a span and the current visibility scope, make a SourceInfo.
    pub fn source_info(&self, span: Span) -> SourceInfo {
        SourceInfo {
            span,
            scope: self.visibility_scope
        }
    }
    /// Returns the `region::Scope` of the scope which should be exited by a
    /// `return` expression.
    pub fn region_scope_of_return_scope(&self) -> region::Scope {
        // The outermost scope (`scopes[0]`) will be the `CallSiteScope`.
        // We want `scopes[1]`, which is the `ParameterScope`.
        assert!(self.scopes.len() >= 2);
        assert!(match self.scopes[1].region_scope.data() {
            region::ScopeData::Arguments(_) => true,
            _ => false,
        });
        self.scopes[1].region_scope
    }
    /// Returns the topmost active scope, which is known to be alive until
    /// the next scope expression.
    pub fn topmost_scope(&self) -> region::Scope {
        self.scopes.last().expect("topmost_scope: no scopes present").region_scope
    }
    /// Returns the scope that we should use as the lifetime of an
    /// operand. Basically, an operand must live until it is consumed.
    /// This is similar to, but not quite the same as, the temporary
    /// scope (which can be larger or smaller).
    ///
    /// Consider:
    ///
    ///     let x = foo(bar(X, Y));
    ///
    /// We wish to pop the storage for X and Y after `bar()` is
    /// called, not after the whole `let` is completed.
    ///
    /// As another example, if the second argument diverges:
    ///
    ///     foo(Box::new(2), panic!())
    ///
    /// We would allocate the box but then free it on the unwinding
    /// path; we would also emit a free on the 'success' path from
    /// panic, but that will turn out to be removed as dead-code.
    ///
    /// When building statics/constants, returns `None` since
    /// intermediate values do not have to be dropped in that case.
    pub fn local_scope(&self) -> Option<region::Scope> {
        match self.hir.src {
            MirSource::Const(_) |
            MirSource::Static(..) =>
                // No need to free storage in this context.
                None,
            MirSource::Fn(_) =>
                Some(self.topmost_scope()),
            MirSource::Promoted(..) |
            MirSource::GeneratorDrop(..) =>
                bug!(),
        }
    }

    // Scheduling drops
    // ================
    /// Indicates that `lvalue` should be dropped on exit from
    /// `region_scope`.
    pub fn schedule_drop(&mut self,
                         span: Span,
                         region_scope: region::Scope,
                         lvalue: &Lvalue<'tcx>,
                         lvalue_ty: Ty<'tcx>) {
        let needs_drop = self.hir.needs_drop(lvalue_ty);
        let drop_kind = if needs_drop {
            DropKind::Value { cached_block: CachedBlock::default() }
        } else {
            // Only temps and vars need their storage dead.
            match *lvalue {
                Lvalue::Local(index) if index.index() > self.arg_count => DropKind::Storage,
                _ => return
            }
        };

        for scope in self.scopes.iter_mut().rev() {
            let this_scope = scope.region_scope == region_scope;
            // When building drops, we try to cache chains of drops in such a way so these drops
            // could be reused by the drops which would branch into the cached (already built)
            // blocks. This, however, means that whenever we add a drop into a scope which already
            // had some blocks built (and thus, cached) for it, we must invalidate all caches which
            // might branch into the scope which had a drop just added to it. This is necessary,
            // because otherwise some other code might use the cache to branch into already built
            // chain of drops, essentially ignoring the newly added drop.
            //
            // For example consider there are two scopes with a drop in each. These are built and
            // thus the caches are filled:
            //
            // +--------------------------------------------------------+
            // | +---------------------------------+                    |
            // | | +--------+     +-------------+  |  +---------------+ |
            // | | | return | <-+ | drop(outer) | <-+ | drop(middle)  | |
            // | | +--------+     +-------------+  |  +---------------+ |
            // | +------------|outer_scope cache|--+                    |
            // +------------------------------|middle_scope cache|------+
            //
            // Now, a new, inner-most scope is added along with a new drop into both inner-most and
            // outer-most scopes:
            //
            // +------------------------------------------------------------+
            // | +----------------------------------+                       |
            // | | +--------+      +-------------+  |  +---------------+    | +-------------+
            // | | | return | <+   | drop(new)   | <-+ | drop(middle)  | <--+ | drop(inner) |
            // | | +--------+  |   | drop(outer) |     +---------------+    | +-------------+
            // | |             +-+ +-------------+                          |
            // | +---|invalid outer_scope cache|----+                       |
            // +----=----------------|invalid middle_scope cache|-----------+
            //
            // If, when adding `drop(new)` we do not invalidate the cached blocks for both
            // outer_scope and middle_scope, then, when building drops for the inner (right-most)
            // scope, the old, cached blocks, without `drop(new)`, will get used, producing the
            // wrong results.
            //
            // The cache and its invalidation for the unwind branch is somewhat special. The cache
            // is per-drop, rather than per-scope, which has several different implications. Adding
            // a new drop into a scope will not invalidate cached blocks of the prior drops in the
            // scope. That is true, because none of the already existing drops will have an edge
            // into a block with the newly added drop.
            //
            // Note that this code iterates scopes from the inner-most to the outer-most,
            // invalidating caches of each scope visited. This way the bare minimum of the
            // caches gets invalidated; i.e., if a new drop is added into the middle scope, the
            // cache of the outer scope stays intact.
            let invalidate_unwind = needs_drop && !this_scope;
            scope.invalidate_cache(invalidate_unwind);

            if this_scope {
                if let DropKind::Value { .. } = drop_kind {
                    scope.needs_cleanup = true;
                }
                let region_scope_span = region_scope.span(self.hir.tcx(),
                                                          &self.hir.region_scope_tree);
                // Attribute scope exit drops to scope's closing brace
                let scope_end = region_scope_span.with_lo(region_scope_span.hi());
                scope.drops.push(DropData {
                    span: scope_end,
                    location: lvalue.clone(),
                    kind: drop_kind
                });
                return;
            }
        }
        span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, lvalue);
    }

    // Other
    // =====
    /// Creates a path that performs all required cleanup for unwinding.
    ///
    /// This path terminates in Resume. Returns the start of the path.
    /// See module comment for more details. None indicates there’s no
    /// cleanup to do at this point.
    pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> {
        self.diverge_cleanup_gen(false)
    }

    fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> Option<BasicBlock> {
        if !self.scopes.iter().any(|scope| scope.needs_cleanup) {
            return None;
        }
        assert!(!self.scopes.is_empty()); // or `any` above would be false

        let Builder { ref mut cfg, ref mut scopes,
                      ref mut cached_resume_block, .. } = *self;

        // Build up the drops in **reverse** order. The end result will
        // look like:
        //
        //    scopes[n] -> scopes[n-1] -> ... -> scopes[0]
        //
        // However, we build this in **reverse order**. That is, we
        // process scopes[0], then scopes[1], etc, pointing each one at
        // the result generated from the one before. Along the way, we
        // store caches. If everything is cached, we'll just walk right
        // to left reading the cached results but never create anything.

        // To start, create the resume terminator.
        let mut target = if let Some(target) = *cached_resume_block {
            target
        } else {
            let resumeblk = cfg.start_new_cleanup_block();
            cfg.terminate(resumeblk,
                          scopes[0].source_info(self.fn_span),
                          TerminatorKind::Resume);
            *cached_resume_block = Some(resumeblk);
            resumeblk
        };

        for scope in scopes.iter_mut() {
            target = build_diverge_scope(self.hir.tcx(), cfg, scope.region_scope_span,
                                         scope, target, generator_drop);
        }
        Some(target)
    }
    /// Utility function for *non*-scope code to build their own drops
    pub fn build_drop(&mut self,
                      block: BasicBlock,
                      span: Span,
                      location: Lvalue<'tcx>,
                      ty: Ty<'tcx>) -> BlockAnd<()> {
        if !self.hir.needs_drop(ty) {
            return block.unit();
        }
        let source_info = self.source_info(span);
        let next_target = self.cfg.start_new_block();
        let diverge_target = self.diverge_cleanup();
        self.cfg.terminate(block, source_info,
                           TerminatorKind::Drop {
                               location,
                               target: next_target,
                               unwind: diverge_target,
                           });
        next_target.unit()
    }
    /// Utility function for *non*-scope code to build their own drops
    pub fn build_drop_and_replace(&mut self,
                                  block: BasicBlock,
                                  span: Span,
                                  location: Lvalue<'tcx>,
                                  value: Operand<'tcx>) -> BlockAnd<()> {
        let source_info = self.source_info(span);
        let next_target = self.cfg.start_new_block();
        let diverge_target = self.diverge_cleanup();
        self.cfg.terminate(block, source_info,
                           TerminatorKind::DropAndReplace {
                               location,
                               value,
                               target: next_target,
                               unwind: diverge_target,
                           });
        next_target.unit()
    }
    /// Create an Assert terminator and return the success block.
    /// If the boolean condition operand is not the expected value,
    /// a runtime panic will be caused with the given message.
    pub fn assert(&mut self, block: BasicBlock,
                  cond: Operand<'tcx>,
                  expected: bool,
                  msg: AssertMessage<'tcx>,
                  span: Span)
                  -> BasicBlock {
        let source_info = self.source_info(span);

        let success_block = self.cfg.start_new_block();
        let cleanup = self.diverge_cleanup();

        self.cfg.terminate(block, source_info,
                           TerminatorKind::Assert {
                               cond,
                               expected,
                               msg,
                               target: success_block,
                               cleanup,
                           });

        success_block
    }
}
/// Builds drops for pop_scope and exit_scope.
fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
                           scope: &Scope<'tcx>,
                           earlier_scopes: &[Scope<'tcx>],
                           mut block: BasicBlock,
                           arg_count: usize,
                           generator_drop: bool)
                           -> BlockAnd<()> {
    debug!("build_scope_drops({:?} -> {:?})", block, scope);
    let mut iter = scope.drops.iter().rev().peekable();
    while let Some(drop_data) = iter.next() {
        let source_info = scope.source_info(drop_data.span);
        match drop_data.kind {
            DropKind::Value { .. } => {
                // Try to find the next block with its cached block
                // for us to diverge into in case the drop panics.
                let on_diverge = iter.peek().iter().filter_map(|dd| {
                    match dd.kind {
                        DropKind::Value { cached_block } => {
                            let result = cached_block.get(generator_drop);
                            if result.is_none() {
                                span_bug!(drop_data.span, "cached block not present?")
                            }
                            result
                        },
                        DropKind::Storage => None
                    }
                }).next();
                // If there’s no `cached_block`s within current scope,
                // we must look for one in the enclosing scope.
                let on_diverge = on_diverge.or_else(|| {
                    earlier_scopes.iter().rev().flat_map(|s| s.cached_block(generator_drop)).next()
                });
                let next = cfg.start_new_block();
                cfg.terminate(block, source_info, TerminatorKind::Drop {
                    location: drop_data.location.clone(),
                    target: next,
                    unwind: on_diverge
                });
                block = next;
            }
            DropKind::Storage => {}
        }

        // We do not need to emit StorageDead for generator drops
        if generator_drop {
            continue
        }

        // Drop the storage for both value and storage drops.
        // Only temps and vars need their storage dead.
        match drop_data.location {
            Lvalue::Local(index) if index.index() > arg_count => {
                cfg.push(block, Statement {
                    source_info,
                    kind: StatementKind::StorageDead(index)
                });
            }
            _ => continue
        }
    }
    block.unit()
}
fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
                                       cfg: &mut CFG<'tcx>,
                                       span: Span,
                                       scope: &mut Scope<'tcx>,
                                       mut target: BasicBlock,
                                       generator_drop: bool)
                                       -> BasicBlock
{
    // Build up the drops in **reverse** order. The end result will
    // look like:
    //
    //    [EndRegion Block] -> [drops[n]] -...-> [drops[0]] -> [Free] -> [target]
    //
    //    |                                                              |
    //    +---------------------------------------------------------+
    //     code for scope
    //
    // The code in this function reads from right to left. At each
    // point, we check for cached blocks representing the
    // remainder. If everything is cached, we'll just walk right to
    // left reading the cached results but never create anything.

    let visibility_scope = scope.visibility_scope;
    let source_info = |span| SourceInfo {
        span,
        scope: visibility_scope
    };

    // Next, build up the drops. Here we iterate the vector in
    // *forward* order, so that we generate drops[0] first (right to
    // left in diagram above).
    for (j, drop_data) in scope.drops.iter_mut().enumerate() {
        debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data);
        // Only full value drops are emitted in the diverging path,
        // not StorageDead.
        //
        // Note: This may not actually be what we desire (are we
        // "freeing" stack storage as we unwind, or merely observing a
        // frozen stack)? In particular, the intent may have been to
        // match the behavior of clang, but on inspection eddyb says
        // this is not what clang does.
        let cached_block = match drop_data.kind {
            DropKind::Value { ref mut cached_block } => cached_block.ref_mut(generator_drop),
            DropKind::Storage => continue
        };
        target = if let Some(cached_block) = *cached_block {
            cached_block
        } else {
            let block = cfg.start_new_cleanup_block();
            cfg.terminate(block, source_info(drop_data.span),
                          TerminatorKind::Drop {
                              location: drop_data.location.clone(),
                              target,
                              unwind: None
                          });
            *cached_block = Some(block);
            block
        };
    }

    // Finally, push the EndRegion block, used by mir-borrowck. (Block
    // becomes trivial goto after pass that removes all EndRegions.)
    target = {
        let block = cfg.start_new_cleanup_block();
        cfg.push_end_region(tcx, block, source_info(span), scope.region_scope);
        cfg.terminate(block, source_info(span), TerminatorKind::Goto { target: target });
        block
    };

    target
}