// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
Managing the scope stack. The scopes are tied to lexical scopes, so as
we descend the HAIR, we push a scope on the stack, build its
contents, and then pop it off. Every scope is named by a
`region::Scope`.

### SEME Regions
When pushing a new scope, we record the current point in the graph (a
basic block); this marks the entry to the scope. We then generate more
stuff in the control-flow graph. Whenever the scope is exited, either
via a `break` or `return` or just by fallthrough, that marks an exit
from the scope. Each lexical scope thus corresponds to a single-entry,
multiple-exit (SEME) region in the control-flow graph.
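For instance (a sketch; `cond` and `use_it` are stand-ins, not names
from this module), a block with a conditional `return` has a single
entry and two exits:

```
{
    let s = String::new(); // scope entered: one entry point
    if cond {
        return;            // exit 1: early exit edge out of the scope
    }
    use_it(&s);
}                          // exit 2: normal fallthrough past the closing brace
```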
For now, we keep a mapping from each `region::Scope` to its
corresponding SEME region for later reference (see caveat in next
paragraph). This is because region scopes are tied to these SEME
regions. Eventually, when we shift to non-lexical lifetimes, there
should be no need to remember this mapping.
There is one additional wrinkle, actually, that I wanted to hide from
you but duty compels me to mention. In the course of building
matches, it sometimes happens that certain code (namely guards) gets
executed multiple times. This means that a single lexical scope may
in fact correspond to multiple, disjoint SEME regions. So in fact our
mapping is from one scope to a vector of SEME regions.
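For example (a sketch), a guard on an arm with multiple `|` patterns
may be tested once per candidate pattern, so its code is emitted more
than once:

```
match (a, b) {
    // The guard may run for the first pattern and again for the second,
    // giving the guard's lexical scope two disjoint SEME regions.
    (true, _) | (_, true) if expensive_check() => { /* ... */ }
    _ => { /* ... */ }
}
```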
### Drops

The primary purpose for scopes is to insert drops: while building
the contents, we also accumulate places that need to be dropped upon
exit from each scope. This is done by calling `schedule_drop`. Once a
drop is scheduled, whenever we branch out we will insert drops of all
those places onto the outgoing edge. Note that we don't know the full
set of scheduled drops up front, and so whenever we exit from the
scope we only drop the values scheduled thus far. For example, consider
the scope S corresponding to this loop:
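```
loop {
    let x = ..;
    if cond { break; }
    let y = ..;
}
```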
When processing the `let x`, we will add one drop to the scope for
`x`. The break will then insert a drop for `x`. When we process `let
y`, we will add another drop (in fact, to a subscope, but let's ignore
that for now); any later drops would also drop `y`.
### Early exit

There are numerous "normal" ways to early exit a scope: `break`,
`continue`, `return` (panics are handled separately). Whenever an
early exit occurs, the method `exit_scope` is called. It is given the
current point in execution where the early exit occurs, as well as the
scope you want to branch to (note that all early exits branch to some
other enclosing scope). `exit_scope` will record this exit point and
add all the drops needed along that edge.
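For example (a sketch; `cond` is a stand-in), an early `return` from a
nested block must drop everything still live in every scope being
exited, innermost first:

```
fn f(cond: bool) -> i32 {
    let a = String::new();
    {
        let b = String::new();
        if cond {
            return 1; // early exit: drop(b), then drop(a), on this edge
        }
    }                 // normal exit of the inner scope: drop(b) here
    0
}
```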
Panics are handled in a similar fashion, except that a panic always
returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call
`panic(p)` with the current point `p`. Or else you can call
`diverge_cleanup`, which will produce a block that you can branch to
which does the appropriate cleanup and then diverges. `panic(p)`
simply calls `diverge_cleanup()` and adds an edge from `p` to the
result.
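To illustrate (a sketch; `may_panic` is a stand-in for any call with
an unwind edge), the cleanup block produced by `diverge_cleanup` is
what the unwind edge of such a call targets:

```
let v = vec![1, 2, 3];
may_panic();   // this call's unwind edge branches to a cleanup block
               // (built by `diverge_cleanup`) that drops `v`, then diverges
```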
### Loop scopes

In addition to the normal scope stack, we track a loop scope stack
that contains only loops. It tracks where a `break` and `continue`
should go to.
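For example (a sketch; `done` is a stand-in condition), labeled and
unlabeled `break`/`continue` are resolved against this stack:

```
'outer: loop {
    loop {
        if done() { break 'outer; } // found by searching the loop scope
                                    // stack for the labeled scope
        continue;                   // targets the innermost loop scope
    }
}
```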
use build::{BlockAnd, BlockAndExtension, Builder, CFG};
use hair::LintLevel;
use rustc::middle::region;
use rustc::ty::Ty;
use rustc::hir;
use rustc::hir::def_id::LOCAL_CRATE;
use rustc::mir::*;
use syntax_pos::{Span};
use rustc_data_structures::fx::FxHashMap;
use std::collections::hash_map::Entry;
#[derive(Debug)]
pub struct Scope<'tcx> {
    /// The source scope this scope was created in.
    source_scope: SourceScope,

    /// The region scope of this scope within source code.
    region_scope: region::Scope,

    /// The span of that `region_scope`.
    region_scope_span: Span,
    /// Whether there's anything to do for the cleanup path, that is,
    /// when unwinding through this scope. This includes destructors,
    /// but not StorageDead statements, which don't get emitted at all
    /// for unwinding, for several reasons:
    /// * clang doesn't emit llvm.lifetime.end for C++ unwinding
    /// * LLVM's memory dependency analysis can't handle it atm
    /// * polluting the cleanup MIR with StorageDead creates
    ///   landing pads even though there are no actual destructors
    /// * freeing up stack space has no effect during unwinding
    needs_cleanup: bool,
    /// Set of places to drop when exiting this scope. This starts
    /// out empty but grows as variables are declared during the
    /// building process. This is a stack, so we always drop from the
    /// end of the vector (top of the stack) first.
    drops: Vec<DropData<'tcx>>,

    /// The cache for drop chain on “normal” exit into a particular BasicBlock.
    cached_exits: FxHashMap<(BasicBlock, region::Scope), BasicBlock>,

    /// The cache for drop chain on "generator drop" exit.
    cached_generator_drop: Option<BasicBlock>,

    /// The cache for drop chain on "unwind" exit.
    cached_unwind: CachedBlock,
}
#[derive(Debug)]
struct DropData<'tcx> {
    /// span where drop obligation was incurred (typically where place was declared)
    span: Span,

    /// place to drop
    location: Place<'tcx>,

    /// Whether this is a value Drop or a StorageDead.
    kind: DropKind,
}
#[derive(Debug, Default, Clone, Copy)]
pub(crate) struct CachedBlock {
    /// The cached block for the cleanups-on-diverge path. This block
    /// contains code to run the current drop and all the preceding
    /// drops (i.e. those having lower index in Drop’s Scope drop
    /// array).
    unwind: Option<BasicBlock>,

    /// The cached block for unwinds during cleanups-on-generator-drop path
    ///
    /// This is split from the standard unwind path here to prevent drop
    /// elaboration from creating drop flags that would have to be captured
    /// by the generator. I'm not sure how important this optimization is,
    /// but it is small.
    generator_drop: Option<BasicBlock>,
}
#[derive(Debug)]
pub(crate) enum DropKind {
    Value {
        cached_block: CachedBlock,
    },
    Storage,
}
#[derive(Clone, Debug)]
pub struct BreakableScope<'tcx> {
    /// Region scope of the loop
    pub region_scope: region::Scope,
    /// Where the body of the loop begins. `None` if block
    pub continue_block: Option<BasicBlock>,
    /// Block to branch into when the loop or block terminates (either by being
    /// `break`-en out from, or by having its condition become false)
    pub break_block: BasicBlock,
    /// The destination of the loop/block expression itself (i.e. where to put
    /// the result of a `break` expression)
    pub break_destination: Place<'tcx>,
}
impl CachedBlock {
    fn invalidate(&mut self) {
        self.generator_drop = None;
        self.unwind = None;
    }

    fn get(&self, generator_drop: bool) -> Option<BasicBlock> {
        if generator_drop { self.generator_drop } else { self.unwind }
    }

    fn ref_mut(&mut self, generator_drop: bool) -> &mut Option<BasicBlock> {
        if generator_drop { &mut self.generator_drop } else { &mut self.unwind }
    }
}
impl<'tcx> DropData<'tcx> {
    fn may_panic(&self) -> bool {
        match self.kind {
            DropKind::Value { .. } => true,
            DropKind::Storage => false,
        }
    }
}
impl<'tcx> Scope<'tcx> {
    /// Invalidate all the cached blocks in the scope.
    ///
    /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
    /// larger extent of code.
    ///
    /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`.
    /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
    /// top-of-scope (as opposed to dependent scopes).
    fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
        // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
        // with lots of `try!`?

        // cached exits drop storage and refer to the top-of-scope
        self.cached_exits.clear();

        if !storage_only {
            // the current generator drop and unwind ignore
            // storage but refer to top-of-scope
            self.cached_generator_drop = None;
            self.cached_unwind.invalidate();
        }

        if !storage_only && !this_scope_only {
            for drop_data in &mut self.drops {
                if let DropKind::Value { ref mut cached_block } = drop_data.kind {
                    cached_block.invalidate();
                }
            }
        }
    }
    /// Given a span and this scope's source scope, make a SourceInfo.
    fn source_info(&self, span: Span) -> SourceInfo {
        SourceInfo {
            span,
            scope: self.source_scope,
        }
    }
}
impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
    // Adding and removing scopes
    // ==========================
    /// Start a breakable scope, which tracks where `continue` and `break`
    /// should branch to. See module comment for more details.
    ///
    /// Returns the value produced by the closure `f`.
    pub fn in_breakable_scope<F, R>(&mut self,
                                    loop_block: Option<BasicBlock>,
                                    break_block: BasicBlock,
                                    break_destination: Place<'tcx>,
                                    f: F) -> R
        where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> R
    {
        let region_scope = self.topmost_scope();
        let scope = BreakableScope {
            region_scope,
            continue_block: loop_block,
            break_block,
            break_destination,
        };
        self.breakable_scopes.push(scope);
        let res = f(self);
        let breakable_scope = self.breakable_scopes.pop().unwrap();
        assert!(breakable_scope.region_scope == region_scope);
        res
    }
    pub fn in_opt_scope<F, R>(&mut self,
                              opt_scope: Option<(region::Scope, SourceInfo)>,
                              mut block: BasicBlock,
                              f: F) -> BlockAnd<R>
        where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
    {
        debug!("in_opt_scope(opt_scope={:?}, block={:?})", opt_scope, block);
        if let Some(region_scope) = opt_scope { self.push_scope(region_scope); }
        let rv = unpack!(block = f(self));
        if let Some(region_scope) = opt_scope {
            unpack!(block = self.pop_scope(region_scope, block));
        }
        debug!("in_scope: exiting opt_scope={:?} block={:?}", opt_scope, block);
        block.and(rv)
    }
    /// Convenience wrapper that pushes a scope and then executes `f`
    /// to build its contents, popping the scope afterwards.
    pub fn in_scope<F, R>(&mut self,
                          region_scope: (region::Scope, SourceInfo),
                          lint_level: LintLevel,
                          mut block: BasicBlock,
                          f: F) -> BlockAnd<R>
        where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
    {
        debug!("in_scope(region_scope={:?}, block={:?})", region_scope, block);
        let source_scope = self.source_scope;
        let tcx = self.hir.tcx();
        if let LintLevel::Explicit(node_id) = lint_level {
            let same_lint_scopes = tcx.dep_graph.with_ignore(|| {
                let sets = tcx.lint_levels(LOCAL_CRATE);
                let parent_hir_id =
                    tcx.hir.definitions().node_to_hir_id(
                        self.source_scope_local_data[source_scope].lint_root
                    );
                let current_hir_id =
                    tcx.hir.definitions().node_to_hir_id(node_id);
                sets.lint_level_set(parent_hir_id) ==
                    sets.lint_level_set(current_hir_id)
            });

            if !same_lint_scopes {
                self.source_scope =
                    self.new_source_scope(region_scope.1.span, lint_level,
                                          None);
            }
        }
        self.push_scope(region_scope);
        let rv = unpack!(block = f(self));
        unpack!(block = self.pop_scope(region_scope, block));
        self.source_scope = source_scope;
        debug!("in_scope: exiting region_scope={:?} block={:?}", region_scope, block);
        block.and(rv)
    }
    /// Push a scope onto the stack. You can then build code in this
    /// scope and call `pop_scope` afterwards. Note that these two
    /// calls must be paired; using `in_scope` as a convenience
    /// wrapper may be preferable.
    pub fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo)) {
        debug!("push_scope({:?})", region_scope);
        let vis_scope = self.source_scope;
        self.scopes.push(Scope {
            source_scope: vis_scope,
            region_scope: region_scope.0,
            region_scope_span: region_scope.1.span,
            needs_cleanup: false,
            drops: vec![],
            cached_generator_drop: None,
            cached_exits: Default::default(),
            cached_unwind: CachedBlock::default(),
        });
    }
    /// Pops a scope, which should have region scope `region_scope`,
    /// adding any drops onto the end of `block` that are needed.
    /// This must match 1-to-1 with `push_scope`.
    pub fn pop_scope(&mut self,
                     region_scope: (region::Scope, SourceInfo),
                     mut block: BasicBlock)
                     -> BlockAnd<()> {
        debug!("pop_scope({:?}, {:?})", region_scope, block);
        // If we are emitting a `drop` statement, we need to have the cached
        // diverge cleanup pads ready in case that drop panics.
        let may_panic =
            self.scopes.last().unwrap().drops.iter().any(|s| s.kind.may_panic());
        if may_panic {
            self.diverge_cleanup();
        }
        let scope = self.scopes.pop().unwrap();
        assert_eq!(scope.region_scope, region_scope.0);

        let unwind_to = self.scopes.last().and_then(|next_scope| {
            next_scope.cached_unwind.get(false)
        }).unwrap_or_else(|| self.resume_block());

        unpack!(block = build_scope_drops(
            &mut self.cfg,
            &scope,
            block,
            unwind_to,
            self.arg_count,
            false,
        ));

        block.unit()
    }
    /// Branch out of `block` to `target`, exiting all scopes up to
    /// and including `region_scope`. This will insert whatever drops are
    /// needed. See module comment for details.
    pub fn exit_scope(&mut self,
                      span: Span,
                      region_scope: (region::Scope, SourceInfo),
                      mut block: BasicBlock,
                      target: BasicBlock) {
        debug!("exit_scope(region_scope={:?}, block={:?}, target={:?})",
               region_scope, block, target);
        let scope_count = 1 + self.scopes.iter().rev()
            .position(|scope| scope.region_scope == region_scope.0)
            .unwrap_or_else(|| {
                span_bug!(span, "region_scope {:?} does not enclose", region_scope)
            });
        let len = self.scopes.len();
        assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes");

        // If we are emitting a `drop` statement, we need to have the cached
        // diverge cleanup pads ready in case that drop panics.
        let may_panic = self.scopes[(len - scope_count)..].iter().any(|s| s.needs_cleanup);
        if may_panic {
            self.diverge_cleanup();
        }

        {
            let mut scopes = self.scopes[(len - scope_count - 1)..].iter_mut().rev();
            let mut scope = scopes.next().unwrap();
            for next_scope in scopes {
                if scope.drops.is_empty() {
                    scope = next_scope;
                    continue;
                }
                let source_info = scope.source_info(span);
                block = match scope.cached_exits.entry((target, region_scope.0)) {
                    Entry::Occupied(e) => {
                        self.cfg.terminate(block, source_info,
                                           TerminatorKind::Goto { target: *e.get() });
                        return;
                    }
                    Entry::Vacant(v) => {
                        let b = self.cfg.start_new_block();
                        self.cfg.terminate(block, source_info,
                                           TerminatorKind::Goto { target: b });
                        v.insert(b);
                        b
                    }
                };

                let unwind_to = next_scope.cached_unwind.get(false).unwrap_or_else(|| {
                    debug_assert!(!may_panic, "cached block not present?");
                    START_BLOCK
                });

                unpack!(block = build_scope_drops(
                    &mut self.cfg,
                    scope,
                    block,
                    unwind_to,
                    self.arg_count,
                    false,
                ));

                scope = next_scope;
            }
        }

        let scope = &self.scopes[len - scope_count];
        self.cfg.terminate(block, scope.source_info(span),
                           TerminatorKind::Goto { target });
    }
    /// Creates a path that performs all required cleanup for dropping a generator.
    ///
    /// This path terminates in GeneratorDrop. Returns the start of the path.
    /// None indicates there’s no cleanup to do at this point.
    pub fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> {
        if !self.scopes.iter().any(|scope| scope.needs_cleanup) {
            return None;
        }

        // Fill in the cache for unwinds
        self.diverge_cleanup_gen(true);

        let src_info = self.scopes[0].source_info(self.fn_span);
        let resume_block = self.resume_block();
        let mut scopes = self.scopes.iter_mut().rev().peekable();
        let mut block = self.cfg.start_new_block();
        let result = block;

        while let Some(scope) = scopes.next() {
            if !scope.needs_cleanup {
                continue;
            }

            block = if let Some(b) = scope.cached_generator_drop {
                self.cfg.terminate(block, src_info,
                                   TerminatorKind::Goto { target: b });
                return Some(result);
            } else {
                let b = self.cfg.start_new_block();
                scope.cached_generator_drop = Some(b);
                self.cfg.terminate(block, src_info,
                                   TerminatorKind::Goto { target: b });
                b
            };

            let unwind_to = scopes.peek().as_ref().map(|scope| {
                scope.cached_unwind.get(true).unwrap_or_else(|| {
                    span_bug!(src_info.span, "cached block not present?")
                })
            }).unwrap_or(resume_block);

            unpack!(block = build_scope_drops(
                &mut self.cfg,
                scope,
                block,
                unwind_to,
                self.arg_count,
                true,
            ));
        }

        self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop);

        Some(result)
    }
    /// Creates a new source scope, nested in the current one.
    pub fn new_source_scope(&mut self,
                            span: Span,
                            lint_level: LintLevel,
                            safety: Option<Safety>) -> SourceScope {
        let parent = self.source_scope;
        debug!("new_source_scope({:?}, {:?}, {:?}) - parent({:?})={:?}",
               span, lint_level, safety,
               parent, self.source_scope_local_data.get(parent));
        let scope = self.source_scopes.push(SourceScopeData {
            span,
            parent_scope: Some(parent),
        });
        let scope_local_data = SourceScopeLocalData {
            lint_root: if let LintLevel::Explicit(lint_root) = lint_level {
                lint_root
            } else {
                self.source_scope_local_data[parent].lint_root
            },
            safety: safety.unwrap_or_else(|| {
                self.source_scope_local_data[parent].safety
            }),
        };
        self.source_scope_local_data.push(scope_local_data);
        scope
    }
    /// Finds the breakable scope for a given label. This is used for
    /// resolving `break` and `continue`.
    pub fn find_breakable_scope(&self,
                                span: Span,
                                label: region::Scope)
                                -> &BreakableScope<'tcx> {
        // find the loop-scope with the correct id
        self.breakable_scopes.iter()
            .rev()
            .filter(|breakable_scope| breakable_scope.region_scope == label)
            .next()
            .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found"))
    }
    /// Given a span and the current source scope, make a SourceInfo.
    pub fn source_info(&self, span: Span) -> SourceInfo {
        SourceInfo {
            span,
            scope: self.source_scope,
        }
    }
    /// Returns the `region::Scope` of the scope which should be exited by a
    /// return.
    pub fn region_scope_of_return_scope(&self) -> region::Scope {
        // The outermost scope (`scopes[0]`) will be the `CallSiteScope`.
        // We want `scopes[1]`, which is the `ParameterScope`.
        assert!(self.scopes.len() >= 2);
        assert!(match self.scopes[1].region_scope.data {
            region::ScopeData::Arguments => true,
            _ => false,
        });
        self.scopes[1].region_scope
    }
    /// Returns the topmost active scope, which is known to be alive until
    /// the next scope expression.
    pub fn topmost_scope(&self) -> region::Scope {
        self.scopes.last().expect("topmost_scope: no scopes present").region_scope
    }
    /// Returns the scope that we should use as the lifetime of an
    /// operand. Basically, an operand must live until it is consumed.
    /// This is similar to, but not quite the same as, the temporary
    /// scope (which can be larger or smaller).
    ///
    /// Consider:
    ///
    ///     let x = foo(bar(X, Y));
    ///
    /// We wish to pop the storage for X and Y after `bar()` is
    /// called, not after the whole `let` is completed.
    ///
    /// As another example, if the second argument diverges:
    ///
    ///     foo(Box::new(2), panic!())
    ///
    /// We would allocate the box but then free it on the unwinding
    /// path; we would also emit a free on the 'success' path from
    /// panic, but that will turn out to be removed as dead-code.
    ///
    /// When building statics/constants, returns `None` since
    /// intermediate values do not have to be dropped in that case.
    pub fn local_scope(&self) -> Option<region::Scope> {
        match self.hir.body_owner_kind {
            hir::BodyOwnerKind::Const |
            hir::BodyOwnerKind::Static(_) =>
                // No need to free storage in this context.
                None,
            hir::BodyOwnerKind::Fn =>
                Some(self.topmost_scope()),
        }
    }
    // Schedule an abort block - this is used for some ABIs that cannot unwind
    pub fn schedule_abort(&mut self) -> BasicBlock {
        self.scopes[0].needs_cleanup = true;
        let abortblk = self.cfg.start_new_cleanup_block();
        let source_info = self.scopes[0].source_info(self.fn_span);
        self.cfg.terminate(abortblk, source_info, TerminatorKind::Abort);
        self.cached_resume_block = Some(abortblk);
        abortblk
    }
    pub fn schedule_drop_storage_and_value(
        &mut self,
        span: Span,
        region_scope: region::Scope,
        place: &Place<'tcx>,
        place_ty: Ty<'tcx>,
    ) {
        self.schedule_drop(
            span, region_scope, place, place_ty,
            DropKind::Storage,
        );
        self.schedule_drop(
            span, region_scope, place, place_ty,
            DropKind::Value {
                cached_block: CachedBlock::default(),
            },
        );
    }
    /// Indicates that `place` should be dropped on exit from
    /// `region_scope`.
    ///
    /// When called with `DropKind::Storage`, `place` should be a local
    /// with an index higher than the current `self.arg_count`.
    pub fn schedule_drop(
        &mut self,
        span: Span,
        region_scope: region::Scope,
        place: &Place<'tcx>,
        place_ty: Ty<'tcx>,
        drop_kind: DropKind,
    ) {
        let needs_drop = self.hir.needs_drop(place_ty);
        match drop_kind {
            DropKind::Value { .. } => if !needs_drop { return },
            DropKind::Storage => {
                match *place {
                    Place::Local(index) => if index.index() <= self.arg_count {
                        span_bug!(
                            span, "`schedule_drop` called with index {} and arg_count {}",
                            index.index(),
                            self.arg_count,
                        )
                    },
                    _ => span_bug!(
                        span, "`schedule_drop` called with non-`Local` place {:?}", place
                    ),
                }
            }
        }

        for scope in self.scopes.iter_mut().rev() {
            let this_scope = scope.region_scope == region_scope;
            // When building drops, we try to cache chains of drops in such a way so these drops
            // could be reused by the drops which would branch into the cached (already built)
            // blocks. This, however, means that whenever we add a drop into a scope which already
            // had some blocks built (and thus, cached) for it, we must invalidate all caches which
            // might branch into the scope which had a drop just added to it. This is necessary,
            // because otherwise some other code might use the cache to branch into already built
            // chain of drops, essentially ignoring the newly added drop.
            //
            // For example consider there are two scopes with a drop in each. These are built and
            // thus the caches are filled:
            //
            // +--------------------------------------------------------+
            // | +---------------------------------+                    |
            // | | +--------+     +-------------+  |  +---------------+ |
            // | | | return | <-+ | drop(outer) | <-+ |  drop(middle) | |
            // | | +--------+     +-------------+  |  +---------------+ |
            // | +------------|outer_scope cache|--+                    |
            // +------------------------------|middle_scope cache|------+
            //
            // Now, a new, inner-most scope is added along with a new drop into both inner-most and
            // outer-most scopes:
            //
            // +------------------------------------------------------------+
            // | +----------------------------------+                       |
            // | | +--------+      +-------------+  |   +---------------+   | +-------------+
            // | | | return | <-+  | drop(new)   | <-+  |  drop(middle) | <--+| drop(inner) |
            // | | +--------+  |   | drop(outer) |  |   +---------------+   | +-------------+
            // | |             +-+ +-------------+  |                       |
            // | +---|invalid outer_scope cache|----+                       |
            // +---------------------|invalid middle_scope cache|-----------+
            //
            // If, when adding `drop(new)` we do not invalidate the cached blocks for both
            // outer_scope and middle_scope, then, when building drops for the inner (right-most)
            // scope, the old, cached blocks, without `drop(new)`, will get used, producing the
            // wrong results.
            //
            // The cache and its invalidation for unwind branch is somewhat special. The cache is
            // per-drop, rather than per scope, which has several different implications. Adding
            // a new drop into a scope will not invalidate cached blocks of the prior drops in the
            // scope. That is true, because none of the already existing drops will have an edge
            // into a block with the newly added drop.
            //
            // Note that this code iterates scopes from the inner-most to the outer-most,
            // invalidating caches of each scope visited. This way the bare minimum of the
            // caches gets invalidated. i.e. if a new drop is added into the middle scope, the
            // cache of the outer scope stays intact.
            scope.invalidate_cache(!needs_drop, this_scope);
            if this_scope {
                if let DropKind::Value { .. } = drop_kind {
                    scope.needs_cleanup = true;
                }

                let region_scope_span = region_scope.span(self.hir.tcx(),
                                                          &self.hir.region_scope_tree);
                // Attribute scope exit drops to scope's closing brace.
                let scope_end = self.hir.tcx().sess.source_map().end_point(region_scope_span);

                scope.drops.push(DropData {
                    span: scope_end,
                    location: place.clone(),
                    kind: drop_kind,
                });
                return;
            }
        }
        span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, place);
    }
    /// Creates a path that performs all required cleanup for unwinding.
    ///
    /// This path terminates in Resume. Returns the start of the path.
    /// See module comment for more details.
    pub fn diverge_cleanup(&mut self) -> BasicBlock {
        self.diverge_cleanup_gen(false)
    }
    fn resume_block(&mut self) -> BasicBlock {
        if let Some(target) = self.cached_resume_block {
            target
        } else {
            let resumeblk = self.cfg.start_new_cleanup_block();
            self.cfg.terminate(resumeblk,
                               SourceInfo {
                                   scope: OUTERMOST_SOURCE_SCOPE,
                                   span: self.fn_span,
                               },
                               TerminatorKind::Resume);
            self.cached_resume_block = Some(resumeblk);
            resumeblk
        }
    }
    fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> BasicBlock {
        // Build up the drops in **reverse** order. The end result will
        // look like:
        //
        // scopes[n] -> scopes[n-1] -> ... -> scopes[0]
        //
        // However, we build this in **reverse order**. That is, we
        // process scopes[0], then scopes[1], etc, pointing each one at
        // the result generated from the one before. Along the way, we
        // store caches. If everything is cached, we'll just walk right
        // to left reading the cached results but never create anything.

        // Find the last cached block
        let (mut target, first_uncached) = if let Some(cached_index) = self.scopes.iter()
            .rposition(|scope| scope.cached_unwind.get(generator_drop).is_some()) {
            (self.scopes[cached_index].cached_unwind.get(generator_drop).unwrap(),
             cached_index + 1)
        } else {
            (self.resume_block(), 0)
        };

        for scope in self.scopes[first_uncached..].iter_mut() {
            target = build_diverge_scope(&mut self.cfg, scope.region_scope_span,
                                         scope, target, generator_drop);
        }

        target
    }
    /// Utility function for *non*-scope code to build its own drops
    pub fn build_drop(&mut self,
                      block: BasicBlock,
                      span: Span,
                      location: Place<'tcx>,
                      ty: Ty<'tcx>) -> BlockAnd<()> {
        if !self.hir.needs_drop(ty) {
            return block.unit();
        }
        let source_info = self.source_info(span);
        let next_target = self.cfg.start_new_block();
        let diverge_target = self.diverge_cleanup();
        self.cfg.terminate(block, source_info,
                           TerminatorKind::Drop {
                               location,
                               target: next_target,
                               unwind: Some(diverge_target),
                           });
        next_target.unit()
    }
    /// Utility function for *non*-scope code to build its own drops
    pub fn build_drop_and_replace(&mut self,
                                  block: BasicBlock,
                                  span: Span,
                                  location: Place<'tcx>,
                                  value: Operand<'tcx>) -> BlockAnd<()> {
        let source_info = self.source_info(span);
        let next_target = self.cfg.start_new_block();
        let diverge_target = self.diverge_cleanup();
        self.cfg.terminate(block, source_info,
                           TerminatorKind::DropAndReplace {
                               location,
                               value,
                               target: next_target,
                               unwind: Some(diverge_target),
                           });
        next_target.unit()
    }
    /// Create an Assert terminator and return the success block.
    /// If the boolean condition operand is not the expected value,
    /// a runtime panic will be caused with the given message.
    pub fn assert(&mut self, block: BasicBlock,
                  cond: Operand<'tcx>,
                  expected: bool,
                  msg: AssertMessage<'tcx>,
                  span: Span)
                  -> BasicBlock {
        let source_info = self.source_info(span);

        let success_block = self.cfg.start_new_block();
        let cleanup = self.diverge_cleanup();

        self.cfg.terminate(block, source_info,
                           TerminatorKind::Assert {
                               cond,
                               expected,
                               msg,
                               target: success_block,
                               cleanup: Some(cleanup),
                           });

        success_block
    }
}
/// Builds drops for pop_scope and exit_scope.
fn build_scope_drops<'tcx>(
    cfg: &mut CFG<'tcx>,
    scope: &Scope<'tcx>,
    mut block: BasicBlock,
    last_unwind_to: BasicBlock,
    arg_count: usize,
    generator_drop: bool,
) -> BlockAnd<()> {
    debug!("build_scope_drops({:?} -> {:?})", block, scope);

    // Build up the drops in evaluation order. The end result will
    // look like:
    //
    // [SDs, drops[n]] --..> [SDs, drop[1]] -> [SDs, drop[0]] -> [[SDs]]
    //        |                    |                  |
    //        V                    V                  V
    // [drop[n]] -...-> [drop[1]] ------> [drop[0]] ------> [last_unwind_to]
    //
    // The horizontal arrows represent the execution path when the drops return
    // successfully. The downwards arrows represent the execution path when the
    // drops panic (panicking while unwinding will abort, so there's no need for
    // another set of arrows). The drops for the unwind path should have already
    // been generated by `diverge_cleanup_gen`.
    //
    // The code in this function reads from right to left.
    // Storage dead drops have to be done left to right (since we can only push
    // to the end of a Vec). So, we find the next drop and then call
    // push_storage_deads which will iterate backwards through them so that
    // they are added in the correct order.

    let mut unwind_blocks = scope.drops.iter().rev().filter_map(|drop_data| {
        if let DropKind::Value { cached_block } = drop_data.kind {
            Some(cached_block.get(generator_drop).unwrap_or_else(|| {
                span_bug!(drop_data.span, "cached block not present?")
            }))
        } else {
            None
        }
    });

    // When we unwind from a drop, we start cleaning up from the next one, so
    // we don't need this block.
    unwind_blocks.next();

    for drop_data in scope.drops.iter().rev() {
        let source_info = scope.source_info(drop_data.span);
        match drop_data.kind {
            DropKind::Value { .. } => {
                let unwind_to = unwind_blocks.next().unwrap_or(last_unwind_to);

                let next = cfg.start_new_block();
                cfg.terminate(block, source_info, TerminatorKind::Drop {
                    location: drop_data.location.clone(),
                    target: next,
                    unwind: Some(unwind_to),
                });
                block = next;
            }
            DropKind::Storage => {
                // We do not need to emit StorageDead for generator drops
                if generator_drop {
                    continue
                }

                // Drop the storage for both value and storage drops.
                // Only temps and vars need their storage dead.
                match drop_data.location {
                    Place::Local(index) if index.index() > arg_count => {
                        cfg.push(block, Statement {
                            source_info,
                            kind: StatementKind::StorageDead(index),
                        });
                    }
                    _ => continue
                }
            }
        }
    }
    block.unit()
}
fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
                             span: Span,
                             scope: &mut Scope<'tcx>,
                             mut target: BasicBlock,
                             generator_drop: bool)
                             -> BasicBlock
{
    // Build up the drops in **reverse** order. The end result will
    // look like:
    //
    // [drops[n]] -...-> [drops[0]] -> [target]
    //
    // The code in this function reads from right to left. At each
    // point, we check for cached blocks representing the
    // remainder. If everything is cached, we'll just walk right to
    // left reading the cached results but never create anything.

    let source_scope = scope.source_scope;
    let source_info = |span| SourceInfo {
        span,
        scope: source_scope,
    };

    // Next, build up the drops. Here we iterate the vector in
    // *forward* order, so that we generate drops[0] first (right to
    // left in diagram above).
    for (j, drop_data) in scope.drops.iter_mut().enumerate() {
        debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data);
        // Only full value drops are emitted in the diverging path,
        // not StorageDead.
        //
        // Note: This may not actually be what we desire (are we
        // "freeing" stack storage as we unwind, or merely observing a
        // frozen stack)? In particular, the intent may have been to
        // match the behavior of clang, but on inspection eddyb says
        // this is not what clang does.
        let cached_block = match drop_data.kind {
            DropKind::Value { ref mut cached_block } => cached_block.ref_mut(generator_drop),
            DropKind::Storage => continue
        };
        target = if let Some(cached_block) = *cached_block {
            cached_block
        } else {
            let block = cfg.start_new_cleanup_block();
            cfg.terminate(block, source_info(drop_data.span),
                          TerminatorKind::Drop {
                              location: drop_data.location.clone(),
                              target,
                              unwind: None,
                          });
            *cached_block = Some(block);
            block
        };
    }

    *scope.cached_unwind.ref_mut(generator_drop) = Some(target);

    debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target);

    target
}