#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum StatementKind<'tcx> {
+ /// Write the RHS Rvalue to the LHS Lvalue.
Assign(Lvalue<'tcx>, Rvalue<'tcx>),
- SetDiscriminant{ lvalue: Lvalue<'tcx>, variant_index: usize },
+
+ /// Write the discriminant for a variant to the enum Lvalue.
+ SetDiscriminant { lvalue: Lvalue<'tcx>, variant_index: usize },
+
+ /// Start a live range for the storage of the local.
+ StorageLive(Lvalue<'tcx>),
+
+ /// End the current live range for the storage of the local.
+ StorageDead(Lvalue<'tcx>),
}
impl<'tcx> Debug for Statement<'tcx> {
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
use self::StatementKind::*;
match self.kind {
Assign(ref lv, ref rv) => write!(fmt, "{:?} = {:?}", lv, rv),
+ StorageLive(ref lv) => write!(fmt, "StorageLive({:?})", lv),
+ StorageDead(ref lv) => write!(fmt, "StorageDead({:?})", lv),
SetDiscriminant{lvalue: ref lv, variant_index: index} => {
write!(fmt, "discriminant({:?}) = {:?}", lv, index)
}
StatementKind::SetDiscriminant{ ref $($mutability)* lvalue, .. } => {
self.visit_lvalue(lvalue, LvalueContext::Store);
}
+ StatementKind::StorageLive(ref $($mutability)* lvalue) => {
+ self.visit_lvalue(lvalue, LvalueContext::StorageLive);
+ }
+ StatementKind::StorageDead(ref $($mutability)* lvalue) => {
+ self.visit_lvalue(lvalue, LvalueContext::StorageDead);
+ }
}
}
// Consumed as part of an operand
Consume,
+
+ // Starting and ending a storage live range
+ StorageLive,
+ StorageDead,
}
sets.kill_set.add(&moi);
});
}
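+ // Storage markers have no effect on the gen/kill sets.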
+ repr::StatementKind::StorageLive(_) |
+ repr::StatementKind::StorageDead(_) => {}
}
}
repr::StatementKind::Assign(ref lvalue, ref rvalue) => {
(lvalue, rvalue)
}
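+ // Storage markers have no lvalue or rvalue to check.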
+ repr::StatementKind::StorageLive(_) |
+ repr::StatementKind::StorageDead(_) => continue,
repr::StatementKind::SetDiscriminant{ .. } =>
span_bug!(stmt.source_info.span,
"sanity_check should run before Deaggregator inserts SetDiscriminant"),
Rvalue::InlineAsm { .. } => {}
}
}
+ StatementKind::StorageLive(_) |
+ StatementKind::StorageDead(_) => {}
StatementKind::SetDiscriminant{ .. } => {
span_bug!(stmt.source_info.span,
"SetDiscriminant should not exist during borrowck");
move_data.rev_lookup.find(lvalue),
|moi| callback(moi, DropFlagState::Present))
}
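+ // Storage markers don't change the drop-flag state of any path.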
+ repr::StatementKind::StorageLive(_) |
+ repr::StatementKind::StorageDead(_) => {}
},
None => {
debug!("drop_flag_effects: replace {:?}", block.terminator());
// FIXME #30046 ^~~~
this.expr_into_pattern(block, pattern, init)
}));
+ } else {
+ this.storage_live_for_bindings(block, &pattern);
}
// Enter the visibility scope, after evaluating the initializer.
let temp = this.temp(expr_ty.clone());
let temp_lifetime = expr.temp_lifetime;
let expr_span = expr.span;
+ let source_info = this.source_info(expr_span);
+
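+ // Only mark storage as live for temps with a limited lifetime; the
+ // matching StorageDead is emitted when the enclosing scope ends.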
+ if temp_lifetime.is_some() {
+ this.cfg.push(block, Statement {
+ source_info: source_info,
+ kind: StatementKind::StorageLive(temp.clone())
+ });
+ }
// Careful here not to cause an infinite cycle. If we always
// called `into`, then for lvalues like `x.f`, it would
Category::Lvalue => {
let lvalue = unpack!(block = this.as_lvalue(block, expr));
let rvalue = Rvalue::Use(Operand::Consume(lvalue));
- let source_info = this.source_info(expr_span);
this.cfg.push_assign(block, source_info, &temp, rvalue);
}
_ => {
PatternKind::Binding { mode: BindingMode::ByValue,
var,
subpattern: None, .. } => {
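+ // The variable is assigned directly below, so start its storage range first.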
+ self.storage_live_for_bindings(block, &irrefutable_pat);
let lvalue = Lvalue::Var(self.var_indices[&var]);
return self.into(&lvalue, block, initializer);
}
}
var_scope
}
+
+ /// Emit `StorageLive` for every binding in the pattern.
+ pub fn storage_live_for_bindings(&mut self,
+ block: BasicBlock,
+ pattern: &Pattern<'tcx>) {
+ match *pattern.kind {
+ PatternKind::Binding { var, ref subpattern, .. } => {
+ let lvalue = Lvalue::Var(self.var_indices[&var]);
+ let source_info = self.source_info(pattern.span);
+ self.cfg.push(block, Statement {
+ source_info: source_info,
+ kind: StatementKind::StorageLive(lvalue)
+ });
+
+ if let Some(subpattern) = subpattern.as_ref() {
+ self.storage_live_for_bindings(block, subpattern);
+ }
+ }
+ PatternKind::Array { ref prefix, ref slice, ref suffix } |
+ PatternKind::Slice { ref prefix, ref slice, ref suffix } => {
+ for subpattern in prefix.iter().chain(slice).chain(suffix) {
+ self.storage_live_for_bindings(block, subpattern);
+ }
+ }
+ PatternKind::Constant { .. } |
+ PatternKind::Range { .. } |
+ PatternKind::Wild => {}
+ PatternKind::Deref { ref subpattern } => {
+ self.storage_live_for_bindings(block, subpattern);
+ }
+ PatternKind::Leaf { ref subpatterns } |
+ PatternKind::Variant { ref subpatterns, .. } => {
+ for subpattern in subpatterns {
+ self.storage_live_for_bindings(block, &subpattern.pattern);
+ }
+ }
+ }
+ }
}
/// List of blocks for each arm (and potentially other metadata in the
};
let source_info = self.source_info(binding.span);
+ self.cfg.push(block, Statement {
+ source_info: source_info,
+ kind: StatementKind::StorageLive(Lvalue::Var(var_index))
+ });
self.cfg.push_assign(block, source_info,
&Lvalue::Var(var_index), rvalue);
}
let span = tcx.map.span(item_id);
let mut builder = Builder::new(hir, span);
- let extent = ROOT_CODE_EXTENT;
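+ // Prefer the expression's own temporary scope, if any, so that storage
+ // markers and drops scheduled while building land in the right extent.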
+ let extent = tcx.region_maps.temporary_scope(ast_expr.id)
+ .unwrap_or(ROOT_CODE_EXTENT);
let mut block = START_BLOCK;
let _ = builder.in_scope(extent, block, |builder| {
let expr = builder.hir.mirror(ast_expr);
/// `ScopeAuxiliary`, but kept here for convenience
extent: CodeExtent,
+ /// Whether there's anything to do for the cleanup path, that is,
+ /// when unwinding through this scope. This includes destructors,
+ /// but not StorageDead statements, which don't get emitted at all
+ /// for unwinding, for several reasons:
+ /// * clang doesn't emit llvm.lifetime.end for C++ unwinding
+ /// * LLVM's memory dependency analysis can't currently handle it
+ /// * polluting the cleanup MIR with StorageDead creates
+ /// landing pads even though there are no actual destructors
+ /// * freeing up stack space has no effect during unwinding
+ needs_cleanup: bool,
+
/// set of lvalues to drop when exiting this scope. This starts
/// out empty but grows as variables are declared during the
/// building process. This is a stack, so we always drop from the
/// lvalue to drop
location: Lvalue<'tcx>,
- /// The cached block for the cleanups-on-diverge path. This block
- /// contains code to run the current drop and all the preceding
- /// drops (i.e. those having lower index in Drop’s Scope drop
- /// array)
- cached_block: Option<BasicBlock>
+ /// Whether this is a full value Drop, or just a StorageDead.
+ kind: DropKind
+}
+
+enum DropKind {
+ Value {
+ /// The cached block for the cleanups-on-diverge path. This block
+ /// contains code to run the current drop and all the preceding
+ /// drops (i.e. those having lower index in Drop’s Scope drop
+ /// array)
+ cached_block: Option<BasicBlock>
+ },
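+ /// Only a StorageDead statement is emitted; the value needs no drop.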
+ Storage
}
struct FreeData<'tcx> {
fn invalidate_cache(&mut self) {
self.cached_exits = FnvHashMap();
for dropdata in &mut self.drops {
- dropdata.cached_block = None;
+ if let DropKind::Value { ref mut cached_block } = dropdata.kind {
+ *cached_block = None;
+ }
}
if let Some(ref mut freedata) = self.free {
freedata.cached_block = None;
/// Precondition: the caches must be fully filled (i.e. diverge_cleanup has
/// been called) in order for this method to work correctly.
fn cached_block(&self) -> Option<BasicBlock> {
- if let Some(data) = self.drops.last() {
- Some(data.cached_block.expect("drop cache is not filled"))
+ let mut drops = self.drops.iter().rev().filter_map(|data| {
+ match data.kind {
+ DropKind::Value { cached_block } => Some(cached_block),
+ DropKind::Storage => None
+ }
+ });
+ if let Some(cached_block) = drops.next() {
+ Some(cached_block.expect("drop cache is not filled"))
} else if let Some(ref data) = self.free {
Some(data.cached_block.expect("free cache is not filled"))
} else {
id: id,
visibility_scope: vis_scope,
extent: extent,
+ needs_cleanup: false,
drops: vec![],
free: None,
cached_exits: FnvHashMap()
extent: CodeExtent,
lvalue: &Lvalue<'tcx>,
lvalue_ty: Ty<'tcx>) {
- if !self.hir.needs_drop(lvalue_ty) {
- return
- }
+ let needs_drop = self.hir.needs_drop(lvalue_ty);
+ let drop_kind = if needs_drop {
+ DropKind::Value { cached_block: None }
+ } else {
+ // Only temps and vars need their storage marked dead.
+ match *lvalue {
+ Lvalue::Temp(_) | Lvalue::Var(_) => DropKind::Storage,
+ _ => return
+ }
+ };
+
for scope in self.scopes.iter_mut().rev() {
if scope.extent == extent {
+ if let DropKind::Value { .. } = drop_kind {
+ scope.needs_cleanup = true;
+ }
+
// No need to invalidate any caches here. The just-scheduled drop will branch into
// the drop that comes before it in the vector.
scope.drops.push(DropData {
span: span,
location: lvalue.clone(),
- cached_block: None
+ kind: drop_kind
});
return;
} else {
// We must invalidate all the cached_blocks leading up to the scope we’re
// looking for, because all of the blocks in the chain will become incorrect.
- scope.invalidate_cache()
+ if let DropKind::Value { .. } = drop_kind {
+ scope.invalidate_cache()
+ }
}
}
span_bug!(span, "extent {:?} not in scope to drop {:?}", extent, lvalue);
// We also must invalidate the caches in the scope for which the free is scheduled
// because the drops must branch into the free we schedule here.
scope.invalidate_cache();
+ scope.needs_cleanup = true;
scope.free = Some(FreeData {
span: span,
value: value.clone(),
/// See module comment for more details. None indicates there’s no
/// cleanup to do at this point.
pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> {
- if self.scopes.iter().all(|scope| scope.drops.is_empty() && scope.free.is_none()) {
+ if !self.scopes.iter().any(|scope| scope.needs_cleanup) {
return None;
}
- assert!(!self.scopes.is_empty()); // or `all` above would be true
+ assert!(!self.scopes.is_empty()); // or `any` above would be false
let unit_temp = self.get_unit_temp();
let Builder { ref mut hir, ref mut cfg, ref mut scopes,
resumeblk
};
- for scope in scopes.iter_mut().filter(|s| !s.drops.is_empty() || s.free.is_some()) {
+ for scope in scopes.iter_mut().filter(|s| s.needs_cleanup) {
target = build_diverge_scope(hir.tcx(), cfg, &unit_temp, scope, target);
}
Some(target)
-> BlockAnd<()> {
let mut iter = scope.drops.iter().rev().peekable();
while let Some(drop_data) = iter.next() {
- // Try to find the next block with its cached block for us to diverge into in case the
- // drop panics.
- let on_diverge = iter.peek().iter().flat_map(|dd| dd.cached_block.into_iter()).next();
- // If there’s no `cached_block`s within current scope, we must look for one in the
- // enclosing scope.
- let on_diverge = on_diverge.or_else(||{
- earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next()
- });
- let next = cfg.start_new_block();
- cfg.terminate(block, scope.source_info(drop_data.span), TerminatorKind::Drop {
- location: drop_data.location.clone(),
- target: next,
- unwind: on_diverge
- });
- block = next;
+ let source_info = scope.source_info(drop_data.span);
+ if let DropKind::Value { .. } = drop_data.kind {
+ // Try to find the next block with its cached block
+ // for us to diverge into in case the drop panics.
+ let on_diverge = iter.peek().iter().filter_map(|dd| {
+ match dd.kind {
+ DropKind::Value { cached_block } => cached_block,
+ DropKind::Storage => None
+ }
+ }).next();
+ // If there are no `cached_block`s within the current scope,
+ // we must look for one in the enclosing scope.
+ let on_diverge = on_diverge.or_else(||{
+ earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next()
+ });
+ let next = cfg.start_new_block();
+ cfg.terminate(block, source_info, TerminatorKind::Drop {
+ location: drop_data.location.clone(),
+ target: next,
+ unwind: on_diverge
+ });
+ block = next;
+ }
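+ // Whether or not a drop was emitted above, end the storage range
+ // of the local here.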
+ match drop_data.kind {
+ DropKind::Value { .. } |
+ DropKind::Storage => {
+ // Only temps and vars need their storage marked dead.
+ match drop_data.location {
+ Lvalue::Temp(_) | Lvalue::Var(_) => {}
+ _ => continue
+ }
+
+ cfg.push(block, Statement {
+ source_info: source_info,
+ kind: StatementKind::StorageDead(drop_data.location.clone())
+ });
+ }
+ }
}
block.unit()
}
// *forward* order, so that we generate drops[0] first (right to
// left in diagram above).
for drop_data in &mut scope.drops {
- target = if let Some(cached_block) = drop_data.cached_block {
+ // Only full value drops are emitted in the diverging path,
+ // not StorageDead.
+ let cached_block = match drop_data.kind {
+ DropKind::Value { ref mut cached_block } => cached_block,
+ DropKind::Storage => continue
+ };
+ target = if let Some(cached_block) = *cached_block {
cached_block
} else {
let block = cfg.start_new_cleanup_block();
target: target,
unwind: None
});
- drop_data.cached_block = Some(block);
+ *cached_block = Some(block);
block
};
}
let orig_stmt = bb.statements.pop().unwrap();
let (lhs, rhs) = match orig_stmt.kind {
StatementKind::Assign(ref lhs, ref rhs) => (lhs, rhs),
- StatementKind::SetDiscriminant{ .. } =>
- span_bug!(src_info.span, "expected aggregate, not {:?}", orig_stmt.kind),
+ _ => span_bug!(src_info.span, "expected assign, not {:?}", orig_stmt),
};
let (agg_kind, operands) = match rhs {
&Rvalue::Aggregate(ref agg_kind, ref operands) => (agg_kind, operands),
let ref statement = statements[i];
let rhs = match statement.kind {
StatementKind::Assign(_, ref rhs) => rhs,
- StatementKind::SetDiscriminant{ .. } => continue,
+ _ => continue,
};
let (kind, operands) = match rhs {
&Rvalue::Aggregate(ref kind, ref operands) => (kind, operands),
if let Lvalue::Temp(index) = *lvalue {
// Ignore drops: if the temp gets promoted,
// then it's a constant and the drop is a no-op.
- if let LvalueContext::Drop = context {
- return;
+ // Storage live ranges are also irrelevant.
+ match context {
+ LvalueContext::Drop |
+ LvalueContext::StorageLive |
+ LvalueContext::StorageDead => return,
+ _ => {}
}
let temp = &mut self.temps[index];
let (mut rvalue, mut call) = (None, None);
let source_info = if stmt_idx < no_stmts {
let statement = &mut self.source[bb].statements[stmt_idx];
- let mut rhs = match statement.kind {
+ let rhs = match statement.kind {
StatementKind::Assign(_, ref mut rhs) => rhs,
- StatementKind::SetDiscriminant{ .. } =>
- span_bug!(statement.source_info.span,
- "cannot promote SetDiscriminant {:?}",
- statement),
+ _ => {
+ span_bug!(statement.source_info.span, "{:?} is not an assignment",
+ statement);
+ }
};
if self.keep_original {
rvalue = Some(rhs.clone());
StatementKind::Assign(_, ref mut rvalue) => {
mem::replace(rvalue, Rvalue::Use(new_operand))
}
- StatementKind::SetDiscriminant{ .. } => {
- span_bug!(statement.source_info.span,
- "cannot promote SetDiscriminant {:?}",
- statement);
- }
+ _ => bug!()
}
}
Candidate::ShuffleIndices(bb) => {
let statement = &mir[bb].statements[stmt_idx];
let dest = match statement.kind {
StatementKind::Assign(ref dest, _) => dest,
- StatementKind::SetDiscriminant{ .. } =>
- panic!("cannot promote SetDiscriminant"),
+ _ => {
+ span_bug!(statement.source_info.span,
+ "expected assignment to promote");
+ }
};
if let Lvalue::Temp(index) = *dest {
if temps[index] == TempState::PromotedOut {
for block in mir.basic_blocks_mut() {
block.statements.retain(|statement| {
match statement.kind {
- StatementKind::Assign(Lvalue::Temp(index), _) => {
+ StatementKind::Assign(Lvalue::Temp(index), _) |
+ StatementKind::StorageLive(Lvalue::Temp(index)) |
+ StatementKind::StorageDead(Lvalue::Temp(index)) => {
!promoted(index)
}
_ => true
fn visit_statement(&mut self, bb: BasicBlock, statement: &Statement<'tcx>) {
assert_eq!(self.location.block, bb);
- self.nest(|this| this.super_statement(bb, statement));
+ self.nest(|this| {
+ this.visit_source_info(&statement.source_info);
+ match statement.kind {
+ StatementKind::Assign(ref lvalue, ref rvalue) => {
+ this.visit_assign(bb, lvalue, rvalue);
+ }
+ StatementKind::SetDiscriminant { .. } |
+ StatementKind::StorageLive(_) |
+ StatementKind::StorageDead(_) => {}
+ }
+ });
self.location.statement_index += 1;
}
variant_index);
};
}
+ StatementKind::StorageLive(ref lv) |
+ StatementKind::StorageDead(ref lv) => {
+ match *lv {
+ Lvalue::Temp(_) | Lvalue::Var(_) => {}
+ _ => {
+ span_mirbug!(self, stmt, "bad lvalue: expected temp or var");
+ }
+ }
+ }
}
}
LvalueContext::Call => {
self.mark_assigned(index);
}
- LvalueContext::Consume => {
- }
+
+ LvalueContext::StorageLive |
+ LvalueContext::StorageDead |
+ LvalueContext::Consume => {}
+
LvalueContext::Store |
LvalueContext::Inspect |
LvalueContext::Borrow { .. } |
LvalueContext::Projection => {
self.mark_as_lvalue(index);
}
+
LvalueContext::Drop => {
let ty = lvalue.ty(self.mir, self.bcx.tcx());
let ty = self.bcx.monomorphize(&ty.to_ty(self.bcx.tcx()));
Err(err) => if failure.is_ok() { failure = Err(err); }
}
}
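+ // Storage markers are irrelevant when evaluating a constant.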
+ mir::StatementKind::StorageLive(_) |
+ mir::StatementKind::StorageDead(_) => {}
mir::StatementKind::SetDiscriminant{ .. } => {
span_bug!(span, "SetDiscriminant should not appear in constants?");
}
use rustc::mir::repr as mir;
+use base;
use common::{self, BlockAndBuilder};
use super::MirContext;
);
bcx
}
+ mir::StatementKind::StorageLive(ref lvalue) => {
+ self.trans_storage_liveness(bcx, lvalue, base::Lifetime::Start)
+ }
+ mir::StatementKind::StorageDead(ref lvalue) => {
+ self.trans_storage_liveness(bcx, lvalue, base::Lifetime::End)
+ }
+ }
+ }
+
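+ /// Emit the llvm.lifetime.start/llvm.lifetime.end intrinsic for the
+ /// local's alloca, if the local is translated as an in-memory lvalue.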
+ fn trans_storage_liveness(&self,
+ bcx: BlockAndBuilder<'bcx, 'tcx>,
+ lvalue: &mir::Lvalue<'tcx>,
+ intrinsic: base::Lifetime)
+ -> BlockAndBuilder<'bcx, 'tcx> {
+ if let Some(index) = self.mir.local_index(lvalue) {
+ if let LocalRef::Lvalue(tr_lval) = self.locals[index] {
+ intrinsic.call(&bcx, tr_lval.llval);
+ }
}
+ bcx
}
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -O -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(rustc_attrs)]
+
+// CHECK-LABEL: @test
+#[no_mangle]
+#[rustc_mir] // FIXME #27840 MIR has different codegen.
+pub fn test() {
+ let a = 0;
+ &a; // keep variable in an alloca
+
+// CHECK: [[S_a:%[0-9]+]] = bitcast i32* %a to i8*
+// CHECK: call void @llvm.lifetime.start(i{{[0-9 ]+}}, i8* [[S_a]])
+
+ {
+ let b = &Some(a);
+ &b; // keep variable in an alloca
+
+// CHECK: [[S_b:%[0-9]+]] = bitcast %"2.std::option::Option<i32>"** %b to i8*
+// CHECK: call void @llvm.lifetime.start(i{{[0-9 ]+}}, i8* [[S_b]])
+
+// CHECK: [[S_tmp2:%[0-9]+]] = bitcast %"2.std::option::Option<i32>"* %tmp2 to i8*
+// CHECK: call void @llvm.lifetime.start(i{{[0-9 ]+}}, i8* [[S_tmp2]])
+
+// CHECK: [[E_tmp2:%[0-9]+]] = bitcast %"2.std::option::Option<i32>"* %tmp2 to i8*
+// CHECK: call void @llvm.lifetime.end(i{{[0-9 ]+}}, i8* [[E_tmp2]])
+
+// CHECK: [[E_b:%[0-9]+]] = bitcast %"2.std::option::Option<i32>"** %b to i8*
+// CHECK: call void @llvm.lifetime.end(i{{[0-9 ]+}}, i8* [[E_b]])
+ }
+
+ let c = 1;
+ &c; // keep variable in an alloca
+
+// CHECK: [[S_c:%[0-9]+]] = bitcast i32* %c to i8*
+// CHECK: call void @llvm.lifetime.start(i{{[0-9 ]+}}, i8* [[S_c]])
+
+// CHECK: [[E_c:%[0-9]+]] = bitcast i32* %c to i8*
+// CHECK: call void @llvm.lifetime.end(i{{[0-9 ]+}}, i8* [[E_c]])
+
+// CHECK: [[E_a:%[0-9]+]] = bitcast i32* %a to i8*
+// CHECK: call void @llvm.lifetime.end(i{{[0-9 ]+}}, i8* [[E_a]])
+}
}
fn loop_expr(mut x: u64, y: u64, z: u64) -> u64 {
- loop { // #break
- x += z;
+ loop {
+ x += z; // #break
if x + y > 1000 {
return x;
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let a = 0;
+ {
+ let b = &Some(a);
+ }
+ let c = 1;
+}
+
+// END RUST SOURCE
+// START rustc.node4.PreTrans.after.mir
+// bb0: {
+// StorageLive(var0); // scope 0 at storage_ranges.rs:12:9: 12:10
+// var0 = const 0i32; // scope 0 at storage_ranges.rs:12:13: 12:14
+// StorageLive(var1); // scope 1 at storage_ranges.rs:14:13: 14:14
+// StorageLive(tmp1); // scope 1 at storage_ranges.rs:14:18: 14:25
+// StorageLive(tmp2); // scope 1 at storage_ranges.rs:14:23: 14:24
+// tmp2 = var0; // scope 1 at storage_ranges.rs:14:23: 14:24
+// tmp1 = std::prelude::v1::Some<i32>(tmp2,); // scope 1 at storage_ranges.rs:14:18: 14:25
+// var1 = &tmp1; // scope 1 at storage_ranges.rs:14:17: 14:25
+// StorageDead(tmp2); // scope 1 at storage_ranges.rs:14:23: 14:24
+// tmp0 = (); // scope 2 at storage_ranges.rs:13:5: 15:6
+// StorageDead(tmp1); // scope 1 at storage_ranges.rs:14:18: 14:25
+// StorageDead(var1); // scope 1 at storage_ranges.rs:14:13: 14:14
+// StorageLive(var2); // scope 1 at storage_ranges.rs:16:9: 16:10
+// var2 = const 1i32; // scope 1 at storage_ranges.rs:16:13: 16:14
+// return = (); // scope 3 at storage_ranges.rs:11:11: 17:2
+// StorageDead(var2); // scope 1 at storage_ranges.rs:16:9: 16:10
+// StorageDead(var0); // scope 0 at storage_ranges.rs:12:9: 12:10
+// goto -> bb1; // scope 0 at storage_ranges.rs:11:1: 17:2
+// }
+//
+// bb1: {
+// return; // scope 0 at storage_ranges.rs:11:1: 17:2
+// }
+// END rustc.node4.PreTrans.after.mir