}
}
+pub enum RvalueInitializationState {
+ Shallow,
+ Deep
+}
+
impl<'tcx> Rvalue<'tcx> {
pub fn ty<'a, 'gcx, D>(&self, local_decls: &D, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx>
where D: HasLocalDecls<'tcx>
}
}
}
+
+ #[inline]
+ /// Returns whether this rvalue is deeply initialized (most rvalues) or
+ /// whether it is only shallowly initialized (`NullOp::Box`).
+ pub fn initialization_state(&self) -> RvalueInitializationState {
+ match *self {
+ Rvalue::NullaryOp(NullOp::Box, _) => RvalueInitializationState::Shallow,
+ _ => RvalueInitializationState::Deep
+ }
+ }
}
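
To make the new query's contract concrete, here is a minimal, self-contained model (plain Rust, not rustc internals; all names are illustrative) of how an initialization analysis is expected to consume `Shallow` vs `Deep`:

```rust
use std::collections::HashSet;

// Stand-in for RvalueInitializationState (illustrative, not the rustc type).
#[derive(PartialEq)]
enum InitState { Shallow, Deep }

// A Shallow assignment initializes only the destination itself; a Deep
// assignment also initializes everything reachable inside it.
fn apply_assign(init: &mut HashSet<&'static str>,
                dest: &'static str,
                children: &[&'static str],
                state: InitState) {
    init.insert(dest);
    if state == InitState::Deep {
        init.extend(children);
    }
}

fn main() {
    let mut init = HashSet::new();
    // `b = <box allocation>` is Shallow: `*b` stays uninitialized.
    apply_assign(&mut init, "b", &["*b"], InitState::Shallow);
    assert!(init.contains("b") && !init.contains("*b"));
    // The later write `(*b) = value` is Deep and initializes the contents.
    apply_assign(&mut init, "*b", &[], InitState::Deep);
    assert!(init.contains("*b"));
}
```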
impl<'tcx> Operand<'tcx> {
}
block.and(Rvalue::UnaryOp(op, arg))
}
- ExprKind::Box { value, value_extents } => {
+ ExprKind::Box { value } => {
let value = this.hir.mirror(value);
let result = this.temp(expr.ty, expr_span);
// to start, malloc some memory of suitable type (thus far, uninitialized):
let box_ = Rvalue::NullaryOp(NullOp::Box, value.ty);
this.cfg.push_assign(block, source_info, &result, box_);
- this.in_scope((value_extents, source_info), block, |this| {
+ if let Some(scope) = scope {
// schedule a shallow free of that memory, lest we unwind:
- this.schedule_box_free(expr_span, value_extents, &result, value.ty);
- // initialize the box contents:
- unpack!(block = this.into(&result.clone().deref(), block, value));
- block.and(Rvalue::Use(Operand::Consume(result)))
- })
+ this.schedule_drop(expr_span, scope, &result, value.ty);
+ }
+ // initialize the box contents:
+ unpack!(block = this.into(&result.clone().deref(), block, value));
+ block.and(Rvalue::Use(Operand::Consume(result)))
}
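
For orientation, a hedged sketch (pseudo-MIR with simplified names, not actual compiler output) of what this arm now emits for `let b = box compute();`; the cleanup scheduled here is an ordinary `Drop`, which drop elaboration later narrows to a plain free while the contents are still uninitialized:

```rust
// Pseudo-MIR sketch (illustrative only):
//
//   _t = NullOp::Box(T);   // shallowly initialized allocation
//   // Drop(_t) scheduled in the enclosing scope: if `compute()` below
//   // unwinds, only the allocation is freed (the contents never existed).
//   (*_t) = compute();     // deep-initializes the box contents
//   _b = _t;               // hand the fully built box to its destination
```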
ExprKind::Cast { source } => {
let source = this.hir.mirror(source);
use build::{BlockAnd, BlockAndExtension, Builder, CFG};
use rustc::middle::region::CodeExtent;
-use rustc::middle::lang_items;
-use rustc::middle::const_val::ConstVal;
-use rustc::ty::subst::{Kind, Subst};
use rustc::ty::{Ty, TyCtxt};
use rustc::mir::*;
use rustc::mir::transform::MirSource;
/// end of the vector (top of the stack) first.
drops: Vec<DropData<'tcx>>,
- /// A scope may only have one associated free, because:
- ///
- /// 1. We require a `free` to only be scheduled in the scope of
- /// `EXPR` in `box EXPR`;
- /// 2. It only makes sense to have it translated into the diverge-path.
- ///
- /// This kind of drop will be run *after* all the regular drops
- /// scheduled onto this scope, because drops may have dependencies
- /// on the allocated memory.
- ///
- /// This is expected to go away once `box EXPR` becomes a sugar
- /// for placement protocol and gets desugared in some earlier
- /// stage.
- free: Option<FreeData<'tcx>>,
-
/// The cache for drop chain on “normal” exit into a particular BasicBlock.
cached_exits: FxHashMap<(BasicBlock, CodeExtent), BasicBlock>,
}
Storage
}
-#[derive(Debug)]
-struct FreeData<'tcx> {
- /// span where free obligation was incurred
- span: Span,
-
- /// Lvalue containing the allocated box.
- value: Lvalue<'tcx>,
-
- /// type of item for which the box was allocated for (i.e. the T in Box<T>).
- item_ty: Ty<'tcx>,
-
- /// The cached block containing code to run the free. The block will also execute all the drops
- /// in the scope.
- cached_block: Option<BasicBlock>
-}
-
#[derive(Clone, Debug)]
pub struct BreakableScope<'tcx> {
/// Extent of the loop
*cached_block = None;
}
}
- if let Some(ref mut freedata) = self.free {
- freedata.cached_block = None;
- }
}
/// Returns the cached entrypoint for diverging exit from this scope.
});
if let Some(cached_block) = drops.next() {
Some(cached_block.expect("drop cache is not filled"))
- } else if let Some(ref data) = self.free {
- Some(data.cached_block.expect("free cache is not filled"))
} else {
None
}
extent_span: extent.1.span,
needs_cleanup: false,
drops: vec![],
- free: None,
cached_exits: FxHashMap()
});
}
});
let len = self.scopes.len();
assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes");
- let tmp = self.get_unit_temp();
// If we are emitting a `drop` statement, we need to have the cached
// diverge cleanup pads ready in case that drop panics.
// End all regions for scopes out of which we are breaking.
self.cfg.push_end_region(block, extent.1, scope.extent);
-
- if let Some(ref free_data) = scope.free {
- let next = self.cfg.start_new_block();
- let free = build_free(self.hir.tcx(), &tmp, free_data, next);
- self.cfg.terminate(block, scope.source_info(span), free);
- block = next;
- }
}
}
let scope = &self.scopes[len - scope_count];
span_bug!(span, "extent {:?} not in scope to drop {:?}", extent, lvalue);
}
- /// Schedule dropping of a not-yet-fully-initialised box.
- ///
- /// This cleanup will only be translated into unwind branch.
- /// The extent should be for the `EXPR` inside `box EXPR`.
- /// There may only be one “free” scheduled in any given scope.
- pub fn schedule_box_free(&mut self,
- span: Span,
- extent: CodeExtent,
- value: &Lvalue<'tcx>,
- item_ty: Ty<'tcx>) {
- for scope in self.scopes.iter_mut().rev() {
- // See the comment in schedule_drop above. The primary difference is that we invalidate
- // the unwind blocks unconditionally. That’s because the box free may be considered
- // outer-most cleanup within the scope.
- scope.invalidate_cache(true);
- if scope.extent == extent {
- assert!(scope.free.is_none(), "scope already has a scheduled free!");
- scope.needs_cleanup = true;
- scope.free = Some(FreeData {
- span: span,
- value: value.clone(),
- item_ty: item_ty,
- cached_block: None
- });
- return;
- }
- }
- span_bug!(span, "extent {:?} not in scope to free {:?}", extent, value);
- }
-
// Other
// =====
/// Creates a path that performs all required cleanup for unwinding.
}
assert!(!self.scopes.is_empty()); // or `any` above would be false
- let unit_temp = self.get_unit_temp();
let Builder { ref mut hir, ref mut cfg, ref mut scopes,
ref mut cached_resume_block, .. } = *self;
for scope in scopes.iter_mut() {
target = build_diverge_scope(
- hir.tcx(), cfg, &unit_temp, scope.extent_span, scope, target);
+ hir.tcx(), cfg, scope.extent_span, scope, target);
}
Some(target)
}
block.unit()
}
-fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+fn build_diverge_scope<'a, 'gcx, 'tcx>(_tcx: TyCtxt<'a, 'gcx, 'tcx>,
cfg: &mut CFG<'tcx>,
- unit_temp: &Lvalue<'tcx>,
span: Span,
scope: &mut Scope<'tcx>,
mut target: BasicBlock)
scope: visibility_scope
};
- // Next, build up any free.
- if let Some(ref mut free_data) = scope.free {
- target = if let Some(cached_block) = free_data.cached_block {
- cached_block
- } else {
- let into = cfg.start_new_cleanup_block();
- cfg.terminate(into, source_info(free_data.span),
- build_free(tcx, unit_temp, free_data, target));
- free_data.cached_block = Some(into);
- into
- };
- }
-
// Next, build up the drops. Here we iterate the vector in
// *forward* order, so that we generate drops[0] first (right to
// left in diagram above).
target
}
-
-fn build_free<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
- unit_temp: &Lvalue<'tcx>,
- data: &FreeData<'tcx>,
- target: BasicBlock)
- -> TerminatorKind<'tcx> {
- let free_func = tcx.require_lang_item(lang_items::BoxFreeFnLangItem);
- let substs = tcx.intern_substs(&[Kind::from(data.item_ty)]);
- TerminatorKind::Call {
- func: Operand::Constant(box Constant {
- span: data.span,
- ty: tcx.type_of(free_func).subst(tcx, substs),
- literal: Literal::Value {
- value: ConstVal::Function(free_func, substs),
- }
- }),
- args: vec![Operand::Consume(data.value.clone())],
- destination: Some((unit_temp.clone(), target)),
- cleanup: None
- }
-}
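
The user-visible behaviour that `build_free` used to guarantee is preserved by the scheduled shallow drop plus drop elaboration. A runnable illustration (assumes a nightly toolchain of this era with `box_syntax`; `Noisy` is a made-up type):

```rust
#![feature(box_syntax)]

use std::panic;

struct Noisy;
impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropped Noisy");
    }
}

fn make() -> Noisy {
    panic!("init failed")
}

fn main() {
    // The allocation happens before `make()` runs; when `make()` panics,
    // the box is only shallowly initialized, so unwinding frees the
    // allocation without running `Noisy::drop` ("dropped Noisy" is never
    // printed).
    let result = panic::catch_unwind(|| {
        let _b: Box<Noisy> = box make();
    });
    assert!(result.is_err());
}
```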
let lookup_result = move_data.rev_lookup.find(&lvalue);
on_lookup_result_bits(tcx, mir, move_data,
lookup_result,
- |moi| callback(moi, DropFlagState::Present));
+ |mpi| callback(mpi, DropFlagState::Present));
}
}
on_all_children_bits(tcx, mir, move_data,
path,
- |moi| callback(moi, DropFlagState::Absent))
+ |mpi| callback(mpi, DropFlagState::Absent))
}
let block = &mir[loc.block];
mir::StatementKind::SetDiscriminant{ .. } => {
span_bug!(stmt.source_info.span, "SetDiscrimant should not exist during borrowck");
}
- mir::StatementKind::Assign(ref lvalue, _) => {
- debug!("drop_flag_effects: assignment {:?}", stmt);
- on_lookup_result_bits(tcx, mir, move_data,
- move_data.rev_lookup.find(lvalue),
- |moi| callback(moi, DropFlagState::Present))
+ mir::StatementKind::Assign(ref lvalue, ref rvalue) => {
+ match rvalue.initialization_state() {
+ mir::tcx::RvalueInitializationState::Shallow => {
+ debug!("drop_flag_effects: box assignment {:?}", stmt);
+ if let LookupResult::Exact(mpi) = move_data.rev_lookup.find(lvalue) {
+ callback(mpi, DropFlagState::Present);
+ }
+ }
+ mir::tcx::RvalueInitializationState::Deep => {
+ debug!("drop_flag_effects: assignment {:?}", stmt);
+ on_lookup_result_bits(tcx, mir, move_data,
+ move_data.rev_lookup.find(lvalue),
+ |mpi| callback(mpi, DropFlagState::Present))
+ }
+ }
}
mir::StatementKind::StorageLive(_) |
mir::StatementKind::StorageDead(_) |
mir::TerminatorKind::DropAndReplace { ref location, .. } => {
on_lookup_result_bits(tcx, mir, move_data,
move_data.rev_lookup.find(location),
- |moi| callback(moi, DropFlagState::Present))
+ |mpi| callback(mpi, DropFlagState::Present))
}
_ => {
// other terminators do not contain move-ins
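
Putting the two arms side by side (a hedged sketch; the paths are illustrative), the gen-set effect of an assignment now depends on the rvalue:

```rust
// Illustrative drop-flag trace for the two-statement box pattern:
//
//   statement               flag(_t)   flag(*_t)
//   _t = NullOp::Box(T)     Present    Absent     // Shallow: exact path only
//   (*_t) = value           Present    Present    // Deep: path and children
```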
use util::elaborate_drops::DropFlagState;
use super::move_paths::{HasMoveData, MoveData, MoveOutIndex, MovePathIndex};
+use super::move_paths::LookupResult;
use super::{BitDenotation, BlockSets, DataflowOperator};
use super::drop_flag_effects_for_function_entry;
mir::StatementKind::SetDiscriminant { .. } => {
span_bug!(stmt.source_info.span, "SetDiscriminant should not exist in borrowck");
}
- mir::StatementKind::Assign(ref lvalue, _) => {
+ mir::StatementKind::Assign(ref lvalue, ref rvalue) => {
// assigning into this `lvalue` kills all
// MoveOuts from it, and *also* all MoveOuts
// for children and associated fragment sets.
- on_lookup_result_bits(tcx,
- mir,
- move_data,
- rev_lookup.find(lvalue),
- |mpi| for moi in &path_map[mpi] {
- assert!(moi.index() < bits_per_block);
- sets.kill_set.add(&moi);
- });
+ match rvalue.initialization_state() {
+ mir::tcx::RvalueInitializationState::Shallow => {
+ if let LookupResult::Exact(mpi) = rev_lookup.find(lvalue) {
+ for moi in &path_map[mpi] {
+ assert!(moi.index() < bits_per_block);
+ sets.kill_set.add(&moi);
+ }
+ }
+ }
+ mir::tcx::RvalueInitializationState::Deep => {
+ on_lookup_result_bits(tcx,
+ mir,
+ move_data,
+ rev_lookup.find(lvalue),
+ |mpi| for moi in &path_map[mpi] {
+ assert!(moi.index() < bits_per_block);
+ sets.kill_set.add(&moi);
+ });
+ }
+ }
}
mir::StatementKind::StorageLive(_) |
mir::StatementKind::StorageDead(_) |
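
The kill sets get the matching precision (a hedged sketch at the MIR level; locals are illustrative): a `NullOp::Box` assignment re-initializes only the destination itself, so only moves out of that exact path are killed:

```rust
// Kill-set sketch (illustrative):
//
//   _t = NullOp::Box(T);   // kills MoveOuts of `_t` only (Exact lookup);
//                          // MoveOuts of `(*_t)` and its children stay live
//   (*_t) = value;         // Deep: kills MoveOuts of `(*_t)` and children
```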
use rustc::ty::{self, TyCtxt};
use rustc::mir::*;
+use rustc::mir::tcx::RvalueInitializationState;
use rustc::util::nodemap::FxHashMap;
use rustc_data_structures::indexed_vec::{IndexVec};
match stmt.kind {
StatementKind::Assign(ref lval, ref rval) => {
self.create_move_path(lval);
+ if let RvalueInitializationState::Shallow = rval.initialization_state() {
+ // A box starts out uninitialized, so create a separate
+ // move path for the interior (`*lval`) to track it
+ // independently of the exterior (the box pointer itself).
+ self.create_move_path(&lval.clone().deref());
+ }
self.gather_rvalue(loc, rval);
}
StatementKind::StorageLive(_) |
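
A sketch of the move-path tree this produces for a box temporary (illustrative names): eagerly creating the deref path keeps the allocation and its contents distinct from the start, so the `Shallow`/`Exact` handling above always has an interior path to find:

```rust
// Move paths after `_t = NullOp::Box(String)` (illustrative):
//
//   _t          // created by `self.create_move_path(lval)`
//   └── (*_t)   // created by `self.create_move_path(&lval.clone().deref())`
```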
hir::ExprBox(ref value) => {
ExprKind::Box {
value: value.to_ref(),
- value_extents: CodeExtent::Misc(value.id),
}
}
hir::ExprArray(ref fields) => ExprKind::Array { fields: fields.to_ref() },
},
Box {
value: ExprRef<'tcx>,
- value_extents: CodeExtent,
},
Call {
ty: ty::Ty<'tcx>,
// let _1: D;
// let _2: i32;
// let _3: &'6_2rce i32;
-// let _7: &'6_4rce i32;
+// let _6: &'6_4rce i32;
// let mut _4: ();
// let mut _5: i32;
-// let mut _6: ();
// bb0: {
// StorageLive(_1);
// _1 = D::{{constructor}}(const 0i32,);
// }
// bb1: {
// StorageDead(_5);
-// StorageLive(_7);
-// _7 = &'6_4rce _2;
+// StorageLive(_6);
+// _6 = &'6_4rce _2;
// _0 = ();
-// StorageDead(_7);
+// StorageDead(_6);
// EndRegion('6_4rce);
// StorageDead(_3);
// EndRegion('6_2rce);
// let mut _2: ();
// let mut _3: [closure@NodeId(18) d:&'19mce D];
// let mut _4: &'19mce D;
-// let mut _5: ();
// bb0: {
// StorageLive(_1);
// _1 = D::{{constructor}}(const 0i32,);
// let mut _2: ();
// let mut _3: [closure@NodeId(22) d:&'23mce D];
// let mut _4: &'23mce D;
-// let mut _5: ();
// bb0: {
// StorageLive(_1);
// _1 = D::{{constructor}}(const 0i32,);
// let mut _2: ();
// let mut _3: [closure@NodeId(22) d:D];
// let mut _4: D;
-// let mut _5: ();
//
// bb0: {
// StorageLive(_1);
// let mut _0: i32;
// let _2: &'14_0rce D;
// let mut _3: i32;
-// let mut _4: ();
//
// bb0: {
// StorageLive(_2);
// let mut _3: ();
// let mut _4: [closure@NodeId(22) r:&'6_1rce D];
// let mut _5: &'6_1rce D;
-// let mut _6: ();
// bb0: {
// StorageLive(_1);
// _1 = D::{{constructor}}(const 0i32,);
// let mut _2: S;
// let mut _3: S;
// let mut _4: S;
-// let mut _5: ();
-// let mut _6: bool;
+// let mut _5: bool;
//
// bb0: {
// END rustc.node4.ElaborateDrops.after.mir
// let mut _2: S;
// let mut _3: ();
// let mut _4: S;
-// let mut _5: ();
-// let mut _6: S;
-// let mut _7: bool;
+// let mut _5: S;
+// let mut _6: bool;
//
// bb0: {
// END rustc.node13.ElaborateDrops.after.mir
let _x = vec![a.alloc(), a.alloc(), a.alloc(), a.alloc()];
}
+#[allow(unreachable_code)]
+fn vec_unreachable(a: &Allocator) {
+ let _x = vec![a.alloc(), a.alloc(), a.alloc(), return];
+}
+
fn run_test<F>(mut f: F)
where F: FnMut(&Allocator)
{
run_test(|a| array_simple(a));
run_test(|a| vec_simple(a));
+ run_test(|a| vec_unreachable(a));
run_test(|a| struct_dynamic_drop(a, false, false, false));
run_test(|a| struct_dynamic_drop(a, false, false, true));