mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
- mir::Operand::Consume(ref lvalue) => {
+ mir::Operand::Copy(ref lvalue) => {
+ lvalue.hash_stable(hcx, hasher);
+ }
+ mir::Operand::Move(ref lvalue) => {
lvalue.hash_stable(hcx, hasher);
}
mir::Operand::Constant(ref constant) => {
/// being nested in one another.
#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable)]
pub enum Operand<'tcx> {
- Consume(Lvalue<'tcx>),
+ /// Copy: The value must be available for use afterwards.
+ ///
+ /// This implies that the type of the lvalue must be `Copy`; this is true
+ /// by construction during build, but also checked by the MIR type checker.
+ Copy(Lvalue<'tcx>),
+ /// Move: The value (including old borrows of it) will not be used again.
+ ///
+ /// Safe for values of all types (modulo future developments towards `?Move`).
+ /// Correct usage patterns are enforced by the borrow checker for safe code.
+ /// `Copy` may be converted to `Move` to enable "last-use" optimizations.
+ Move(Lvalue<'tcx>),
Constant(Box<Constant<'tcx>>),
}
use self::Operand::*;
match *self {
Constant(ref a) => write!(fmt, "{:?}", a),
- Consume(ref lv) => write!(fmt, "{:?}", lv),
+ Copy(ref lv) => write!(fmt, "{:?}", lv),
+ Move(ref lv) => write!(fmt, "move {:?}", lv),
}
}
}
impl<'tcx> TypeFoldable<'tcx> for Operand<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
match *self {
- Operand::Consume(ref lval) => Operand::Consume(lval.fold_with(folder)),
+ Operand::Copy(ref lval) => Operand::Copy(lval.fold_with(folder)),
+ Operand::Move(ref lval) => Operand::Move(lval.fold_with(folder)),
Operand::Constant(ref c) => Operand::Constant(c.fold_with(folder)),
}
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
match *self {
- Operand::Consume(ref lval) => lval.visit_with(visitor),
+ Operand::Copy(ref lval) |
+ Operand::Move(ref lval) => lval.visit_with(visitor),
Operand::Constant(ref c) => c.visit_with(visitor)
}
}
where D: HasLocalDecls<'tcx>
{
match self {
- &Operand::Consume(ref l) => l.ty(local_decls, tcx).to_ty(tcx),
+ &Operand::Copy(ref l) |
+ &Operand::Move(ref l) => l.ty(local_decls, tcx).to_ty(tcx),
&Operand::Constant(ref c) => c.ty,
}
}
operand: & $($mutability)* Operand<'tcx>,
location: Location) {
match *operand {
- Operand::Consume(ref $($mutability)* lvalue) => {
- self.visit_lvalue(lvalue, LvalueContext::Consume, location);
+ Operand::Copy(ref $($mutability)* lvalue) => {
+ self.visit_lvalue(lvalue, LvalueContext::Copy, location);
+ }
+ Operand::Move(ref $($mutability)* lvalue) => {
+ self.visit_lvalue(lvalue, LvalueContext::Move, location);
}
Operand::Constant(ref $($mutability)* constant) => {
self.visit_constant(constant, location);
self.visit_ty(ty, TyContext::Location(location));
}
ProjectionElem::Index(ref $($mutability)* local) => {
- self.visit_local(local, LvalueContext::Consume, location);
+ self.visit_local(local, LvalueContext::Copy, location);
}
ProjectionElem::ConstantIndex { offset: _,
min_length: _,
Projection(Mutability),
// Used as an operand (consumed either by copy or by move)
- Consume,
+ Copy,
+ Move,
// Starting and ending a storage live range
StorageLive,
LvalueContext::Inspect |
LvalueContext::Borrow { kind: BorrowKind::Shared, .. } |
LvalueContext::Borrow { kind: BorrowKind::Unique, .. } |
- LvalueContext::Projection(Mutability::Not) | LvalueContext::Consume |
+ LvalueContext::Projection(Mutability::Not) |
+ LvalueContext::Copy | LvalueContext::Move |
LvalueContext::StorageLive | LvalueContext::StorageDead |
LvalueContext::Validate => false,
}
match *self {
LvalueContext::Inspect | LvalueContext::Borrow { kind: BorrowKind::Shared, .. } |
LvalueContext::Borrow { kind: BorrowKind::Unique, .. } |
- LvalueContext::Projection(Mutability::Not) | LvalueContext::Consume => true,
+ LvalueContext::Projection(Mutability::Not) |
+ LvalueContext::Copy | LvalueContext::Move => true,
LvalueContext::Borrow { kind: BorrowKind::Mut, .. } | LvalueContext::Store |
LvalueContext::Call | LvalueContext::Projection(Mutability::Mut) |
LvalueContext::Drop | LvalueContext::StorageLive | LvalueContext::StorageDead |
use rustc_data_structures::indexed_vec::{Idx};
use syntax::ast::{self};
-use syntax_pos::{DUMMY_SP, Span};
+use syntax_pos::Span;
use dataflow::{do_dataflow};
use dataflow::{MoveDataParamEnv};
use util::borrowck_errors::{BorrowckErrors, Origin};
use self::MutateMode::{JustWrite, WriteAndRead};
-use self::ConsumeKind::{Consume};
pub fn provide(providers: &mut Providers) {
let id = tcx.hir.as_local_node_id(def_id)
.expect("do_mir_borrowck: non-local DefId");
- let move_data: MoveData<'tcx> = match MoveData::gather_moves(input_mir, tcx, param_env) {
+ let move_data: MoveData<'tcx> = match MoveData::gather_moves(input_mir, tcx) {
Ok(move_data) => move_data,
Err((move_data, move_errors)) => {
for move_error in move_errors {
flow_state);
}
StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => {
+ let context = ContextKind::InlineAsm.new(location);
for (o, output) in asm.outputs.iter().zip(outputs) {
if o.is_indirect {
- self.consume_lvalue(ContextKind::InlineAsm.new(location),
- Consume,
- (output, span),
- flow_state);
+ // FIXME(eddyb) indirect inline asm outputs should
+ // be encoded through MIR lvalue derefs instead.
+ self.access_lvalue(context,
+ (output, span),
+ (Deep, Read(ReadKind::Copy)),
+ flow_state);
+ self.check_if_path_is_moved(context, InitializationRequiringAction::Use,
+ (output, span), flow_state);
} else {
- self.mutate_lvalue(ContextKind::InlineAsm.new(location),
+ self.mutate_lvalue(context,
(output, span),
Deep,
if o.is_rw { WriteAndRead } else { JustWrite },
}
}
for input in inputs {
- self.consume_operand(ContextKind::InlineAsm.new(location),
- Consume,
- (input, span), flow_state);
+ self.consume_operand(context, (input, span), flow_state);
}
}
StatementKind::EndRegion(ref _rgn) => {
match term.kind {
TerminatorKind::SwitchInt { ref discr, switch_ty: _, values: _, targets: _ } => {
self.consume_operand(ContextKind::SwitchInt.new(loc),
- Consume,
(discr, span), flow_state);
}
TerminatorKind::Drop { location: ref drop_lvalue, target: _, unwind: _ } => {
- self.consume_lvalue(ContextKind::Drop.new(loc),
- ConsumeKind::Drop,
- (drop_lvalue, span), flow_state);
+ self.access_lvalue(ContextKind::Drop.new(loc),
+ (drop_lvalue, span),
+ (Deep, Write(WriteKind::StorageDeadOrDrop)),
+ flow_state);
}
TerminatorKind::DropAndReplace { location: ref drop_lvalue,
value: ref new_value,
JustWrite,
flow_state);
self.consume_operand(ContextKind::DropAndReplace.new(loc),
- ConsumeKind::Drop,
(new_value, span), flow_state);
}
TerminatorKind::Call { ref func, ref args, ref destination, cleanup: _ } => {
self.consume_operand(ContextKind::CallOperator.new(loc),
- Consume,
(func, span), flow_state);
for arg in args {
self.consume_operand(ContextKind::CallOperand.new(loc),
- Consume,
(arg, span), flow_state);
}
if let Some((ref dest, _/*bb*/)) = *destination {
}
TerminatorKind::Assert { ref cond, expected: _, ref msg, target: _, cleanup: _ } => {
self.consume_operand(ContextKind::Assert.new(loc),
- Consume,
(cond, span), flow_state);
match *msg {
AssertMessage::BoundsCheck { ref len, ref index } => {
self.consume_operand(ContextKind::Assert.new(loc),
- Consume,
(len, span), flow_state);
self.consume_operand(ContextKind::Assert.new(loc),
- Consume,
(index, span), flow_state);
}
AssertMessage::Math(_/*const_math_err*/) => {}
TerminatorKind::Yield { ref value, resume: _, drop: _} => {
self.consume_operand(ContextKind::Yield.new(loc),
- Consume, (value, span), flow_state);
+ (value, span), flow_state);
}
TerminatorKind::Resume |
/// How a mutation interacts with the prior value of its target:
/// `JustWrite` overwrites it outright (e.g. `x = ...`), while
/// `WriteAndRead` both reads and writes it (e.g. `x += ...`).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum MutateMode { JustWrite, WriteAndRead }
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-enum ConsumeKind { Drop, Consume }
-
/// Signal returned by internal traversal helpers: `Continue` keeps
/// going, `Break` stops early. (NOTE(review): the call sites are not
/// visible in this hunk — confirm usage there.)
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum Control { Continue, Break }
Rvalue::Repeat(ref operand, _) |
Rvalue::UnaryOp(_/*un_op*/, ref operand) |
Rvalue::Cast(_/*cast_kind*/, ref operand, _/*ty*/) => {
- self.consume_operand(context, Consume, (operand, span), flow_state)
+ self.consume_operand(context, (operand, span), flow_state)
}
Rvalue::Len(ref lvalue) |
Rvalue::BinaryOp(_bin_op, ref operand1, ref operand2) |
Rvalue::CheckedBinaryOp(_bin_op, ref operand1, ref operand2) => {
- self.consume_operand(context, Consume, (operand1, span), flow_state);
- self.consume_operand(context, Consume, (operand2, span), flow_state);
+ self.consume_operand(context, (operand1, span), flow_state);
+ self.consume_operand(context, (operand2, span), flow_state);
}
Rvalue::NullaryOp(_op, _ty) => {
Rvalue::Aggregate(ref _aggregate_kind, ref operands) => {
for operand in operands {
- self.consume_operand(context, Consume, (operand, span), flow_state);
+ self.consume_operand(context, (operand, span), flow_state);
}
}
}
fn consume_operand(&mut self,
context: Context,
- consume_via_drop: ConsumeKind,
(operand, span): (&Operand<'tcx>, Span),
flow_state: &InProgress<'cx, 'gcx, 'tcx>) {
match *operand {
- Operand::Consume(ref lvalue) => {
- self.consume_lvalue(context, consume_via_drop, (lvalue, span), flow_state)
- }
- Operand::Constant(_) => {}
- }
- }
-
- fn consume_lvalue(&mut self,
- context: Context,
- consume_via_drop: ConsumeKind,
- lvalue_span: (&Lvalue<'tcx>, Span),
- flow_state: &InProgress<'cx, 'gcx, 'tcx>) {
- let lvalue = lvalue_span.0;
-
- let ty = lvalue.ty(self.mir, self.tcx).to_ty(self.tcx);
-
- // Erase the regions in type before checking whether it moves by
- // default. There are a few reasons to do this:
- //
- // - They should not affect the result.
- // - It avoids adding new region constraints into the surrounding context,
- // which would trigger an ICE, since the infcx will have been "frozen" by
- // the NLL region context.
- let gcx = self.tcx.global_tcx();
- let erased_ty = gcx.lift(&self.tcx.erase_regions(&ty)).unwrap();
- let moves_by_default = erased_ty.moves_by_default(gcx, self.param_env, DUMMY_SP);
-
- if moves_by_default {
- let kind = match consume_via_drop {
- ConsumeKind::Drop => WriteKind::StorageDeadOrDrop,
- _ => WriteKind::Move,
- };
-
- // move of lvalue: check if this is move of already borrowed path
- self.access_lvalue(context, lvalue_span, (Deep, Write(kind)), flow_state);
- } else {
- // copy of lvalue: check if this is "copy of frozen path"
- // (FIXME: see check_loans.rs)
- self.access_lvalue(context, lvalue_span, (Deep, Read(ReadKind::Copy)), flow_state);
- }
+ Operand::Copy(ref lvalue) => {
+ // copy of lvalue: check if this is "copy of frozen path"
+ // (FIXME: see check_loans.rs)
+ self.access_lvalue(context,
+ (lvalue, span),
+ (Deep, Read(ReadKind::Copy)),
+ flow_state);
- // Finally, check if path was already moved.
- match consume_via_drop {
- ConsumeKind::Drop => {
- // If path is merely being dropped, then we'll already
- // check the drop flag to see if it is moved (thus we
- // skip this check in that case).
+ // Finally, check if path was already moved.
+ self.check_if_path_is_moved(context, InitializationRequiringAction::Use,
+ (lvalue, span), flow_state);
}
- ConsumeKind::Consume => {
+ Operand::Move(ref lvalue) => {
+ // move of lvalue: check if this is move of already borrowed path
+ self.access_lvalue(context,
+ (lvalue, span),
+ (Deep, Write(WriteKind::Move)),
+ flow_state);
+
+ // Finally, check if path was already moved.
self.check_if_path_is_moved(context, InitializationRequiringAction::Use,
- lvalue_span, flow_state);
+ (lvalue, span), flow_state);
}
+ Operand::Constant(_) => {}
}
}
}
return None;
};
- self.tcx
- .with_freevars(node_id, |freevars| {
- for (v, lv) in freevars.iter().zip(lvs) {
- if let Operand::Consume(Lvalue::Local(l)) = *lv {
- if local == l {
- debug!(
- "find_closure_span: found captured local {:?}",
- l
- );
- return Some(v.span);
- }
+ self.tcx.with_freevars(node_id, |freevars| {
+ for (v, lv) in freevars.iter().zip(lvs) {
+ match *lv {
+ Operand::Copy(Lvalue::Local(l)) |
+ Operand::Move(Lvalue::Local(l)) if local == l => {
+ debug!(
+ "find_closure_span: found captured local {:?}",
+ l
+ );
+ return Some(v.span);
}
+ _ => {}
}
- None
- })
- .map(|var_span| (args_span, var_span))
+ }
+ None
+ }).map(|var_span| (args_span, var_span))
} else {
None
};
&len, Rvalue::Len(slice.clone()));
this.cfg.push_assign(block, source_info, // lt = idx < len
<, Rvalue::BinaryOp(BinOp::Lt,
- Operand::Consume(Lvalue::Local(idx)),
- Operand::Consume(len.clone())));
+ Operand::Copy(Lvalue::Local(idx)),
+ Operand::Copy(len.clone())));
let msg = AssertMessage::BoundsCheck {
- len: Operand::Consume(len),
- index: Operand::Consume(Lvalue::Local(idx))
+ len: Operand::Move(len),
+ index: Operand::Copy(Lvalue::Local(idx))
};
- let success = this.assert(block, Operand::Consume(lt), true,
+ let success = this.assert(block, Operand::Move(lt), true,
msg, expr_span);
success.and(slice.index(idx))
}
Category::Rvalue(..) => {
let operand =
unpack!(block = this.as_temp(block, scope, expr));
- block.and(Operand::Consume(Lvalue::Local(operand)))
+ block.and(Operand::Move(Lvalue::Local(operand)))
}
}
}
Rvalue::BinaryOp(BinOp::Eq, arg.clone(), minval));
let err = ConstMathErr::Overflow(Op::Neg);
- block = this.assert(block, Operand::Consume(is_min), false,
+ block = this.assert(block, Operand::Move(is_min), false,
AssertMessage::Math(err), expr_span);
}
block.and(Rvalue::UnaryOp(op, arg))
// initialize the box contents:
unpack!(block = this.into(&Lvalue::Local(result).deref(), block, value));
- block.and(Rvalue::Use(Operand::Consume(Lvalue::Local(result))))
+ block.and(Rvalue::Use(Operand::Move(Lvalue::Local(result))))
}
ExprKind::Cast { source } => {
let source = this.hir.mirror(source);
.zip(field_types.into_iter())
.map(|(n, ty)| match fields_map.get(&n) {
Some(v) => v.clone(),
- None => Operand::Consume(base.clone().field(n, ty))
+ None => this.consume_by_copy_or_move(base.clone().field(n, ty))
})
.collect()
} else {
}
});
- block = self.assert(block, Operand::Consume(of), false,
+ block = self.assert(block, Operand::Move(of), false,
AssertMessage::Math(err), span);
- block.and(Rvalue::Use(Operand::Consume(val)))
+ block.and(Rvalue::Use(Operand::Move(val)))
} else {
if ty.is_integral() && (op == BinOp::Div || op == BinOp::Rem) {
// Checking division and remainder is more complex, since we 1. always check
self.cfg.push_assign(block, source_info, &is_zero,
Rvalue::BinaryOp(BinOp::Eq, rhs.clone(), zero));
- block = self.assert(block, Operand::Consume(is_zero), false,
+ block = self.assert(block, Operand::Move(is_zero), false,
AssertMessage::Math(zero_err), span);
// We only need to check for the overflow in one case:
self.cfg.push_assign(block, source_info, &is_min,
Rvalue::BinaryOp(BinOp::Eq, lhs.clone(), min));
- let is_neg_1 = Operand::Consume(is_neg_1);
- let is_min = Operand::Consume(is_min);
+ let is_neg_1 = Operand::Move(is_neg_1);
+ let is_min = Operand::Move(is_min);
self.cfg.push_assign(block, source_info, &of,
Rvalue::BinaryOp(BinOp::BitAnd, is_neg_1, is_min));
- block = self.assert(block, Operand::Consume(of), false,
+ block = self.assert(block, Operand::Move(of), false,
AssertMessage::Math(overflow_err), span);
}
}
match Category::of(&expr.kind).unwrap() {
Category::Lvalue => {
let lvalue = unpack!(block = this.as_lvalue(block, expr));
- let rvalue = Rvalue::Use(Operand::Consume(lvalue));
+ let rvalue = Rvalue::Use(this.consume_by_copy_or_move(lvalue));
this.cfg.push_assign(block, source_info, &Lvalue::Local(temp), rvalue);
}
_ => {
// because AssignOp is only legal for Copy types
// (overloaded ops should be desugared into a call).
let result = unpack!(block = this.build_binary_op(block, op, expr_span, lhs_ty,
- Operand::Consume(lhs.clone()), rhs));
+ Operand::Copy(lhs.clone()), rhs));
this.cfg.push_assign(block, source_info, &lhs, result);
block.unit()
self.schedule_drop_for_binding(binding.var_id, binding.span);
let rvalue = match binding.binding_mode {
BindingMode::ByValue =>
- Rvalue::Use(Operand::Consume(binding.source)),
+ Rvalue::Use(self.consume_by_copy_or_move(binding.source)),
BindingMode::ByRef(region, borrow_kind) =>
Rvalue::Ref(region, borrow_kind, binding.source),
};
Rvalue::Discriminant(lvalue.clone()));
assert_eq!(values.len() + 1, targets.len());
self.cfg.terminate(block, source_info, TerminatorKind::SwitchInt {
- discr: Operand::Consume(discr),
+ discr: Operand::Move(discr),
switch_ty: discr_ty,
values: From::from(values),
targets,
ConstVal::Bool(false) => vec![false_bb, true_bb],
v => span_bug!(test.span, "expected boolean value but got {:?}", v)
};
- (ret, TerminatorKind::if_(self.hir.tcx(), Operand::Consume(lvalue.clone()),
+ (ret, TerminatorKind::if_(self.hir.tcx(), Operand::Copy(lvalue.clone()),
true_bb, false_bb))
} else {
// The switch may be inexhaustive so we
v.val.to_const_int().expect("switching on integral")
).collect();
(targets.clone(), TerminatorKind::SwitchInt {
- discr: Operand::Consume(lvalue.clone()),
+ discr: Operand::Copy(lvalue.clone()),
switch_ty,
values: From::from(values),
targets,
}
TestKind::Eq { value, mut ty } => {
- let mut val = Operand::Consume(lvalue.clone());
+ let mut val = Operand::Copy(lvalue.clone());
// If we're using b"..." as a pattern, we need to insert an
// unsizing coercion, as the byte string has the type &[u8; N].
let val_slice = self.temp(ty, test.span);
self.cfg.push_assign(block, source_info, &val_slice,
Rvalue::Cast(CastKind::Unsize, val, ty));
- val = Operand::Consume(val_slice);
+ val = Operand::Move(val_slice);
}
}
let slice = self.temp(ty, test.span);
self.cfg.push_assign(block, source_info, &slice,
Rvalue::Cast(CastKind::Unsize, array, ty));
- Operand::Consume(slice)
+ Operand::Move(slice)
} else {
self.literal_operand(test.span, ty, Literal::Value {
value
let block = self.cfg.start_new_block();
self.cfg.terminate(eq_block, source_info,
TerminatorKind::if_(self.hir.tcx(),
- Operand::Consume(eq_result),
+ Operand::Move(eq_result),
block, fail));
vec![block, fail]
} else {
// Test `val` by computing `lo <= val && val <= hi`, using primitive comparisons.
let lo = self.literal_operand(test.span, ty.clone(), lo.clone());
let hi = self.literal_operand(test.span, ty.clone(), hi.clone());
- let val = Operand::Consume(lvalue.clone());
+ let val = Operand::Copy(lvalue.clone());
let fail = self.cfg.start_new_block();
let block = self.compare(block, fail, test.span, BinOp::Le, lo, val.clone());
// result = actual == expected OR result = actual < expected
self.cfg.push_assign(block, source_info, &result,
Rvalue::BinaryOp(op,
- Operand::Consume(actual),
- Operand::Consume(expected)));
+ Operand::Move(actual),
+ Operand::Move(expected)));
// branch based on result
let (false_bb, true_bb) = (self.cfg.start_new_block(),
self.cfg.start_new_block());
self.cfg.terminate(block, source_info,
- TerminatorKind::if_(self.hir.tcx(), Operand::Consume(result),
+ TerminatorKind::if_(self.hir.tcx(), Operand::Move(result),
true_bb, false_bb));
vec![true_bb, false_bb]
}
// branch based on result
let target_block = self.cfg.start_new_block();
self.cfg.terminate(block, source_info,
- TerminatorKind::if_(self.hir.tcx(), Operand::Consume(result),
+ TerminatorKind::if_(self.hir.tcx(), Operand::Move(result),
target_block, fail_block));
target_block
}
use rustc::mir::*;
use syntax::ast;
-use syntax_pos::Span;
+use syntax_pos::{Span, DUMMY_SP};
impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
/// Add a new temporary value of type `ty` storing the result of
});
temp
}
+
+ pub fn consume_by_copy_or_move(&self, lvalue: Lvalue<'tcx>) -> Operand<'tcx> {
+ let tcx = self.hir.tcx();
+ let ty = lvalue.ty(&self.local_decls, tcx).to_ty(tcx);
+ if self.hir.type_moves_by_default(ty, DUMMY_SP) {
+ Operand::Move(lvalue)
+ } else {
+ Operand::Copy(lvalue)
+ }
+ }
}
use rustc::util::nodemap::FxHashMap;
use rustc_data_structures::indexed_vec::{IndexVec};
-use syntax::codemap::DUMMY_SP;
-
use std::collections::hash_map::Entry;
use std::mem;
struct MoveDataBuilder<'a, 'gcx: 'tcx, 'tcx: 'a> {
mir: &'a Mir<'tcx>,
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- param_env: ty::ParamEnv<'gcx>,
data: MoveData<'tcx>,
errors: Vec<MoveError<'tcx>>,
}
impl<'a, 'gcx, 'tcx> MoveDataBuilder<'a, 'gcx, 'tcx> {
- fn new(mir: &'a Mir<'tcx>,
- tcx: TyCtxt<'a, 'gcx, 'tcx>,
- param_env: ty::ParamEnv<'gcx>)
- -> Self {
+ fn new(mir: &'a Mir<'tcx>, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Self {
let mut move_paths = IndexVec::new();
let mut path_map = IndexVec::new();
let mut init_path_map = IndexVec::new();
MoveDataBuilder {
mir,
tcx,
- param_env,
errors: Vec::new(),
data: MoveData {
moves: IndexVec::new(),
}
}
-pub(super) fn gather_moves<'a, 'gcx, 'tcx>(mir: &Mir<'tcx>,
- tcx: TyCtxt<'a, 'gcx, 'tcx>,
- param_env: ty::ParamEnv<'gcx>)
+pub(super) fn gather_moves<'a, 'gcx, 'tcx>(mir: &Mir<'tcx>, tcx: TyCtxt<'a, 'gcx, 'tcx>)
-> Result<MoveData<'tcx>,
(MoveData<'tcx>, Vec<MoveError<'tcx>>)> {
- let mut builder = MoveDataBuilder::new(mir, tcx, param_env);
+ let mut builder = MoveDataBuilder::new(mir, tcx);
builder.gather_args();
}
StatementKind::StorageLive(_) => {}
StatementKind::StorageDead(local) => {
- self.gather_move(&Lvalue::Local(local), true);
+ self.gather_move(&Lvalue::Local(local));
}
StatementKind::SetDiscriminant{ .. } => {
span_bug!(stmt.source_info.span,
TerminatorKind::Unreachable => { }
TerminatorKind::Return => {
- self.gather_move(&Lvalue::Local(RETURN_POINTER), false);
+ self.gather_move(&Lvalue::Local(RETURN_POINTER));
}
TerminatorKind::Assert { .. } |
}
TerminatorKind::Drop { ref location, target: _, unwind: _ } => {
- self.gather_move(location, false);
+ self.gather_move(location);
}
TerminatorKind::DropAndReplace { ref location, ref value, .. } => {
self.create_move_path(location);
fn gather_operand(&mut self, operand: &Operand<'tcx>) {
match *operand {
- Operand::Constant(..) => {} // not-a-move
- Operand::Consume(ref lval) => { // a move
- self.gather_move(lval, false);
+ Operand::Constant(..) |
+ Operand::Copy(..) => {} // not-a-move
+ Operand::Move(ref lval) => { // a move
+ self.gather_move(lval);
}
}
}
- fn gather_move(&mut self, lval: &Lvalue<'tcx>, force: bool) {
+ fn gather_move(&mut self, lval: &Lvalue<'tcx>) {
debug!("gather_move({:?}, {:?})", self.loc, lval);
- let tcx = self.builder.tcx;
- let gcx = tcx.global_tcx();
- let lv_ty = lval.ty(self.builder.mir, tcx).to_ty(tcx);
- let erased_ty = gcx.lift(&tcx.erase_regions(&lv_ty)).unwrap();
- if !force && !erased_ty.moves_by_default(gcx, self.builder.param_env, DUMMY_SP) {
- debug!("gather_move({:?}, {:?}) - {:?} is Copy. skipping", self.loc, lval, lv_ty);
- return
- }
-
let path = match self.move_path_for(lval) {
Ok(path) | Err(MoveError::UnionMove { path }) => path,
Err(error @ MoveError::IllegalMove { .. }) => {
}
impl<'a, 'gcx, 'tcx> MoveData<'tcx> {
- pub fn gather_moves(mir: &Mir<'tcx>,
- tcx: TyCtxt<'a, 'gcx, 'tcx>,
- param_env: ty::ParamEnv<'gcx>)
+ pub fn gather_moves(mir: &Mir<'tcx>, tcx: TyCtxt<'a, 'gcx, 'tcx>)
-> Result<Self, (Self, Vec<MoveError<'tcx>>)> {
- builder::gather_moves(mir, tcx, param_env)
+ builder::gather_moves(mir, tcx)
}
}
pub fn check_overflow(&self) -> bool {
self.check_overflow
}
+
+ pub fn type_moves_by_default(&self, ty: Ty<'tcx>, span: Span) -> bool {
+ self.infcx.type_moves_by_default(self.param_env, ty, span)
+ }
}
fn lint_level_for_hir_id(tcx: TyCtxt, mut id: ast::NodeId) -> ast::NodeId {
let ret_statement = self.make_statement(
StatementKind::Assign(
Lvalue::Local(RETURN_POINTER),
- Rvalue::Use(Operand::Consume(rcvr))
+ Rvalue::Use(Operand::Copy(rcvr))
)
);
self.block(vec![ret_statement], TerminatorKind::Return, false);
// `let loc = Clone::clone(ref_loc);`
self.block(vec![statement], TerminatorKind::Call {
func,
- args: vec![Operand::Consume(ref_loc)],
+ args: vec![Operand::Move(ref_loc)],
destination: Some((loc.clone(), next)),
cleanup: Some(cleanup),
}, false);
let compute_cond = self.make_statement(
StatementKind::Assign(
cond.clone(),
- Rvalue::BinaryOp(BinOp::Ne, Operand::Consume(end), Operand::Consume(beg))
+ Rvalue::BinaryOp(BinOp::Ne, Operand::Copy(end), Operand::Copy(beg))
)
);
// `if end != beg { goto loop_body; } else { goto loop_end; }`
self.block(
vec![compute_cond],
- TerminatorKind::if_(tcx, Operand::Consume(cond), loop_body, loop_end),
+ TerminatorKind::if_(tcx, Operand::Move(cond), loop_body, loop_end),
is_cleanup
);
}
self.make_statement(
StatementKind::Assign(
ret_field,
- Rvalue::Use(Operand::Consume(cloned))
+ Rvalue::Use(Operand::Move(cloned))
)
),
self.make_statement(
Lvalue::Local(beg),
Rvalue::BinaryOp(
BinOp::Add,
- Operand::Consume(Lvalue::Local(beg)),
+ Operand::Copy(Lvalue::Local(beg)),
Operand::Constant(self.make_usize(1))
)
)
let ret_statement = self.make_statement(
StatementKind::Assign(
Lvalue::Local(RETURN_POINTER),
- Rvalue::Use(Operand::Consume(ret.clone())),
+ Rvalue::Use(Operand::Move(ret.clone())),
)
);
self.block(vec![ret_statement], TerminatorKind::Return, false);
Lvalue::Local(beg),
Rvalue::BinaryOp(
BinOp::Add,
- Operand::Consume(Lvalue::Local(beg)),
+ Operand::Copy(Lvalue::Local(beg)),
Operand::Constant(self.make_usize(1))
)
)
Lvalue::Local(RETURN_POINTER),
Rvalue::Aggregate(
box kind,
- returns.into_iter().map(Operand::Consume).collect()
+ returns.into_iter().map(Operand::Move).collect()
)
)
);
let mut statements = vec![];
let rcvr = match rcvr_adjustment {
- Adjustment::Identity => Operand::Consume(rcvr_l),
- Adjustment::Deref => Operand::Consume(rcvr_l.deref()),
+ Adjustment::Identity => Operand::Move(rcvr_l),
+ Adjustment::Deref => Operand::Copy(rcvr_l.deref()),
Adjustment::RefMut => {
// let rcvr = &mut rcvr;
let ref_rcvr = local_decls.push(temp_decl(
Rvalue::Ref(tcx.types.re_erased, BorrowKind::Mut, rcvr_l)
)
});
- Operand::Consume(Lvalue::Local(ref_rcvr))
+ Operand::Move(Lvalue::Local(ref_rcvr))
}
};
if let Some(untuple_args) = untuple_args {
args.extend(untuple_args.iter().enumerate().map(|(i, ity)| {
let arg_lv = Lvalue::Local(Local::new(1+1));
- Operand::Consume(arg_lv.field(Field::new(i), *ity))
+ Operand::Move(arg_lv.field(Field::new(i), *ity))
}));
} else {
args.extend((1..sig.inputs().len()).map(|i| {
- Operand::Consume(Lvalue::Local(Local::new(1+i)))
+ Operand::Move(Lvalue::Local(Local::new(1+i)))
}));
}
Rvalue::Aggregate(
box AggregateKind::Adt(adt_def, variant_no, substs, None),
(1..sig.inputs().len()+1).map(|i| {
- Operand::Consume(Lvalue::Local(Local::new(i)))
+ Operand::Move(Lvalue::Local(Local::new(i)))
}).collect()
)
)
patch.add_statement(
loc, StatementKind::StorageLive(temp));
patch.add_assign(loc, Lvalue::Local(temp),
- Rvalue::Use(Operand::Consume(location.clone())));
+ Rvalue::Use(Operand::Move(location.clone())));
patch.patch_terminator(loc.block, TerminatorKind::Drop {
location: Lvalue::Local(temp),
target: storage_dead_block,
.chain(
args.iter().filter_map(|op| {
match op {
- &Operand::Consume(ref lval) =>
+ &Operand::Copy(ref lval) |
+ &Operand::Move(ref lval) =>
Some(lval_to_operand(lval.clone())),
&Operand::Constant(..) => { None },
}
block_data.statements.insert(i, release_stmt);
}
// Casts can change what validation does (e.g. unsizing)
- StatementKind::Assign(_, Rvalue::Cast(kind, Operand::Consume(_), _))
+ StatementKind::Assign(_, Rvalue::Cast(kind, Operand::Copy(_), _)) |
+ StatementKind::Assign(_, Rvalue::Cast(kind, Operand::Move(_), _))
if kind != CastKind::Misc =>
{
// Due to a lack of NLL; we can't capture anything directly here.
// Instead, we have to re-match and clone there.
let (dest_lval, src_lval) = match block_data.statements[i].kind {
StatementKind::Assign(ref dest_lval,
- Rvalue::Cast(_, Operand::Consume(ref src_lval), _)) =>
+ Rvalue::Cast(_, Operand::Copy(ref src_lval), _)) |
+ StatementKind::Assign(ref dest_lval,
+ Rvalue::Cast(_, Operand::Move(ref src_lval), _)) =>
{
(dest_lval.clone(), src_lval.clone())
},
StatementKind::Assign(Lvalue::Local(local), Rvalue::Use(ref operand)) if
local == dest_local => {
let maybe_action = match *operand {
- Operand::Consume(ref src_lvalue) => {
+ Operand::Copy(ref src_lvalue) |
+ Operand::Move(ref src_lvalue) => {
Action::local_copy(&mir, &def_use_analysis, src_lvalue)
}
Operand::Constant(ref src_constant) => {
match stmt.kind {
StatementKind::Assign(
Lvalue::Local(local),
- Rvalue::Use(Operand::Consume(Lvalue::Local(src_local))),
+ Rvalue::Use(Operand::Copy(Lvalue::Local(src_local))),
+ ) |
+ StatementKind::Assign(
+ Lvalue::Local(local),
+ Rvalue::Use(Operand::Move(Lvalue::Local(src_local))),
) if local == dest_local && dest_local == src_local => {}
_ => {
continue;
self.super_operand(operand, location);
match *operand {
- Operand::Consume(Lvalue::Local(local)) if local == self.dest_local => {}
+ Operand::Copy(Lvalue::Local(local)) |
+ Operand::Move(Lvalue::Local(local)) if local == self.dest_local => {}
_ => return,
}
_ => return
}
let param_env = tcx.param_env(src.def_id);
- let move_data = MoveData::gather_moves(mir, tcx, param_env).unwrap();
+ let move_data = MoveData::gather_moves(mir, tcx).unwrap();
let elaborate_patch = {
let mir = &*mir;
let env = MoveDataParamEnv {
}
fn get_drop_flag(&mut self, path: Self::Path) -> Option<Operand<'tcx>> {
- self.ctxt.drop_flag(path).map(Operand::Consume)
+ self.ctxt.drop_flag(path).map(Operand::Copy)
}
}
let ret_val = match data.terminator().kind {
TerminatorKind::Return => Some((1,
None,
- Operand::Consume(Lvalue::Local(self.new_ret_local)),
+ Operand::Move(Lvalue::Local(self.new_ret_local)),
None)),
TerminatorKind::Yield { ref value, resume, drop } => Some((0,
Some(resume),
let default_block = insert_term_block(mir, default);
let switch = TerminatorKind::SwitchInt {
- discr: Operand::Consume(transform.make_field(transform.state_field, tcx.types.u32)),
+ discr: Operand::Copy(transform.make_field(transform.state_field, tcx.types.u32)),
switch_ty: tcx.types.u32,
values: Cow::from(cases.iter().map(|&(i, _)| ConstInt::U32(i)).collect::<Vec<_>>()),
targets: cases.iter().map(|&(_, d)| d).chain(once(default_block)).collect(),
// needs to generate the cast.
// FIXME: we should probably just generate correct MIR in the first place...
- let arg = if let Operand::Consume(ref lval) = args[0] {
+ let arg = if let Operand::Move(ref lval) = args[0] {
lval.clone()
} else {
bug!("Constant arg to \"box_free\"");
};
let ptr_ty = self.tcx.mk_mut_ptr(pointee_ty);
- let raw_ptr = Rvalue::Cast(CastKind::Misc, Operand::Consume(ref_tmp), ptr_ty);
+ let raw_ptr = Rvalue::Cast(CastKind::Misc, Operand::Move(ref_tmp), ptr_ty);
let cast_tmp = LocalDecl::new_temp(ptr_ty, callsite.location.span);
let cast_tmp = caller_mir.local_decls.push(cast_tmp);
let tuple_tmp_args =
tuple_tys.iter().enumerate().map(|(i, ty)| {
// This is e.g. `tuple_tmp.0` in our example above.
- let tuple_field = Operand::Consume(tuple.clone().field(Field::new(i), ty));
+ let tuple_field = Operand::Move(tuple.clone().field(Field::new(i), ty));
// Spill to a local to make e.g. `tmp0`.
self.create_temp_if_necessary(tuple_field, callsite, caller_mir)
// FIXME: Analysis of the usage of the arguments to avoid
// unnecessary temporaries.
- if let Operand::Consume(Lvalue::Local(local)) = arg {
+ if let Operand::Move(Lvalue::Local(local)) = arg {
if caller_mir.local_kind(local) == LocalKind::Temp {
// Reuse the operand if it's a temporary already
return local;
}
_ => bug!("Detected `&*` but didn't find `&*`!"),
};
- *rvalue = Rvalue::Use(Operand::Consume(new_lvalue))
+ *rvalue = Rvalue::Use(Operand::Copy(new_lvalue))
}
if let Some(constant) = self.optimizations.arrays_lengths.remove(&location) {
rhs,
rhs_override_ty.unwrap())),
});
- rhs = Operand::Consume(Lvalue::Local(local));
+ rhs = Operand::Move(Lvalue::Local(local));
}
let call_did = check_lang_item_type(
_ => bug!("That should be all the checked ones?"),
};
Some(i)
-}
\ No newline at end of file
+}
fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
match *operand {
- Operand::Consume(ref lvalue) => {
+ Operand::Copy(ref lvalue) |
+ Operand::Move(ref lvalue) => {
self.nest(|this| {
this.super_operand(operand, location);
this.try_consume();
self.const_fn_arg_vars.insert(index.index()) {
// Direct use of an argument is permitted.
- if let Rvalue::Use(Operand::Consume(Lvalue::Local(local))) = *rvalue {
- if self.mir.local_kind(local) == LocalKind::Arg {
- return;
+ match *rvalue {
+ Rvalue::Use(Operand::Copy(Lvalue::Local(local))) |
+ Rvalue::Use(Operand::Move(Lvalue::Local(local))) => {
+ if self.mir.local_kind(local) == LocalKind::Arg {
+ return;
+ }
}
+ _ => {}
}
// Avoid a generic error for other uses of arguments.
let attributes = tcx.get_attrs(def_id);
let param_env = tcx.param_env(def_id);
- let move_data = MoveData::gather_moves(mir, tcx, param_env).unwrap();
+ let move_data = MoveData::gather_moves(mir, tcx).unwrap();
let mdpe = MoveDataParamEnv { move_data: move_data, param_env: param_env };
let dead_unwinds = IdxSetBuf::new_empty(mir.basic_blocks().len());
let flow_inits =
};
assert!(args.len() == 1);
let peek_arg_lval = match args[0] {
- mir::Operand::Consume(ref lval @ mir::Lvalue::Local(_)) => Some(lval),
+ mir::Operand::Copy(ref lval @ mir::Lvalue::Local(_)) |
+ mir::Operand::Move(ref lval @ mir::Lvalue::Local(_)) => Some(lval),
_ => None,
};
terminator: Some(Terminator {
source_info: self.source_info,
kind: TerminatorKind::SwitchInt {
- discr: Operand::Consume(discr),
+ discr: Operand::Move(discr),
switch_ty: discr_ty,
values: From::from(values.to_owned()),
targets: blocks,
kind: TerminatorKind::Call {
func: Operand::function_handle(tcx, drop_fn.def_id, substs,
self.source_info.span),
- args: vec![Operand::Consume(Lvalue::Local(ref_lvalue))],
+ args: vec![Operand::Move(Lvalue::Local(ref_lvalue))],
destination: Some((unit_temp, succ)),
cleanup: unwind.into_option(),
},
ptr_based: bool)
-> BasicBlock
{
- let use_ = |lv: &Lvalue<'tcx>| Operand::Consume(lv.clone());
+ let copy = |lv: &Lvalue<'tcx>| Operand::Copy(lv.clone());
+ let move_ = |lv: &Lvalue<'tcx>| Operand::Move(lv.clone());
let tcx = self.tcx();
let ref_ty = tcx.mk_ref(tcx.types.re_erased, ty::TypeAndMut {
let one = self.constant_usize(1);
let (ptr_next, cur_next) = if ptr_based {
- (Rvalue::Use(use_(&Lvalue::Local(cur))),
- Rvalue::BinaryOp(BinOp::Offset, use_(&Lvalue::Local(cur)), one))
+ (Rvalue::Use(copy(&Lvalue::Local(cur))),
+ Rvalue::BinaryOp(BinOp::Offset, copy(&Lvalue::Local(cur)), one))
} else {
(Rvalue::Ref(
tcx.types.re_erased,
BorrowKind::Mut,
self.lvalue.clone().index(cur)),
- Rvalue::BinaryOp(BinOp::Add, use_(&Lvalue::Local(cur)), one))
+ Rvalue::BinaryOp(BinOp::Add, copy(&Lvalue::Local(cur)), one))
};
let drop_block = BasicBlockData {
let loop_block = BasicBlockData {
statements: vec![
self.assign(can_go, Rvalue::BinaryOp(BinOp::Eq,
- use_(&Lvalue::Local(cur)),
- use_(length_or_end)))
+ copy(&Lvalue::Local(cur)),
+ copy(length_or_end)))
],
is_cleanup: unwind.is_cleanup(),
terminator: Some(Terminator {
source_info: self.source_info,
- kind: TerminatorKind::if_(tcx, use_(can_go), succ, drop_block)
+ kind: TerminatorKind::if_(tcx, move_(can_go), succ, drop_block)
})
};
let loop_block = self.elaborator.patch().new_block(loop_block);
let tcx = self.tcx();
- let use_ = |lv: &Lvalue<'tcx>| Operand::Consume(lv.clone());
+ let move_ = |lv: &Lvalue<'tcx>| Operand::Move(lv.clone());
let size = &Lvalue::Local(self.new_temp(tcx.types.usize));
let size_is_zero = &Lvalue::Local(self.new_temp(tcx.types.bool));
let base_block = BasicBlockData {
statements: vec![
self.assign(size, Rvalue::NullaryOp(NullOp::SizeOf, ety)),
self.assign(size_is_zero, Rvalue::BinaryOp(BinOp::Eq,
- use_(size),
+ move_(size),
self.constant_usize(0)))
],
is_cleanup: self.unwind.is_cleanup(),
source_info: self.source_info,
kind: TerminatorKind::if_(
tcx,
- use_(size_is_zero),
+ move_(size_is_zero),
self.drop_loop_pair(ety, false),
self.drop_loop_pair(ety, true)
)
tcx.types.re_erased, BorrowKind::Mut, self.lvalue.clone()
)));
drop_block_stmts.push(self.assign(&cur, Rvalue::Cast(
- CastKind::Misc, Operand::Consume(tmp.clone()), iter_ty
+ CastKind::Misc, Operand::Move(tmp.clone()), iter_ty
)));
drop_block_stmts.push(self.assign(&length_or_end,
Rvalue::BinaryOp(BinOp::Offset,
- Operand::Consume(cur.clone()), Operand::Consume(length.clone())
+ Operand::Copy(cur.clone()), Operand::Move(length.clone())
)));
} else {
// index = 0 (length already pushed)
let call = TerminatorKind::Call {
func: Operand::function_handle(tcx, free_func, substs, self.source_info.span),
- args: vec![Operand::Consume(self.lvalue.clone())],
+ args: vec![Operand::Move(self.lvalue.clone())],
destination: Some((unit_temp, target)),
cleanup: None
}; // FIXME(#6393)
LvalueContext::Borrow { .. } |
LvalueContext::Inspect |
- LvalueContext::Consume |
+ LvalueContext::Copy |
+ LvalueContext::Move |
LvalueContext::Validate => {
if self.mode.include_regular_use {
self.defs_uses.add_use(local);
location: Location) {
self.record("Operand", operand);
self.record(match *operand {
- Operand::Consume(..) => "Operand::Consume",
+ Operand::Copy(..) => "Operand::Copy",
+ Operand::Move(..) => "Operand::Move",
Operand::Constant(..) => "Operand::Constant",
}, operand);
self.super_operand(operand, location);
// box_free(x) shares with `drop x` the property that it
// is not guaranteed to be statically dominated by the
// definition of x, so x must always be in an alloca.
- if let mir::Operand::Consume(ref lvalue) = args[0] {
+ if let mir::Operand::Move(ref lvalue) = args[0] {
self.visit_lvalue(lvalue, LvalueContext::Drop, location);
}
}
if let mir::Lvalue::Projection(ref proj) = *lvalue {
// Allow uses of projections that are ZSTs or from scalar fields.
- if let LvalueContext::Consume = context {
+ let is_consume = match context {
+ LvalueContext::Copy | LvalueContext::Move => true,
+ _ => false
+ };
+ if is_consume {
let base_ty = proj.base.ty(self.cx.mir, ccx.tcx());
let base_ty = self.cx.monomorphize(&base_ty);
if let mir::ProjectionElem::Field(..) = proj.elem {
let layout = ccx.layout_of(base_ty.to_ty(ccx.tcx()));
if layout.is_llvm_immediate() || layout.is_llvm_scalar_pair() {
- // Recurse as a `Consume` instead of `Projection`,
+ // Recurse with the same context, instead of `Projection`,
// potentially stopping at non-operand projections,
// which would trigger `mark_as_lvalue` on locals.
- self.visit_lvalue(&proj.base, LvalueContext::Consume, location);
+ self.visit_lvalue(&proj.base, context, location);
return;
}
}
// A deref projection only reads the pointer, never needs the lvalue.
if let mir::ProjectionElem::Deref = proj.elem {
- return self.visit_lvalue(&proj.base, LvalueContext::Consume, location);
+ return self.visit_lvalue(&proj.base, LvalueContext::Copy, location);
}
}
LvalueContext::StorageLive |
LvalueContext::StorageDead |
LvalueContext::Validate |
- LvalueContext::Consume => {}
+ LvalueContext::Copy |
+ LvalueContext::Move => {}
LvalueContext::Inspect |
LvalueContext::Store |
// promotes any complex rvalues to constants.
if i == 2 && intrinsic.unwrap().starts_with("simd_shuffle") {
match *arg {
- mir::Operand::Consume(_) => {
+ mir::Operand::Copy(_) |
+ mir::Operand::Move(_) => {
span_bug!(span, "shuffle indices must be constant");
}
mir::Operand::Constant(ref constant) => {
// The callee needs to own the argument memory if we pass it
// by-ref, so make a local copy of non-immediate constants
// (and of by-`Copy` operands, whose original may be used again).
- if let (&mir::Operand::Constant(_), Ref(..)) = (arg, op.val) {
- let tmp = LvalueRef::alloca(&bcx, op.layout, "const");
- op.val.store(&bcx, tmp);
- op.val = Ref(tmp.llval, tmp.alignment);
+ match (arg, op.val) {
+ (&mir::Operand::Copy(_), Ref(..)) |
+ (&mir::Operand::Constant(_), Ref(..)) => {
+ let tmp = LvalueRef::alloca(&bcx, op.layout, "const");
+ op.val.store(&bcx, tmp);
+ op.val = Ref(tmp.llval, tmp.alignment);
+ }
+ _ => {}
}
self.trans_argument(&bcx, op, &mut llargs, &fn_ty.args[i]);
(Base::Value(llprojected), llextra)
}
mir::ProjectionElem::Index(index) => {
- let index = &mir::Operand::Consume(mir::Lvalue::Local(index));
+ let index = &mir::Operand::Copy(mir::Lvalue::Local(index));
let llindex = self.const_operand(index, span)?.llval;
let iv = if let Some(iv) = common::const_to_opt_u128(llindex, false) {
-> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
debug!("const_operand({:?} @ {:?})", operand, span);
let result = match *operand {
- mir::Operand::Consume(ref lvalue) => {
+ mir::Operand::Copy(ref lvalue) |
+ mir::Operand::Move(ref lvalue) => {
Ok(self.const_lvalue(lvalue, span)?.to_const(span))
}
tr_base.project_field(bcx, field.index())
}
mir::ProjectionElem::Index(index) => {
- let index = &mir::Operand::Consume(mir::Lvalue::Local(index));
+ let index = &mir::Operand::Copy(mir::Lvalue::Local(index));
let index = self.trans_operand(bcx, index);
let llindex = index.immediate();
tr_base.project_index(bcx, llindex)
debug!("trans_operand(operand={:?})", operand);
match *operand {
- mir::Operand::Consume(ref lvalue) => {
+ mir::Operand::Copy(ref lvalue) |
+ mir::Operand::Move(ref lvalue) => {
self.trans_consume(bcx, lvalue)
}