// except according to those terms.
use build::{BlockAnd, BlockAndExtension, Builder};
+use build::scope::LoopScope;
use hair::*;
+use rustc::middle::region::CodeExtent;
use rustc::mir::repr::*;
use rustc::hir;
+use syntax::codemap::Span;
impl<'a,'tcx> Builder<'a,'tcx> {
pub fn ast_block(&mut self,
StmtKind::Expr { scope, expr } => {
unpack!(block = this.in_scope(scope, block, |this, _| {
let expr = this.hir.mirror(expr);
- let expr_span = expr.span;
- let temp = this.temp(expr.ty.clone());
- unpack!(block = this.into(&temp, block, expr));
- unpack!(block = this.build_drop(block, expr_span, temp));
- block.unit()
+ this.stmt_expr(block, expr)
}));
}
StmtKind::Let { remainder_scope, init_scope, pattern, initializer } => {
block.unit()
})
}
+
+ /// Lowers an expression in *statement* position: its value (if any) is
+ /// evaluated and dropped rather than stored into a destination.
+ /// NOTE(review): this consolidates logic previously inlined in
+ /// `ast_block`/`into` — behavior is intended to be identical.
+ pub fn stmt_expr(&mut self, mut block: BasicBlock, expr: Expr<'tcx>) -> BlockAnd<()> {
+ let this = self;
+ let expr_span = expr.span;
+ let scope_id = this.innermost_scope_id();
+ // Handle a number of expressions that don't need a destination at all. This
+ // avoids needing a mountain of temporary `()` variables.
+ match expr.kind {
+ ExprKind::Scope { extent, value } => {
+ // Unwrap the scope and recurse on the inner expression within it.
+ let value = this.hir.mirror(value);
+ this.in_scope(extent, block, |this, _| this.stmt_expr(block, value))
+ }
+ ExprKind::Assign { lhs, rhs } => {
+ let lhs = this.hir.mirror(lhs);
+ let scope_id = this.innermost_scope_id();
+ let lhs_span = lhs.span;
+ let lhs_ty = lhs.ty;
+
+ let lhs_needs_drop = this.hir.needs_drop(lhs_ty);
+
+ // Note: we evaluate assignments right-to-left. This
+ // is better for borrowck interaction with overloaded
+ // operators like x[j] = x[i].
+
+ // Generate better code for things that don't need to be
+ // dropped. We need the temporary as_operand generates
+ // so we can clean up the data if evaluating the LHS unwinds,
+ // but if the LHS (and therefore the RHS) doesn't need
+ // unwinding, we just translate directly to an rvalue instead.
+ let rhs = if lhs_needs_drop {
+ let op = unpack!(block = this.as_operand(block, rhs));
+ Rvalue::Use(op)
+ } else {
+ unpack!(block = this.as_rvalue(block, rhs))
+ };
+
+ let lhs = unpack!(block = this.as_lvalue(block, lhs));
+ // Drop the old contents of the lhs before overwriting it;
+ // `build_drop` is a no-op when `lhs_ty` needs no drop.
+ unpack!(block = this.build_drop(block, lhs_span, lhs.clone(), lhs_ty));
+ this.cfg.push_assign(block, scope_id, expr_span, &lhs, rhs);
+ block.unit()
+ }
+ ExprKind::AssignOp { op, lhs, rhs } => {
+ // FIXME(#28160) there is an interesting semantics
+ // question raised here -- should we "freeze" the
+ // value of the lhs here? I'm inclined to think not,
+ // since it seems closer to the semantics of the
+ // overloaded version, which takes `&mut self`. This
+ // only affects weird things like `x += {x += 1; x}`
+ // -- is that equal to `x + (x + 1)` or `2*(x+1)`?
+
+ // As above, RTL.
+ let rhs = unpack!(block = this.as_operand(block, rhs));
+ let lhs = unpack!(block = this.as_lvalue(block, lhs));
+
+ // we don't have to drop prior contents or anything
+ // because AssignOp is only legal for Copy types
+ // (overloaded ops should be desugared into a call).
+ this.cfg.push_assign(block, scope_id, expr_span, &lhs,
+ Rvalue::BinaryOp(op,
+ Operand::Consume(lhs.clone()),
+ rhs));
+
+ block.unit()
+ }
+ ExprKind::Continue { label } => {
+ // Jump back to the loop's continue block (exiting scopes on the way).
+ this.break_or_continue(expr_span, label, block,
+ |loop_scope| loop_scope.continue_block)
+ }
+ ExprKind::Break { label } => {
+ // As Continue, but also record that the loop can be broken out of.
+ this.break_or_continue(expr_span, label, block, |loop_scope| {
+ loop_scope.might_break = true;
+ loop_scope.break_block
+ })
+ }
+ ExprKind::Return { value } => {
+ // Store the return value (or unit) into the return pointer,
+ // exit all scopes out to the fn's return extent, then keep
+ // building in a fresh (unreachable) block.
+ block = match value {
+ Some(value) => unpack!(this.into(&Lvalue::ReturnPointer, block, value)),
+ None => {
+ this.cfg.push_assign_unit(block, scope_id,
+ expr_span, &Lvalue::ReturnPointer);
+ block
+ }
+ };
+ let extent = this.extent_of_return_scope();
+ let return_block = this.return_block();
+ this.exit_scope(expr_span, extent, block, return_block);
+ this.cfg.start_new_block().unit()
+ }
+ _ => {
+ // Everything else: evaluate into a fresh temporary, then
+ // drop that temporary (no-op for types without drop glue).
+ let expr_span = expr.span;
+ let expr_ty = expr.ty;
+ let temp = this.temp(expr.ty.clone());
+ unpack!(block = this.into(&temp, block, expr));
+ unpack!(block = this.build_drop(block, expr_span, temp, expr_ty));
+ block.unit()
+ }
+ }
+ }
+
+ /// Shared lowering for `break` and `continue`: finds the target loop
+ /// scope (innermost, or the one labelled `label`), lets `exit_selector`
+ /// choose which block to jump to (break vs. continue target), exits all
+ /// scopes out to the loop's extent, and resumes building in a fresh
+ /// (unreachable) block.
+ fn break_or_continue<F>(&mut self,
+ span: Span,
+ label: Option<CodeExtent>,
+ block: BasicBlock,
+ exit_selector: F)
+ -> BlockAnd<()>
+ where F: FnOnce(&mut LoopScope) -> BasicBlock
+ {
+ // Inner block so the mutable borrow of the loop scope ends before
+ // `exit_scope` needs to re-borrow `self`.
+ let (exit_block, extent) = {
+ let loop_scope = self.find_loop_scope(span, label);
+ (exit_selector(loop_scope), loop_scope.extent)
+ };
+ self.exit_scope(span, extent, block, exit_block);
+ self.cfg.start_new_block().unit()
+ }
}
use build::{BlockAnd, BlockAndExtension, Builder};
use build::expr::category::{Category, RvalueFunc};
-use build::scope::LoopScope;
use hair::*;
-use rustc::middle::region::CodeExtent;
use rustc::ty;
use rustc::mir::repr::*;
-use syntax::codemap::Span;
impl<'a,'tcx> Builder<'a,'tcx> {
/// Compile `expr`, storing the result into `destination`, which
}
exit_block.unit()
}
- ExprKind::Assign { lhs, rhs } => {
- // Note: we evaluate assignments right-to-left. This
- // is better for borrowck interaction with overloaded
- // operators like x[j] = x[i].
- let lhs = this.hir.mirror(lhs);
- let lhs_span = lhs.span;
- let rhs = unpack!(block = this.as_operand(block, rhs));
- let lhs = unpack!(block = this.as_lvalue(block, lhs));
- unpack!(block = this.build_drop(block, lhs_span, lhs.clone()));
- this.cfg.push_assign(block, scope_id, expr_span, &lhs, Rvalue::Use(rhs));
- block.unit()
- }
- ExprKind::AssignOp { op, lhs, rhs } => {
- // FIXME(#28160) there is an interesting semantics
- // question raised here -- should we "freeze" the
- // value of the lhs here? I'm inclined to think not,
- // since it seems closer to the semantics of the
- // overloaded version, which takes `&mut self`. This
- // only affects weird things like `x += {x += 1; x}`
- // -- is that equal to `x + (x + 1)` or `2*(x+1)`?
-
- // As above, RTL.
- let rhs = unpack!(block = this.as_operand(block, rhs));
- let lhs = unpack!(block = this.as_lvalue(block, lhs));
-
- // we don't have to drop prior contents or anything
- // because AssignOp is only legal for Copy types
- // (overloaded ops should be desugared into a call).
- this.cfg.push_assign(block, scope_id, expr_span, &lhs,
- Rvalue::BinaryOp(op,
- Operand::Consume(lhs.clone()),
- rhs));
-
- block.unit()
- }
- ExprKind::Continue { label } => {
- this.break_or_continue(expr_span, label, block,
- |loop_scope| loop_scope.continue_block)
- }
- ExprKind::Break { label } => {
- this.break_or_continue(expr_span, label, block, |loop_scope| {
- loop_scope.might_break = true;
- loop_scope.break_block
- })
- }
- ExprKind::Return { value } => {
- block = match value {
- Some(value) => unpack!(this.into(&Lvalue::ReturnPointer, block, value)),
- None => {
- this.cfg.push_assign_unit(block, scope_id,
- expr_span, &Lvalue::ReturnPointer);
- block
- }
- };
- let extent = this.extent_of_return_scope();
- let return_block = this.return_block();
- this.exit_scope(expr_span, extent, block, return_block);
- this.cfg.start_new_block().unit()
- }
ExprKind::Call { ty, fun, args } => {
let diverges = match ty.sty {
ty::TyFnDef(_, _, ref f) | ty::TyFnPtr(ref f) => {
success.unit()
}
+ // These cases don't actually need a destination
+ ExprKind::Assign { .. } |
+ ExprKind::AssignOp { .. } |
+ ExprKind::Continue { .. } |
+ ExprKind::Break { .. } |
+ ExprKind::Return {.. } => {
+ this.stmt_expr(block, expr)
+ }
+
// these are the cases that are more naturally handled by some other mode
ExprKind::Unary { .. } |
ExprKind::Binary { .. } |
}
}
}
-
- fn break_or_continue<F>(&mut self,
- span: Span,
- label: Option<CodeExtent>,
- block: BasicBlock,
- exit_selector: F)
- -> BlockAnd<()>
- where F: FnOnce(&mut LoopScope) -> BasicBlock
- {
- let (exit_block, extent) = {
- let loop_scope = self.find_loop_scope(span, label);
- (exit_selector(loop_scope), loop_scope.extent)
- };
- self.exit_scope(span, extent, block, exit_block);
- self.cfg.start_new_block().unit()
- }
}
mod as_rvalue;
mod as_operand;
mod as_temp;
-mod category;
+pub mod category;
mod into;
pub fn build_drop(&mut self,
block: BasicBlock,
span: Span,
- value: Lvalue<'tcx>)
- -> BlockAnd<()> {
+ value: Lvalue<'tcx>,
+ ty: Ty<'tcx>) -> BlockAnd<()> {
+ if !self.hir.needs_drop(ty) {
+ return block.unit();
+ }
let scope_id = self.innermost_scope_id();
let next_target = self.cfg.start_new_block();
let diverge_target = self.diverge_cleanup();
pub fn lvalue_temps<'bcx,'tcx>(bcx: Block<'bcx,'tcx>,
mir: &mir::Mir<'tcx>)
-> BitVector {
- let mut analyzer = TempAnalyzer::new(mir.temp_decls.len());
+ let mut analyzer = TempAnalyzer::new(mir, bcx, mir.temp_decls.len());
analyzer.visit_mir(mir);
if ty.is_scalar() ||
ty.is_unique() ||
ty.is_region_ptr() ||
- ty.is_simd()
+ ty.is_simd() ||
+ common::type_is_zero_size(bcx.ccx(), ty)
{
// These sorts of types are immediates that we can store
// in an ValueRef without an alloca.
analyzer.lvalue_temps
}
-struct TempAnalyzer {
+// Per-temp analysis deciding which MIR temps must live in an alloca
+// (lvalue) vs. which can stay as immediate operands.  Now carries the
+// MIR body and block so the analysis can query operand types (used by
+// `rvalue_creates_operand`).
+struct TempAnalyzer<'mir, 'bcx, 'tcx: 'mir + 'bcx> {
+ mir: &'mir mir::Mir<'tcx>,
+ bcx: Block<'bcx, 'tcx>,
+// temps that were determined to require a stack slot
lvalue_temps: BitVector,
+// temps that have been assigned at least once
seen_assigned: BitVector
}
-impl TempAnalyzer {
- fn new(temp_count: usize) -> TempAnalyzer {
+impl<'mir, 'bcx, 'tcx> TempAnalyzer<'mir, 'bcx, 'tcx> {
+ // Construct an analyzer over `mir`/`bcx`; both bit vectors are sized
+ // to the number of temp declarations.
+ fn new(mir: &'mir mir::Mir<'tcx>,
+ bcx: Block<'bcx, 'tcx>,
+ temp_count: usize) -> TempAnalyzer<'mir, 'bcx, 'tcx> {
TempAnalyzer {
+ mir: mir,
+ bcx: bcx,
lvalue_temps: BitVector::new(temp_count),
seen_assigned: BitVector::new(temp_count)
}
}
}
-impl<'tcx> Visitor<'tcx> for TempAnalyzer {
+impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for TempAnalyzer<'mir, 'bcx, 'tcx> {
fn visit_assign(&mut self,
block: mir::BasicBlock,
lvalue: &mir::Lvalue<'tcx>,
match *lvalue {
mir::Lvalue::Temp(index) => {
self.mark_assigned(index as usize);
- if !rvalue::rvalue_creates_operand(rvalue) {
+ if !rvalue::rvalue_creates_operand(self.mir, self.bcx, rvalue) {
self.mark_as_lvalue(index as usize);
}
}
use self::lvalue::{LvalueRef, get_dataptr, get_meta};
use rustc_mir::traversal;
-use self::operand::OperandRef;
+use self::operand::{OperandRef, OperandValue};
#[derive(Clone)]
pub enum CachedMir<'mir, 'tcx: 'mir> {
TempRef::Lvalue(LvalueRef::alloca(&bcx,
mty,
&format!("temp{:?}", i)))
+ } else if common::type_is_zero_size(bcx.ccx(), mty) {
+ // Zero-size temporaries aren't always initialized, which
+ // doesn't matter because they don't contain data, but
+ // we need something in the operand.
+ let op = OperandRef {
+ val: OperandValue::Immediate(common::C_nil(bcx.ccx())),
+ ty: mty
+ };
+ TempRef::Operand(Some(op))
} else {
// If this is an immediate temp, we do not create an
// alloca in advance. Instead we wait until we see the
use asm;
use base;
use callee::Callee;
-use common::{self, C_uint, BlockAndBuilder, Result};
+use common::{self, C_uint, Block, BlockAndBuilder, Result};
use datum::{Datum, Lvalue};
use debuginfo::DebugLoc;
use declare;
use tvec;
use value::Value;
use Disr;
+use glue;
use super::MirContext;
use super::operand::{OperandRef, OperandValue};
}
_ => {
- assert!(rvalue_creates_operand(rvalue));
+ bcx.with_block(|bcx| {
+ assert!(rvalue_creates_operand(&self.mir, bcx, rvalue));
+ });
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc);
self.store_operand(&bcx, dest.llval, temp);
bcx
debug_loc: DebugLoc)
-> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>)
{
- assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);
+ bcx.with_block(|bcx| {
+ assert!(rvalue_creates_operand(&self.mir, bcx, rvalue),
+ "cannot trans {:?} to operand", rvalue);
+ });
match *rvalue {
mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
(bcx, operand)
}
- mir::Rvalue::Use(..) |
+ mir::Rvalue::Use(ref operand) => {
+ let operand = self.trans_operand(&bcx, operand);
+ (bcx, operand)
+ }
mir::Rvalue::Repeat(..) |
mir::Rvalue::Aggregate(..) |
mir::Rvalue::Slice { .. } |
}
}
-pub fn rvalue_creates_operand<'tcx>(rvalue: &mir::Rvalue<'tcx>) -> bool {
+/// Returns true if `rvalue` can be translated directly to an operand
+/// (no alloca required).  `Use` is now permitted when the operand's
+/// monomorphized type needs no drop glue — see the comment on the
+/// `Use` arm below.
+pub fn rvalue_creates_operand<'bcx, 'tcx>(mir: &mir::Mir<'tcx>, bcx: Block<'bcx, 'tcx>,
+ rvalue: &mir::Rvalue<'tcx>) -> bool {
match *rvalue {
mir::Rvalue::Ref(..) |
mir::Rvalue::Len(..) |
mir::Rvalue::UnaryOp(..) |
mir::Rvalue::Box(..) =>
true,
mir::Rvalue::Repeat(..) |
mir::Rvalue::Aggregate(..) |
mir::Rvalue::Slice { .. } |
mir::Rvalue::InlineAsm { .. } =>
false,
+ mir::Rvalue::Use(ref operand) => {
+ let ty = mir.operand_ty(bcx.tcx(), operand);
+ let ty = bcx.monomorphize(&ty);
+ // Types that don't need dropping can just be an operand,
+ // this allows temporary lvalues, used as rvalues, to
+ // avoid a stack slot when it's unnecessary
+ !glue::type_needs_drop(bcx.tcx(), ty)
+ }
}
// (*) this is only true if the type is suitable
- // (**) we need to zero-out the source operand after moving, so we are restricted to either
- // ensuring all users of `Use` zero it out themselves or not allowing to “create” operand for
- // it.
}
// except according to those terms.
use rustc::mir::repr as mir;
-use common::BlockAndBuilder;
+use common::{self, BlockAndBuilder};
use debuginfo::DebugLoc;
use super::MirContext;
bcx
}
TempRef::Operand(Some(_)) => {
- span_bug!(statement.span,
- "operand {:?} already assigned",
- rvalue);
+ let ty = self.mir.lvalue_ty(bcx.tcx(), lvalue);
+ let ty = bcx.monomorphize(&ty.to_ty(bcx.tcx()));
+
+ if !common::type_is_zero_size(bcx.ccx(), ty) {
+ span_bug!(statement.span,
+ "operand {:?} already assigned",
+ rvalue);
+ } else {
+ // If the type is zero-sized, it's already been set here,
+ // but we still need to make sure we translate the operand
+ let (bcx, _) = self.trans_rvalue_operand(bcx, rvalue,
+ debug_loc);
+ bcx
+ }
}
}
}