use libc::c_uint;
use llvm::{self, ValueRef};
+use rustc_data_structures::fnv::FnvHashSet;
use rustc_mir::repr as mir;
use rustc_mir::tcx::LvalueTy;
-use std::cell::Cell;
use trans::base;
use trans::build;
use trans::common::{self, Block};
use trans::type_of;
use self::lvalue::LvalueRef;
+use self::operand::OperandRef;
// FIXME DebugLoc is always None right now
/// An LLVM alloca for each MIR `VarDecl`
vars: Vec<LvalueRef<'tcx>>,
- /// An LLVM alloca for each MIR `TempDecl`
- temps: Vec<LvalueRef<'tcx>>,
+ /// The location where each MIR `TempDecl` is stored. This is
+ /// usually an `LvalueRef` representing an alloca, but not always:
+ /// sometimes we can skip the alloca and just store the value
+ /// directly using an `OperandRef`, which makes for tighter LLVM
+ /// IR. The conditions for using an `OperandRef` are as follows:
+ ///
+ /// - the type of the temporary must be judged "immediate" by `type_is_immediate`
+ /// - the operand must never be referenced indirectly
+ /// - we should not take its address using the `&` operator
+ /// - nor should it appear in an lvalue path like `tmp.a`
+ /// - the operand must be defined by an rvalue that can generate immediate
+ /// values
+ temps: Vec<TempRef<'tcx>>,
/// The arguments to the function; as args are lvalues, these are
/// always indirect, though we try to avoid creating an alloca
args: Vec<LvalueRef<'tcx>>,
}
+/// How a MIR `TempDecl` is materialized during translation: either
+/// backed by a stack alloca (`Lvalue`) or kept as an immediate SSA
+/// value (`Operand`). `Operand(None)` means the temp's defining
+/// rvalue has not been translated yet; it is filled in with
+/// `Some(operand)` at the assignment site.
+enum TempRef<'tcx> {
+ Lvalue(LvalueRef<'tcx>),
+ Operand(Option<OperandRef<'tcx>>),
+}
+
///////////////////////////////////////////////////////////////////////////
pub fn trans_mir<'bcx, 'tcx>(bcx: Block<'bcx, 'tcx>) {
let mir_blocks = bcx.mir().all_basic_blocks();
+ // Decide which temps must live in a stack alloca (lvalue) rather than
+ // as an SSA immediate.
+ // FIXME: conservative placeholder — every temp index is put into the
+ // lvalue set, so no temp actually takes the cheaper Operand path yet;
+ // replace with a real analysis of indirect uses / address-taking.
+ // NOTE(review): this hunk reads a `mir` binding that is not visible
+ // here — presumably `bcx.mir()`; confirm against the full function.
+ let lvalue_temps: FnvHashSet<usize> = (0..mir.temp_decls.len()).collect();
+
// Allocate variable and temp allocas
let vars = mir.var_decls.iter()
.map(|decl| (bcx.monomorphize(&decl.ty), decl.name))
let temps = mir.temp_decls.iter()
.map(|decl| bcx.monomorphize(&decl.ty))
.enumerate()
- .map(|(i, mty)| LvalueRef::alloca(bcx, mty, &format!("temp{:?}", i)))
+ .map(|(i, mty)| if lvalue_temps.contains(&i) {
+ TempRef::Lvalue(LvalueRef::alloca(bcx,
+ mty,
+ &format!("temp{:?}", i)))
+ } else {
+ // If this is an immediate temp, we do not create an
+ // alloca in advance. Instead we wait until we see the
+ // definition and update the operand there.
+ TempRef::Operand(None)
+ })
.collect();
let args = arg_value_refs(bcx, mir);
unimplemented!()
}
+ mir::Rvalue::Aggregate(_, ref operands) => {
+ // Store each aggregate operand into its field of `lldest`
+ // (the GEP at [0, i] selects field i of the destination).
+ for (i, operand) in operands.iter().enumerate() {
+ let lldest_i = build::GEPi(bcx, lldest, &[0, i]);
+ self.trans_operand_into(bcx, lldest_i, operand);
+ }
+ bcx
+ }
+
+ mir::Rvalue::Slice { ref input, from_start, from_end } => {
+ let ccx = bcx.ccx();
+ let input = self.trans_lvalue(bcx, input);
+ let (llbase, lllen) = tvec::get_base_and_len(bcx,
+ input.llval,
+ input.ty.to_ty(bcx.tcx()));
+ // New base skips `from_start` elements; new length is
+ // len - (from_start + from_end).
+ let llbase1 = build::GEPi(bcx, llbase, &[from_start]);
+ let adj = common::C_uint(ccx, from_start + from_end);
+ let lllen1 = build::Sub(bcx, lllen, adj, DebugLoc::None);
+ // Write the (ptr, len) pair into the fat-pointer destination.
+ build::Store(bcx, llbase1, build::GEPi(bcx, lldest, &[0, abi::FAT_PTR_ADDR]));
+ build::Store(bcx, lllen1, build::GEPi(bcx, lldest, &[0, abi::FAT_PTR_EXTRA]));
+ bcx
+ }
+
+ mir::Rvalue::InlineAsm(inline_asm) => {
+ asm::trans_inline_asm(bcx, inline_asm)
+ }
+
+ _ => {
+ // Every remaining rvalue kind can be built as an immediate
+ // operand and then stored into the destination.
+ assert!(self.rvalue_creates_operand(rvalue));
+ let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
+ build::Store(bcx, temp.llval, lldest);
+ bcx
+ }
+ }
+ }
+
+ /// Returns true if `rvalue` can be translated straight to an
+ /// `OperandRef` (an immediate value) via `trans_rvalue_operand`,
+ /// without needing a destination lvalue to write into. Variants
+ /// marked (*) are only truly immediate when the type itself is
+ /// suitable; see the note at the bottom of the match.
+ pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
+ match *rvalue {
+ mir::Rvalue::Use(..) | // (*)
+ mir::Rvalue::Ref(..) |
+ mir::Rvalue::Len(..) |
+ mir::Rvalue::Cast(..) | // (*)
+ mir::Rvalue::BinaryOp(..) |
+ mir::Rvalue::UnaryOp(..) |
+ mir::Rvalue::Box(..) =>
+ true,
+ mir::Rvalue::Repeat(..) |
+ mir::Rvalue::Aggregate(..) |
+ mir::Rvalue::Slice { .. } |
+ mir::Rvalue::InlineAsm(..) =>
+ false,
+ }
+
+ // (*) this is only true if the type is suitable
+ }
+
+ /// Translates `rvalue` to an immediate `OperandRef` instead of
+ /// storing through a destination pointer. Callers must first check
+ /// `rvalue_creates_operand`; the assert below enforces this.
+ pub fn trans_rvalue_operand(&mut self,
+ bcx: Block<'bcx, 'tcx>,
+ rvalue: &mir::Rvalue<'tcx>)
+ -> (Block<'bcx, 'tcx>, OperandRef<'tcx>)
+ {
+ assert!(self.rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);
+
+ match *rvalue {
+ mir::Rvalue::Use(ref operand) => {
+ let operand = self.trans_operand(bcx, operand);
+ (bcx, operand)
+ }
+
+ mir::Rvalue::Cast(..) => {
+ unimplemented!()
+ }
+
mir::Rvalue::Ref(_, _, ref lvalue) => {
let tr_lvalue = self.trans_lvalue(bcx, lvalue);
+
// Note: lvalues are indirect, so storing the `llval` into the
// destination effectively creates a reference.
- build::Store(bcx, tr_lvalue.llval, lldest);
- bcx
+ (bcx, OperandRef {
+ llval: tr_lvalue.llval,
+ ty: tr_lvalue.ty.to_ty(bcx.tcx()),
+ })
}
mir::Rvalue::Len(ref lvalue) => {
+ // NOTE(review): `tr_lvalue` is bound on context lines elided
+ // from this hunk — presumably `self.trans_lvalue(bcx, lvalue)`.
let (_, lllen) = tvec::get_base_and_len(bcx,
tr_lvalue.llval,
tr_lvalue.ty.to_ty(bcx.tcx()));
- build::Store(bcx, lllen, lldest);
- bcx
+ // The length of any slice/array is a usize.
+ (bcx, OperandRef {
+ llval: lllen,
+ ty: bcx.tcx().types.usize,
+ })
}
mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
mir::BinOp::Gt => base::compare_scalar_types(bcx, lhs.llval, rhs.llval, lhs.ty,
hir::BiGt, binop_debug_loc),
};
- build::Store(bcx, llval, lldest);
- bcx
+ // Binary ops yield a value of the (common) operand type.
+ (bcx, OperandRef {
+ llval: llval,
+ ty: lhs.ty,
+ })
}
mir::Rvalue::UnaryOp(op, ref operand) => {
build::Neg(bcx, operand.llval, debug_loc)
}
};
- build::Store(bcx, llval, lldest);
- bcx
+ // Unary ops preserve the operand's type.
+ (bcx, OperandRef {
+ llval: llval,
+ ty: operand.ty,
+ })
}
mir::Rvalue::Box(content_ty) => {
- let content_ty: Ty<'tcx> = content_ty;
+ // Monomorphize: `content_ty` may still contain type
+ // parameters at this point.
+ let content_ty: Ty<'tcx> = bcx.monomorphize(&content_ty);
let llty = type_of::type_of(bcx.ccx(), content_ty);
let llsize = machine::llsize_of(bcx.ccx(), llty);
let align = type_of::align_of(bcx.ccx(), content_ty);
llsize,
llalign,
DebugLoc::None);
- build::Store(bcx, llval, lldest);
- bcx
- }
-
- mir::Rvalue::Aggregate(_, ref operands) => {
- for (i, operand) in operands.iter().enumerate() {
- let lldest_i = build::GEPi(bcx, lldest, &[0, i]);
- self.trans_operand_into(bcx, lldest_i, operand);
- }
- bcx
+ // NOTE(review): `box_ty` comes from context elided in this
+ // hunk — presumably the Box<content_ty> type; confirm in the
+ // full function.
+ (bcx, OperandRef {
+ llval: llval,
+ ty: box_ty,
+ })
}
- mir::Rvalue::Slice { ref input, from_start, from_end } => {
- let ccx = bcx.ccx();
- let input = self.trans_lvalue(bcx, input);
- let (llbase, lllen) = tvec::get_base_and_len(bcx,
- input.llval,
- input.ty.to_ty(bcx.tcx()));
- let llbase1 = build::GEPi(bcx, llbase, &[from_start]);
- let adj = common::C_uint(ccx, from_start + from_end);
- let lllen1 = build::Sub(bcx, lllen, adj, DebugLoc::None);
- build::Store(bcx, llbase1, build::GEPi(bcx, lldest, &[0, abi::FAT_PTR_ADDR]));
- build::Store(bcx, lllen1, build::GEPi(bcx, lldest, &[0, abi::FAT_PTR_EXTRA]));
- bcx
- }
-
- mir::Rvalue::InlineAsm(inline_asm) => {
- asm::trans_inline_asm(bcx, inline_asm)
+ // These rvalues cannot be produced as immediates; callers must
+ // go through the store-to-destination path instead.
+ mir::Rvalue::Repeat(..) |
+ mir::Rvalue::Aggregate(..) |
+ mir::Rvalue::Slice { .. } |
+ mir::Rvalue::InlineAsm(..) => {
+ bcx.tcx().sess.bug(&format!("cannot generate operand from rvalue {:?}", rvalue));
}
}
}
use trans::glue;
use super::MirContext;
+use super::TempRef;
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn trans_statement(&mut self,
match statement.kind {
mir::StatementKind::Assign(ref lvalue, ref rvalue) => {
- let tr_dest = self.trans_lvalue(bcx, lvalue);
- self.trans_rvalue(bcx, tr_dest.llval, rvalue);
- bcx
+ // Temps may be stored as immediates (TempRef::Operand) rather
+ // than allocas, so assignments to a temp cannot go through the
+ // generic trans_lvalue path.
+ match *lvalue {
+ mir::Lvalue::Temp(index) => {
+ let index = index as usize;
+ match self.temps[index] {
+ TempRef::Lvalue(tr_dest) => {
+ self.trans_rvalue(bcx, tr_dest.llval, rvalue)
+ }
+ TempRef::Operand(None) => {
+ // First definition of an operand temp: build the
+ // value directly and record it for later uses.
+ let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue);
+ self.temps[index] = TempRef::Operand(Some(operand));
+ bcx
+ }
+ TempRef::Operand(Some(_)) => {
+ // Operand temps are single-assignment; a second
+ // write indicates a compiler bug.
+ bcx.tcx().sess.span_bug(
+ statement.span,
+ &format!("operand {:?} already assigned", rvalue));
+ }
+ }
+ }
+ _ => {
+ // Vars, args, statics, projections: always memory-backed,
+ // so translate the destination lvalue and store into it.
+ let tr_dest = self.trans_lvalue(bcx, lvalue);
+ self.trans_rvalue(bcx, tr_dest.llval, rvalue)
+ }
+ }
}
mir::StatementKind::Drop(mir::DropKind::Deep, ref lvalue) => {