// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
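
//! Translation of MIR rvalues into LLVM IR.
//!
//! Rvalues are translated either directly into a destination lvalue
//! (`trans_rvalue`) or into a by-value `OperandRef`
//! (`trans_rvalue_operand`); `rvalue_creates_operand` reports which
//! rvalues the operand path can handle.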

use llvm::{self, ValueRef};
use rustc::ty::{self, Ty};
use rustc::ty::cast::{CastTy, IntTy};
use rustc::mir::repr as mir;

use asm;
use base;
use callee::Callee;
use closure;
use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder, Result};
use debuginfo::DebugLoc;
use adt;
use machine;
use type_of;
use tvec;
use value::Value;
use Disr;

use super::MirContext;
use super::constant::const_scalar_checked_binop;
use super::operand::{OperandRef, OperandValue};
use super::lvalue::{LvalueRef, get_dataptr};

impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
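    /// Translate `rvalue` into the memory pointed to by `dest`. This is
    /// the general path, valid for every kind of rvalue; rvalues that
    /// fit in an immediate are routed through `trans_rvalue_operand`
    /// by the catch-all arm below.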
    pub fn trans_rvalue(&mut self,
                        bcx: BlockAndBuilder<'bcx, 'tcx>,
                        dest: LvalueRef<'tcx>,
                        rvalue: &mir::Rvalue<'tcx>,
                        debug_loc: DebugLoc)
                        -> BlockAndBuilder<'bcx, 'tcx>
    {
        debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
               Value(dest.llval), rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let tr_operand = self.trans_operand(&bcx, operand);
                // FIXME: consider not copying constants through the stack.
                // (This is fixable by translating constants into
                // OperandValue::Ref; it is unclear why we don't do that yet.)
                self.store_operand(&bcx, dest.llval, tr_operand);
                bcx
            }

            mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
                let cast_ty = bcx.monomorphize(&cast_ty);

                if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
                    // Into-coerce of a thin pointer to a fat pointer -- just
                    // use the operand path.
                    let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc);
                    self.store_operand(&bcx, dest.llval, temp);
                    return bcx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR translation, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.trans_operand(&bcx, source);
                let operand = operand.pack_if_pair(&bcx);
                bcx.with_block(|bcx| {
                    match operand.val {
                        OperandValue::Pair(..) => bug!(),
                        OperandValue::Immediate(llval) => {
                            // Unsize from an immediate structure. We don't
                            // really need a temporary alloca here, but
                            // avoiding it would require us to have
                            // `coerce_unsized_into` use extractvalue to
                            // index into the struct, and this case isn't
                            // important enough for it.
                            debug!("trans_rvalue: creating ugly alloca");
                            let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp");
                            base::store_ty(bcx, llval, lltemp, operand.ty);
                            base::coerce_unsized_into(bcx,
                                                      lltemp, operand.ty,
                                                      dest.llval, cast_ty);
                        }
                        OperandValue::Ref(llref) => {
                            base::coerce_unsized_into(bcx,
                                                      llref, operand.ty,
                                                      dest.llval, cast_ty);
                        }
                    }
                });
                bcx
            }

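            // `[elem; count]`: store one translated copy of `elem` into
            // every slot of the destination array.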
            mir::Rvalue::Repeat(ref elem, ref count) => {
                let tr_elem = self.trans_operand(&bcx, elem);
                let size = count.value.as_u64(bcx.tcx().sess.target.uint_type);
                let size = C_uint(bcx.ccx(), size);
                let base = get_dataptr(&bcx, dest.llval);
                let bcx = bcx.map_block(|block| {
                    tvec::iter_vec_raw(block, base, tr_elem.ty, size, |block, llslot, _| {
                        self.store_operand_direct(block, llslot, tr_elem);
                        block
                    })
                });
                bcx
            }

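            // Aggregates (ADTs, tuples, closures, arrays): write the
            // discriminant if there is one, then store each field that
            // is not zero-sized into place.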
            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                match *kind {
                    mir::AggregateKind::Adt(adt_def, index, _) => {
                        let repr = adt::represent_type(bcx.ccx(), dest.ty.to_ty(bcx.tcx()));
                        let disr = Disr::from(adt_def.variants[index].disr_val);
                        bcx.with_block(|bcx| {
                            adt::trans_set_discr(bcx, &repr, dest.llval, disr);
                        });
                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPs for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx(), op.ty) {
                                let val = adt::MaybeSizedValue::sized(dest.llval);
                                let lldest_i = adt::trans_field_ptr_builder(&bcx, &repr,
                                                                            val, disr, i);
                                self.store_operand(&bcx, lldest_i, op);
                            }
                        }
                    },
                    _ => {
                        // FIXME: shouldn't need to manually trigger closure instantiations.
                        if let mir::AggregateKind::Closure(def_id, substs) = *kind {
                            closure::trans_closure_body_via_mir(bcx.ccx(),
                                                                def_id,
                                                                bcx.monomorphize(&substs));
                        }

                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPs for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx(), op.ty) {
                                // Note: perhaps this should be StructGep, but
                                // note that in some cases the values here will
                                // not be structs but arrays.
                                let dest = bcx.gepi(dest.llval, &[0, i]);
                                self.store_operand(&bcx, dest, op);
                            }
                        }
                    }
                }
                bcx
            }

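            // Inline assembly: outputs become lvalues written in place,
            // inputs become immediate operands.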
            mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => {
                let outputs = outputs.iter().map(|output| {
                    let lvalue = self.trans_lvalue(&bcx, output);
                    (lvalue.llval, lvalue.ty.to_ty(bcx.tcx()))
                }).collect();

                let input_vals = inputs.iter().map(|input| {
                    self.trans_operand(&bcx, input).immediate()
                }).collect();

                bcx.with_block(|bcx| {
                    asm::trans_inline_asm(bcx, asm, outputs, input_vals);
                });
                bcx
            }

            _ => {
                assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue));
                let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc);
                self.store_operand(&bcx, dest.llval, temp);
                bcx
            }
        }
    }
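
    /// Translate `rvalue` into a fresh `OperandRef` (an immediate or a
    /// pair of immediates) instead of writing it into a destination.
    /// Callers must first check `rvalue_creates_operand`.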
    pub fn trans_rvalue_operand(&mut self,
                                bcx: BlockAndBuilder<'bcx, 'tcx>,
                                rvalue: &mir::Rvalue<'tcx>,
                                debug_loc: DebugLoc)
                                -> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>)
    {
        assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue),
                "cannot trans {:?} to operand", rvalue);

        match *rvalue {
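            // Casts: reifying a fn item to a fn pointer, dropping
            // `unsafe` from a fn pointer, unsizing, and the numeric,
            // enum and pointer conversions grouped under `Misc`.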
            mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
                let operand = self.trans_operand(&bcx, source);
                debug!("cast operand is {:?}", operand);
                let cast_ty = bcx.monomorphize(&cast_ty);

                let val = match *kind {
                    mir::CastKind::ReifyFnPointer => {
                        match operand.ty.sty {
                            ty::TyFnDef(def_id, substs, _) => {
                                OperandValue::Immediate(
                                    Callee::def(bcx.ccx(), def_id, substs)
                                        .reify(bcx.ccx()).val)
                            }
                            _ => {
                                bug!("{} cannot be reified to a fn ptr", operand.ty)
                            }
                        }
                    }
                    mir::CastKind::UnsafeFnPointer => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
                    mir::CastKind::Unsize => {
                        // Unsize targets other than a fat pointer currently
                        // can't be operands.
                        assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty));

                        match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // Unsize from a fat pointer -- this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example,
                                //   &'a fmt::Debug+Send => &'a fmt::Debug,
                                // so we need to pointercast the base to ensure
                                // the types match up.
                                let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx(), cast_ty);
                                let lldata = bcx.pointercast(lldata, llcast_ty);
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Immediate(lldata) => {
                                // "Standard" unsize of a thin pointer.
                                let (lldata, llextra) = bcx.with_block(|bcx| {
                                    base::unsize_thin_ptr(bcx, lldata,
                                                          operand.ty, cast_ty)
                                });
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Ref(_) => {
                                bug!("by-ref operand {:?} in trans_rvalue_operand",
                                     operand);
                            }
                        }
                    }
                    mir::CastKind::Misc if common::type_is_fat_ptr(bcx.tcx(), operand.ty) => {
                        let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty);
                        let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty);
                        if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val {
                            if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
                                let ll_cft = ll_cast_ty.field_types();
                                let ll_fft = ll_from_ty.field_types();
                                let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
                                assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
                                OperandValue::Pair(data_cast, meta_ptr)
                            } else { // cast to thin-ptr
                                // Cast of a fat pointer to a thin pointer is an
                                // extraction of the data pointer, followed by a
                                // pointercast to the desired pointer type.
                                let llval = bcx.pointercast(data_ptr, ll_cast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("Unexpected non-Pair operand")
                        }
                    }
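                    // All remaining `Misc` casts operate on immediates:
                    // C-like enums are first lowered to their
                    // discriminant, then converted with the appropriate
                    // int/float/pointer instruction.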
                    mir::CastKind::Misc => {
                        debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty));
                        let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
                        let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty);
                        let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty);
                        let (llval, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in {
                            let repr = adt::represent_type(bcx.ccx(), operand.ty);
                            let discr = match operand.val {
                                OperandValue::Immediate(llval) => llval,
                                OperandValue::Ref(llptr) => {
                                    bcx.with_block(|bcx| {
                                        adt::trans_get_discr(bcx, &repr, llptr, None, true)
                                    })
                                }
                                OperandValue::Pair(..) => bug!("Unexpected Pair operand")
                            };
                            (discr, adt::is_discr_signed(&repr))
                        } else {
                            (operand.immediate(), operand.ty.is_signed())
                        };

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => {
                                let srcsz = ll_t_in.int_width();
                                let dstsz = ll_t_out.int_width();
                                if srcsz == dstsz {
                                    bcx.bitcast(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.trunc(llval, ll_t_out)
                                } else if signed {
                                    bcx.sext(llval, ll_t_out)
                                } else {
                                    bcx.zext(llval, ll_t_out)
                                }
                            }
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = ll_t_in.float_width();
                                let dstsz = ll_t_out.float_width();
                                if dstsz > srcsz {
                                    bcx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Ptr(_), CastTy::Ptr(_)) |
                            (CastTy::FnPtr, CastTy::Ptr(_)) |
                            (CastTy::RPtr(_), CastTy::Ptr(_)) =>
                                bcx.pointercast(llval, ll_t_out),
                            (CastTy::Ptr(_), CastTy::Int(_)) |
                            (CastTy::FnPtr, CastTy::Int(_)) =>
                                bcx.ptrtoint(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Ptr(_)) =>
                                bcx.inttoptr(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) if signed =>
                                bcx.sitofp(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) =>
                                bcx.uitofp(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(IntTy::I)) =>
                                bcx.fptosi(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(_)) =>
                                bcx.fptoui(llval, ll_t_out),
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                let operand = OperandRef {
                    val: val,
                    ty: cast_ty
                };
                (bcx, operand)
            }

            mir::Rvalue::Ref(_, bk, ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);

                let ty = tr_lvalue.ty.to_ty(bcx.tcx());
                let ref_ty = bcx.tcx().mk_ref(
                    bcx.tcx().mk_region(ty::ReErased),
                    ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
                );

                // Note: lvalues are indirect, so storing the `llval` into the
                // destination effectively creates a reference.
                let operand = if common::type_is_sized(bcx.tcx(), ty) {
                    OperandRef {
                        val: OperandValue::Immediate(tr_lvalue.llval),
                        ty: ref_ty,
                    }
                } else {
                    OperandRef {
                        val: OperandValue::Pair(tr_lvalue.llval,
                                                tr_lvalue.llextra),
                        ty: ref_ty,
                    }
                };
                (bcx, operand)
            }

            mir::Rvalue::Len(ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
                let operand = OperandRef {
                    val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx())),
                    ty: bcx.tcx().types.usize,
                };
                (bcx, operand)
            }

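            // Fat pointers are compared component-wise (data pointer,
            // then metadata); all other binary operations go through the
            // scalar path in `trans_scalar_binop`.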
            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bcx, lhs);
                let rhs = self.trans_operand(&bcx, rhs);
                let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) {
                    match (lhs.val, rhs.val) {
                        (OperandValue::Pair(lhs_addr, lhs_extra),
                         OperandValue::Pair(rhs_addr, rhs_extra)) => {
                            bcx.with_block(|bcx| {
                                base::compare_fat_ptrs(bcx,
                                                       lhs_addr, lhs_extra,
                                                       rhs_addr, rhs_extra,
                                                       lhs.ty, op.to_hir_binop(),
                                                       debug_loc)
                            })
                        }
                        _ => bug!()
                    }
                } else {
                    self.trans_scalar_binop(&bcx, op,
                                            lhs.immediate(), rhs.immediate(),
                                            lhs.ty)
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty),
                };
                (bcx, operand)
            }

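            // Checked arithmetic produces a pair of the result value and
            // an overflow flag; the flag is consumed by a MIR `Assert`.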
            mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bcx, lhs);
                let rhs = self.trans_operand(&bcx, rhs);
                let result = self.trans_scalar_checked_binop(&bcx, op,
                                                             lhs.immediate(), rhs.immediate(),
                                                             lhs.ty);
                let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty);
                let operand_ty = bcx.tcx().mk_tup(vec![val_ty, bcx.tcx().types.bool]);
                let operand = OperandRef {
                    val: result,
                    ty: operand_ty
                };
                (bcx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.trans_operand(&bcx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.ty.is_fp();
                let llval = match op {
                    mir::UnOp::Not => bcx.not(lloperand),
                    mir::UnOp::Neg => if is_float {
                        bcx.fneg(lloperand)
                    } else {
                        bcx.neg(lloperand)
                    }
                };
                (bcx, OperandRef {
                    val: OperandValue::Immediate(llval),
                    ty: operand.ty,
                })
            }

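            // `box EXPR`: allocate enough heap memory for `content_ty`
            // (via the exchange-malloc lang item) and return the raw box
            // pointer as an immediate operand.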
            mir::Rvalue::Box(content_ty) => {
                let content_ty: Ty<'tcx> = bcx.monomorphize(&content_ty);
                let llty = type_of::type_of(bcx.ccx(), content_ty);
                let llsize = machine::llsize_of(bcx.ccx(), llty);
                let align = type_of::align_of(bcx.ccx(), content_ty);
                let llalign = C_uint(bcx.ccx(), align);
                let llty_ptr = llty.ptr_to();
                let box_ty = bcx.tcx().mk_box(content_ty);
                let mut llval = None;
                let bcx = bcx.map_block(|bcx| {
                    let Result { bcx, val } = base::malloc_raw_dyn(bcx,
                                                                   llty_ptr,
                                                                   box_ty,
                                                                   llsize,
                                                                   llalign,
                                                                   debug_loc);
                    llval = Some(val);
                    bcx
                });
                let operand = OperandRef {
                    val: OperandValue::Immediate(llval.unwrap()),
                    ty: box_ty,
                };
                (bcx, operand)
            }

            mir::Rvalue::Use(ref operand) => {
                let operand = self.trans_operand(&bcx, operand);
                (bcx, operand)
            }
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) |
            mir::Rvalue::InlineAsm { .. } => {
                bug!("cannot generate operand from rvalue {:?}", rvalue);
            }
        }
    }
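
    /// Translate a binary operation on scalar immediates into the
    /// corresponding LLVM instruction, dispatching on float vs. signed
    /// vs. unsigned integer where the operation requires it.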
    pub fn trans_scalar_binop(&mut self,
                              bcx: &BlockAndBuilder<'bcx, 'tcx>,
                              op: mir::BinOp,
                              lhs: ValueRef,
                              rhs: ValueRef,
                              input_ty: Ty<'tcx>) -> ValueRef {
        let is_float = input_ty.is_fp();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => if is_float {
                bcx.fadd(lhs, rhs)
            } else {
                bcx.add(lhs, rhs)
            },
            mir::BinOp::Sub => if is_float {
                bcx.fsub(lhs, rhs)
            } else {
                bcx.sub(lhs, rhs)
            },
            mir::BinOp::Mul => if is_float {
                bcx.fmul(lhs, rhs)
            } else {
                bcx.mul(lhs, rhs)
            },
            mir::BinOp::Div => if is_float {
                bcx.fdiv(lhs, rhs)
            } else if is_signed {
                bcx.sdiv(lhs, rhs)
            } else {
                bcx.udiv(lhs, rhs)
            },
            mir::BinOp::Rem => if is_float {
                bcx.frem(lhs, rhs)
            } else if is_signed {
                bcx.srem(lhs, rhs)
            } else {
                bcx.urem(lhs, rhs)
            },
            mir::BinOp::BitOr => bcx.or(lhs, rhs),
            mir::BinOp::BitAnd => bcx.and(lhs, rhs),
            mir::BinOp::BitXor => bcx.xor(lhs, rhs),
            mir::BinOp::Shl => {
                bcx.with_block(|bcx| {
                    common::build_unchecked_lshift(bcx,
                                                   lhs,
                                                   rhs,
                                                   DebugLoc::None)
                })
            }
            mir::BinOp::Shr => {
                bcx.with_block(|bcx| {
                    common::build_unchecked_rshift(bcx,
                                                   input_ty,
                                                   lhs,
                                                   rhs,
                                                   DebugLoc::None)
                })
            }
            mir::BinOp::Eq | mir::BinOp::Lt | mir::BinOp::Gt |
            mir::BinOp::Ne | mir::BinOp::Le | mir::BinOp::Ge => {
                bcx.with_block(|bcx| {
                    base::compare_scalar_types(bcx, lhs, rhs, input_ty,
                                               op.to_hir_binop(), DebugLoc::None)
                })
            }
        }
    }
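
    /// Translate an overflow-checked binary operation, yielding an
    /// `OperandValue::Pair` of the result and an `i1` overflow flag.
    /// For example, a checked `u32` addition becomes (roughly) a call
    /// to `llvm.uadd.with.overflow.i32`; the consuming MIR `Assert`
    /// terminator, not this function, branches on the returned flag.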
    pub fn trans_scalar_checked_binop(&mut self,
                                      bcx: &BlockAndBuilder<'bcx, 'tcx>,
                                      op: mir::BinOp,
                                      lhs: ValueRef,
                                      rhs: ValueRef,
                                      input_ty: Ty<'tcx>) -> OperandValue {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bcx.ccx().check_overflow() {
            let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, C_bool(bcx.ccx(), false));
        }

        // First try performing the operation on constants, which
        // will only succeed if both operands are constant.
        // This is necessary to determine when an overflow Assert
        // will always panic at runtime, and produce a warning.
        if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) {
            return OperandValue::Pair(val, C_bool(bcx.ccx(), of));
        }

        let (val, of) = match op {
            // These are checked using intrinsics.
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!()
                };
                let intrinsic = get_overflow_intrinsic(oop, bcx, input_ty);
                let res = bcx.call(intrinsic, &[lhs, rhs], None);

                (bcx.extract_value(res, 0),
                 bcx.extract_value(res, 1))
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = val_ty(lhs);
                let rhs_llty = val_ty(rhs);
                let invert_mask = bcx.with_block(|bcx| {
                    common::shift_mask_val(bcx, lhs_llty, rhs_llty, true)
                });
                let outer_bits = bcx.and(rhs, invert_mask);

                let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
                let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => {
                bug!("Operator `{:?}` is not a checkable operator", op)
            }
        };

        OperandValue::Pair(val, of)
    }
}
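
/// Returns `true` if `rvalue` can be translated as an `OperandRef`
/// (i.e. without writing through a destination in memory).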
pub fn rvalue_creates_operand<'bcx, 'tcx>(_mir: &mir::Mir<'tcx>,
                                          _bcx: &BlockAndBuilder<'bcx, 'tcx>,
                                          rvalue: &mir::Rvalue<'tcx>) -> bool {
    match *rvalue {
        mir::Rvalue::Ref(..) |
        mir::Rvalue::Len(..) |
        mir::Rvalue::Cast(..) | // (*)
        mir::Rvalue::BinaryOp(..) |
        mir::Rvalue::CheckedBinaryOp(..) |
        mir::Rvalue::UnaryOp(..) |
        mir::Rvalue::Box(..) |
        mir::Rvalue::Use(..) =>
            true,
        mir::Rvalue::Repeat(..) |
        mir::Rvalue::Aggregate(..) |
        mir::Rvalue::InlineAsm { .. } =>
            false,
    }

    // (*) this is only true if the type is suitable
}
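
/// The overflow-checked arithmetic operations that map onto LLVM's
/// `*.with.overflow` intrinsics. `get_overflow_intrinsic` picks the
/// concrete intrinsic, first normalizing `isize`/`usize` to the
/// target's pointer width.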
#[derive(Copy, Clone)]
enum OverflowOp {
    Add, Sub, Mul
}

fn get_overflow_intrinsic(oop: OverflowOp, bcx: &BlockAndBuilder, ty: Ty) -> ValueRef {
    use syntax::ast::IntTy::*;
    use syntax::ast::UintTy::*;
    use rustc::ty::{TyInt, TyUint};

    let tcx = bcx.tcx();

    let new_sty = match ty.sty {
        TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
            "32" => TyInt(I32),
            "64" => TyInt(I64),
            _ => panic!("unsupported target word size")
        },
        TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
            "32" => TyUint(U32),
            "64" => TyUint(U64),
            _ => panic!("unsupported target word size")
        },
        ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
        _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
    };

    let name = match oop {
        OverflowOp::Add => match new_sty {
            TyInt(I8) => "llvm.sadd.with.overflow.i8",
            TyInt(I16) => "llvm.sadd.with.overflow.i16",
            TyInt(I32) => "llvm.sadd.with.overflow.i32",
            TyInt(I64) => "llvm.sadd.with.overflow.i64",

            TyUint(U8) => "llvm.uadd.with.overflow.i8",
            TyUint(U16) => "llvm.uadd.with.overflow.i16",
            TyUint(U32) => "llvm.uadd.with.overflow.i32",
            TyUint(U64) => "llvm.uadd.with.overflow.i64",

            _ => unreachable!(),
        },
        OverflowOp::Sub => match new_sty {
            TyInt(I8) => "llvm.ssub.with.overflow.i8",
            TyInt(I16) => "llvm.ssub.with.overflow.i16",
            TyInt(I32) => "llvm.ssub.with.overflow.i32",
            TyInt(I64) => "llvm.ssub.with.overflow.i64",

            TyUint(U8) => "llvm.usub.with.overflow.i8",
            TyUint(U16) => "llvm.usub.with.overflow.i16",
            TyUint(U32) => "llvm.usub.with.overflow.i32",
            TyUint(U64) => "llvm.usub.with.overflow.i64",

            _ => unreachable!(),
        },
        OverflowOp::Mul => match new_sty {
            TyInt(I8) => "llvm.smul.with.overflow.i8",
            TyInt(I16) => "llvm.smul.with.overflow.i16",
            TyInt(I32) => "llvm.smul.with.overflow.i32",
            TyInt(I64) => "llvm.smul.with.overflow.i64",

            TyUint(U8) => "llvm.umul.with.overflow.i8",
            TyUint(U16) => "llvm.umul.with.overflow.i16",
            TyUint(U32) => "llvm.umul.with.overflow.i32",
            TyUint(U64) => "llvm.umul.with.overflow.i64",

            _ => unreachable!(),
        },
    };

    bcx.ccx().get_intrinsic(&name)
}