1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use llvm::{self, ValueRef};
12 use rustc::ty::{self, Ty};
13 use rustc::ty::cast::{CastTy, IntTy};
14 use rustc::mir::repr as mir;
19 use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder, Result};
20 use datum::{Datum, Lvalue};
21 use debuginfo::DebugLoc;
29 use super::MirContext;
30 use super::constant::const_scalar_checked_binop;
31 use super::operand::{OperandRef, OperandValue};
32 use super::lvalue::{LvalueRef, get_dataptr};
34 impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
    /// Translates a MIR `Rvalue` and stores its result into the destination
    /// lvalue `dest`, returning the (possibly new) block to continue in.
    /// Rvalues that produce immediate values are routed through
    /// `trans_rvalue_operand`; the remaining cases (struct unsizing,
    /// `Repeat`, `Aggregate`, `InlineAsm`) write into `dest.llval` directly.
35 pub fn trans_rvalue(&mut self,
36 bcx: BlockAndBuilder<'bcx, 'tcx>,
37 dest: LvalueRef<'tcx>,
38 rvalue: &mir::Rvalue<'tcx>,
40 -> BlockAndBuilder<'bcx, 'tcx>
42 debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
43 Value(dest.llval), rvalue);
    // Plain use of an operand: translate it and copy into `dest`.
46 mir::Rvalue::Use(ref operand) => {
47 let tr_operand = self.trans_operand(&bcx, operand);
48 // FIXME: consider not copying constants through stack. (fixable by translating
49 // constants into OperandValue::Ref, why don’t we do that yet if we don’t?)
50 self.store_operand(&bcx, dest.llval, tr_operand);
    // Unsizing coercion (e.g. thin pointer -> fat pointer, or a struct
    // whose last field unsizes via `CoerceUnsized`).
54 mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
55 let cast_ty = bcx.monomorphize(&cast_ty);
57 if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
58 // into-coerce of a thin pointer to a fat pointer - just
59 // use the operand path.
60 let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc);
61 self.store_operand(&bcx, dest.llval, temp);
65 // Unsize of a nontrivial struct. I would prefer for
66 // this to be eliminated by MIR translation, but
67 // `CoerceUnsized` can be passed by a where-clause,
68 // so the (generic) MIR may not be able to expand it.
69 let operand = self.trans_operand(&bcx, source);
70 let operand = operand.pack_if_pair(&bcx);
71 bcx.with_block(|bcx| {
    // A Pair was collapsed by `pack_if_pair` above, so it cannot occur here.
73 OperandValue::Pair(..) => bug!(),
74 OperandValue::Immediate(llval) => {
75 // unsize from an immediate structure. We don't
76 // really need a temporary alloca here, but
77 // avoiding it would require us to have
78 // `coerce_unsized_into` use extractvalue to
79 // index into the struct, and this case isn't
80 // important enough for it.
81 debug!("trans_rvalue: creating ugly alloca");
82 let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp");
83 base::store_ty(bcx, llval, lltemp, operand.ty);
84 base::coerce_unsized_into(bcx,
    // Already in memory: coerce straight out of the existing location.
88 OperandValue::Ref(llref) => {
89 base::coerce_unsized_into(bcx,
    // `[elem; count]`: translate the element once, then fill every slot
    // of the destination array with it.
98 mir::Rvalue::Repeat(ref elem, ref count) => {
99 let tr_elem = self.trans_operand(&bcx, elem);
    // The repeat count is a compile-time constant; materialize it as an
    // LLVM integer of the target's uint type.
100 let size = count.value.as_u64(bcx.tcx().sess.target.uint_type);
101 let size = C_uint(bcx.ccx(), size);
102 let base = get_dataptr(&bcx, dest.llval);
103 let bcx = bcx.map_block(|block| {
104 tvec::iter_vec_raw(block, base, tr_elem.ty, size, |block, llslot, _| {
105 self.store_operand_direct(block, llslot, tr_elem);
    // Aggregate construction: for ADTs, set the discriminant and store
    // each field through the ADT representation; otherwise store fields
    // positionally with GEPs.
112 mir::Rvalue::Aggregate(ref kind, ref operands) => {
114 mir::AggregateKind::Adt(adt_def, index, _) => {
115 let repr = adt::represent_type(bcx.ccx(), dest.ty.to_ty(bcx.tcx()));
116 let disr = Disr::from(adt_def.variants[index].disr_val);
117 bcx.with_block(|bcx| {
118 adt::trans_set_discr(bcx, &repr, dest.llval, Disr::from(disr));
120 for (i, operand) in operands.iter().enumerate() {
121 let op = self.trans_operand(&bcx, operand);
122 // Do not generate stores and GEPis for zero-sized fields.
123 if !common::type_is_zero_size(bcx.ccx(), op.ty) {
124 let val = adt::MaybeSizedValue::sized(dest.llval);
125 let lldest_i = adt::trans_field_ptr_builder(&bcx, &repr,
127 self.store_operand(&bcx, lldest_i, op);
132 // FIXME Shouldn't need to manually trigger closure instantiations.
133 if let mir::AggregateKind::Closure(def_id, substs) = *kind {
136 closure::trans_closure_body_via_mir(bcx.ccx(),
138 bcx.monomorphize(&substs));
141 for (i, operand) in operands.iter().enumerate() {
142 let op = self.trans_operand(&bcx, operand);
143 // Do not generate stores and GEPis for zero-sized fields.
144 if !common::type_is_zero_size(bcx.ccx(), op.ty) {
145 // Note: perhaps this should be StructGep, but
146 // note that in some cases the values here will
147 // not be structs but arrays.
148 let dest = bcx.gepi(dest.llval, &[0, i]);
149 self.store_operand(&bcx, dest, op);
    // Inline assembly: outputs are translated as lvalues, inputs as
    // immediate values, then the asm block itself is emitted.
157 mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => {
158 let outputs = outputs.iter().map(|output| {
159 let lvalue = self.trans_lvalue(&bcx, output);
160 Datum::new(lvalue.llval, lvalue.ty.to_ty(bcx.tcx()),
164 let input_vals = inputs.iter().map(|input| {
165 self.trans_operand(&bcx, input).immediate()
168 bcx.with_block(|bcx| {
169 asm::trans_inline_asm(bcx, asm, outputs, input_vals);
    // All remaining rvalue kinds produce an operand value; translate it
    // and store the result into the destination.
176 assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue));
177 let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc);
178 self.store_operand(&bcx, dest.llval, temp);
    /// Translates an rvalue that yields an `OperandRef` (an SSA-style
    /// value rather than an in-memory aggregate). Callers must only pass
    /// rvalues for which `rvalue_creates_operand` returns true; the
    /// assertion below enforces that.
184 pub fn trans_rvalue_operand(&mut self,
185 bcx: BlockAndBuilder<'bcx, 'tcx>,
186 rvalue: &mir::Rvalue<'tcx>,
188 -> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>)
190 assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue),
191 "cannot trans {:?} to operand", rvalue);
194 mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
195 let operand = self.trans_operand(&bcx, source);
196 debug!("cast operand is {:?}", operand);
197 let cast_ty = bcx.monomorphize(&cast_ty);
199 let val = match *kind {
    // Turn a zero-sized `TyFnDef` item into an actual function pointer.
200 mir::CastKind::ReifyFnPointer => {
201 match operand.ty.sty {
202 ty::TyFnDef(def_id, substs, _) => {
203 OperandValue::Immediate(
204 Callee::def(bcx.ccx(), def_id, substs)
205 .reify(bcx.ccx()).val)
208 bug!("{} cannot be reified to a fn ptr", operand.ty)
212 mir::CastKind::UnsafeFnPointer => {
213 // this is a no-op at the LLVM level
216 mir::CastKind::Unsize => {
217 // unsize targets other than to a fat pointer currently
218 // can't be operands.
219 assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty));
222 OperandValue::Pair(lldata, llextra) => {
223 // unsize from a fat pointer - this is a
224 // "trait-object-to-supertrait" coercion, for
226 // &'a fmt::Debug+Send => &'a fmt::Debug,
227 // So we need to pointercast the base to ensure
228 // the types match up.
229 let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx(), cast_ty);
230 let lldata = bcx.pointercast(lldata, llcast_ty);
231 OperandValue::Pair(lldata, llextra)
    // Thin pointer -> fat pointer: compute the extra (metadata) word.
233 OperandValue::Immediate(lldata) => {
235 let (lldata, llextra) = bcx.with_block(|bcx| {
236 base::unsize_thin_ptr(bcx, lldata,
239 OperandValue::Pair(lldata, llextra)
241 OperandValue::Ref(_) => {
242 bug!("by-ref operand {:?} in trans_rvalue_operand",
    // Misc cast whose *source* is a fat pointer: either fat->fat
    // (recast the data pointer, keep the metadata) or fat->thin
    // (drop the metadata).
247 mir::CastKind::Misc if common::type_is_fat_ptr(bcx.tcx(), operand.ty) => {
248 let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty);
249 let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty);
250 if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val {
251 if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
252 let ll_cft = ll_cast_ty.field_types();
253 let ll_fft = ll_from_ty.field_types();
254 let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
    // The metadata field must have the same LLVM kind on both sides.
255 assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
256 OperandValue::Pair(data_cast, meta_ptr)
257 } else { // cast to thin-ptr
258 // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
259 // pointer-cast of that pointer to desired pointer type.
260 let llval = bcx.pointercast(data_ptr, ll_cast_ty);
261 OperandValue::Immediate(llval)
264 bug!("Unexpected non-Pair operand")
    // Scalar misc cast: numeric conversions, enum->int, ptr<->int, etc.
267 mir::CastKind::Misc => {
268 debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty));
269 let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
270 let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
271 let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty);
272 let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty);
    // C-like enums first get lowered to their discriminant value;
    // signedness is taken from the discriminant representation.
273 let (llval, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in {
274 let repr = adt::represent_type(bcx.ccx(), operand.ty);
275 let discr = match operand.val {
276 OperandValue::Immediate(llval) => llval,
277 OperandValue::Ref(llptr) => {
278 bcx.with_block(|bcx| {
279 adt::trans_get_discr(bcx, &repr, llptr, None, true)
282 OperandValue::Pair(..) => bug!("Unexpected Pair operand")
284 (discr, adt::is_discr_signed(&repr))
286 (operand.immediate(), operand.ty.is_signed())
    // Pick the LLVM cast instruction from the (source, target)
    // category pair; int widening respects source signedness.
289 let newval = match (r_t_in, r_t_out) {
290 (CastTy::Int(_), CastTy::Int(_)) => {
291 let srcsz = ll_t_in.int_width();
292 let dstsz = ll_t_out.int_width();
294 bcx.bitcast(llval, ll_t_out)
295 } else if srcsz > dstsz {
296 bcx.trunc(llval, ll_t_out)
298 bcx.sext(llval, ll_t_out)
300 bcx.zext(llval, ll_t_out)
303 (CastTy::Float, CastTy::Float) => {
304 let srcsz = ll_t_in.float_width();
305 let dstsz = ll_t_out.float_width();
307 bcx.fpext(llval, ll_t_out)
308 } else if srcsz > dstsz {
309 bcx.fptrunc(llval, ll_t_out)
314 (CastTy::Ptr(_), CastTy::Ptr(_)) |
315 (CastTy::FnPtr, CastTy::Ptr(_)) |
316 (CastTy::RPtr(_), CastTy::Ptr(_)) =>
317 bcx.pointercast(llval, ll_t_out),
318 (CastTy::Ptr(_), CastTy::Int(_)) |
319 (CastTy::FnPtr, CastTy::Int(_)) =>
320 bcx.ptrtoint(llval, ll_t_out),
321 (CastTy::Int(_), CastTy::Ptr(_)) =>
322 bcx.inttoptr(llval, ll_t_out),
323 (CastTy::Int(_), CastTy::Float) if signed =>
324 bcx.sitofp(llval, ll_t_out),
325 (CastTy::Int(_), CastTy::Float) =>
326 bcx.uitofp(llval, ll_t_out),
327 (CastTy::Float, CastTy::Int(IntTy::I)) =>
328 bcx.fptosi(llval, ll_t_out),
329 (CastTy::Float, CastTy::Int(_)) =>
330 bcx.fptoui(llval, ll_t_out),
331 _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
333 OperandValue::Immediate(newval)
336 let operand = OperandRef {
    // Taking a reference: lvalues are already addresses, so the lvalue's
    // `llval` *is* the reference value; no load or copy is needed.
343 mir::Rvalue::Ref(_, bk, ref lvalue) => {
344 let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
346 let ty = tr_lvalue.ty.to_ty(bcx.tcx());
    // The region is erased at trans time; only mutability matters here.
347 let ref_ty = bcx.tcx().mk_ref(
348 bcx.tcx().mk_region(ty::ReErased),
349 ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
352 // Note: lvalues are indirect, so storing the `llval` into the
353 // destination effectively creates a reference.
    // Sized referent: thin pointer. Unsized: fat pointer (addr + extra).
354 let operand = if common::type_is_sized(bcx.tcx(), ty) {
356 val: OperandValue::Immediate(tr_lvalue.llval),
361 val: OperandValue::Pair(tr_lvalue.llval,
    // Length of an array/slice lvalue, always a `usize`.
369 mir::Rvalue::Len(ref lvalue) => {
370 let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
371 let operand = OperandRef {
372 val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx())),
373 ty: bcx.tcx().types.usize,
    // Binary operation: fat pointers get a dedicated comparison path;
    // everything else goes through `trans_scalar_binop`.
378 mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
379 let lhs = self.trans_operand(&bcx, lhs);
380 let rhs = self.trans_operand(&bcx, rhs);
381 let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) {
382 match (lhs.val, rhs.val) {
383 (OperandValue::Pair(lhs_addr, lhs_extra),
384 (OperandValue::Pair(rhs_addr, rhs_extra)) => {
385 bcx.with_block(|bcx| {
386 base::compare_fat_ptrs(bcx,
389 lhs.ty, op.to_hir_binop(),
397 self.trans_scalar_binop(&bcx, op,
398 lhs.immediate(), rhs.immediate(),
401 let operand = OperandRef {
402 val: OperandValue::Immediate(llresult),
403 ty: self.mir.binop_ty(bcx.tcx(), op, lhs.ty, rhs.ty),
    // Overflow-checked binop: result type is the tuple `(value, bool)`,
    // where the bool flags overflow.
407 mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
408 let lhs = self.trans_operand(&bcx, lhs);
409 let rhs = self.trans_operand(&bcx, rhs);
410 let result = self.trans_scalar_checked_binop(&bcx, op,
411 lhs.immediate(), rhs.immediate(),
413 let val_ty = self.mir.binop_ty(bcx.tcx(), op, lhs.ty, rhs.ty);
414 let operand_ty = bcx.tcx().mk_tup(vec![val_ty, bcx.tcx().types.bool]);
415 let operand = OperandRef {
    // Unary operation: `!` lowers to `not`; `-` picks the float or
    // integer negation based on the operand type.
423 mir::Rvalue::UnaryOp(op, ref operand) => {
424 let operand = self.trans_operand(&bcx, operand);
425 let lloperand = operand.immediate();
426 let is_float = operand.ty.is_fp();
427 let llval = match op {
428 mir::UnOp::Not => bcx.not(lloperand),
429 mir::UnOp::Neg => if is_float {
436 val: OperandValue::Immediate(llval),
    // `box expr`: allocate on the heap with size/alignment of the
    // content type and return the raw pointer as the operand.
441 mir::Rvalue::Box(content_ty) => {
442 let content_ty: Ty<'tcx> = bcx.monomorphize(&content_ty);
443 let llty = type_of::type_of(bcx.ccx(), content_ty);
444 let llsize = machine::llsize_of(bcx.ccx(), llty);
445 let align = type_of::align_of(bcx.ccx(), content_ty);
446 let llalign = C_uint(bcx.ccx(), align);
447 let llty_ptr = llty.ptr_to();
448 let box_ty = bcx.tcx().mk_box(content_ty);
449 let mut llval = None;
450 let bcx = bcx.map_block(|bcx| {
451 let Result { bcx, val } = base::malloc_raw_dyn(bcx,
460 let operand = OperandRef {
461 val: OperandValue::Immediate(llval.unwrap()),
467 mir::Rvalue::Use(ref operand) => {
468 let operand = self.trans_operand(&bcx, operand);
    // These rvalue kinds write into memory and never form operands;
    // reaching here contradicts the assertion at the top.
471 mir::Rvalue::Repeat(..) |
472 mir::Rvalue::Aggregate(..) |
473 mir::Rvalue::InlineAsm { .. } => {
474 bug!("cannot generate operand from rvalue {:?}", rvalue);
    /// Emits the LLVM instruction(s) for an unchecked scalar binary
    /// operation, choosing the float / signed-int / unsigned-int variant
    /// based on `input_ty` (the type of `lhs`). Returns the raw result
    /// value.
480 pub fn trans_scalar_binop(&mut self,
481 bcx: &BlockAndBuilder<'bcx, 'tcx>,
485 input_ty: Ty<'tcx>) -> ValueRef {
486 let is_float = input_ty.is_fp();
487 let is_signed = input_ty.is_signed();
    // Arithmetic: float ops and int ops use different LLVM instructions;
    // Div/Rem additionally distinguish signed from unsigned.
489 mir::BinOp::Add => if is_float {
494 mir::BinOp::Sub => if is_float {
499 mir::BinOp::Mul => if is_float {
504 mir::BinOp::Div => if is_float {
506 } else if is_signed {
511 mir::BinOp::Rem => if is_float {
513 } else if is_signed {
518 mir::BinOp::BitOr => bcx.or(lhs, rhs),
519 mir::BinOp::BitAnd => bcx.and(lhs, rhs),
520 mir::BinOp::BitXor => bcx.xor(lhs, rhs),
    // Shifts go through helpers that mask the shift amount, so the
    // emitted LLVM shift never has UB from an over-wide shift count.
522 bcx.with_block(|bcx| {
523 common::build_unchecked_lshift(bcx,
530 bcx.with_block(|bcx| {
531 common::build_unchecked_rshift(bcx,
    // Comparisons are delegated to the shared scalar-comparison helper,
    // which handles sign and float ordering.
538 mir::BinOp::Eq | mir::BinOp::Lt | mir::BinOp::Gt |
539 mir::BinOp::Ne | mir::BinOp::Le | mir::BinOp::Ge => {
540 bcx.with_block(|bcx| {
541 base::compare_scalar_types(bcx, lhs, rhs, input_ty,
542 op.to_hir_binop(), DebugLoc::None)
    /// Emits an overflow-checked scalar binary operation, producing an
    /// `OperandValue::Pair(value, overflowed)` where the second element is
    /// an `i1` overflow flag.
548 pub fn trans_scalar_checked_binop(&mut self,
549 bcx: &BlockAndBuilder<'bcx, 'tcx>,
553 input_ty: Ty<'tcx>) -> OperandValue {
554 // This case can currently arise only from functions marked
555 // with #[rustc_inherit_overflow_checks] and inlined from
556 // another crate (mostly core::num generic/#[inline] fns),
557 // while the current crate doesn't use overflow checks.
    // Overflow checks disabled: fall back to the unchecked op and a
    // constant-false overflow flag.
558 if !bcx.ccx().check_overflow() {
559 let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);
560 return OperandValue::Pair(val, C_bool(bcx.ccx(), false));
563 // First try performing the operation on constants, which
564 // will only succeed if both operands are constant.
565 // This is necessary to determine when an overflow Assert
566 // will always panic at runtime, and produce a warning.
567 if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) {
568 return OperandValue::Pair(val, C_bool(bcx.ccx(), of));
571 let (val, of) = match op {
572 // These are checked using intrinsics
573 mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
575 mir::BinOp::Add => OverflowOp::Add,
576 mir::BinOp::Sub => OverflowOp::Sub,
577 mir::BinOp::Mul => OverflowOp::Mul,
    // The `llvm.*.with.overflow.*` intrinsics return a {value, i1} pair.
580 let intrinsic = get_overflow_intrinsic(oop, bcx, input_ty);
581 let res = bcx.call(intrinsic, &[lhs, rhs], None);
583 (bcx.extract_value(res, 0),
584 bcx.extract_value(res, 1))
    // Shifts overflow when the shift amount has bits set outside the
    // legal range; detect that by masking with the inverted shift mask.
586 mir::BinOp::Shl | mir::BinOp::Shr => {
587 let lhs_llty = val_ty(lhs);
588 let rhs_llty = val_ty(rhs);
589 let invert_mask = bcx.with_block(|bcx| {
590 common::shift_mask_val(bcx, lhs_llty, rhs_llty, true)
592 let outer_bits = bcx.and(rhs, invert_mask);
    // Overflowed iff any out-of-range bit of the shift amount is set.
594 let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
595 let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);
600 bug!("Operator `{:?}` is not a checkable operator", op)
604 OperandValue::Pair(val, of)
/// Returns whether the given rvalue can be translated into an immediate
/// `OperandRef` (see `trans_rvalue_operand`), as opposed to rvalues that
/// must be written into a destination lvalue (`Repeat`, `Aggregate`,
/// `InlineAsm`).
608 pub fn rvalue_creates_operand<'bcx, 'tcx>(_mir: &mir::Mir<'tcx>,
609 _bcx: &BlockAndBuilder<'bcx, 'tcx>,
610 rvalue: &mir::Rvalue<'tcx>) -> bool {
612 mir::Rvalue::Ref(..) |
613 mir::Rvalue::Len(..) |
614 mir::Rvalue::Cast(..) | // (*)
615 mir::Rvalue::BinaryOp(..) |
616 mir::Rvalue::CheckedBinaryOp(..) |
617 mir::Rvalue::UnaryOp(..) |
618 mir::Rvalue::Box(..) |
619 mir::Rvalue::Use(..) =>
621 mir::Rvalue::Repeat(..) |
622 mir::Rvalue::Aggregate(..) |
623 mir::Rvalue::InlineAsm { .. } =>
627 // (*) this is only true if the type is suitable
630 #[derive(Copy, Clone)]
/// Looks up the LLVM `*.with.overflow` intrinsic for the given overflow
/// operation (`Add`/`Sub`/`Mul`) applied to the integer type `ty`.
/// `isize`/`usize` are first normalized to the fixed-width integer type
/// matching the target's pointer width, since the LLVM intrinsics are
/// only defined for concrete bit widths.
635 fn get_overflow_intrinsic(oop: OverflowOp, bcx: &BlockAndBuilder, ty: Ty) -> ValueRef {
636 use syntax::ast::IntTy::*;
637 use syntax::ast::UintTy::*;
638 use rustc::ty::{TyInt, TyUint};
    // Normalize pointer-sized ints to i16/i32/i64 per the target.
642 let new_sty = match ty.sty {
643 TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
646 _ => panic!("unsupported target word size")
648 TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
651 _ => panic!("unsupported target word size")
653 ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
654 _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
    // Select the intrinsic name: `s` prefix for signed, `u` for unsigned.
657 let name = match oop {
658 OverflowOp::Add => match new_sty {
659 TyInt(I8) => "llvm.sadd.with.overflow.i8",
660 TyInt(I16) => "llvm.sadd.with.overflow.i16",
661 TyInt(I32) => "llvm.sadd.with.overflow.i32",
662 TyInt(I64) => "llvm.sadd.with.overflow.i64",
664 TyUint(U8) => "llvm.uadd.with.overflow.i8",
665 TyUint(U16) => "llvm.uadd.with.overflow.i16",
666 TyUint(U32) => "llvm.uadd.with.overflow.i32",
667 TyUint(U64) => "llvm.uadd.with.overflow.i64",
671 OverflowOp::Sub => match new_sty {
672 TyInt(I8) => "llvm.ssub.with.overflow.i8",
673 TyInt(I16) => "llvm.ssub.with.overflow.i16",
674 TyInt(I32) => "llvm.ssub.with.overflow.i32",
675 TyInt(I64) => "llvm.ssub.with.overflow.i64",
677 TyUint(U8) => "llvm.usub.with.overflow.i8",
678 TyUint(U16) => "llvm.usub.with.overflow.i16",
679 TyUint(U32) => "llvm.usub.with.overflow.i32",
680 TyUint(U64) => "llvm.usub.with.overflow.i64",
684 OverflowOp::Mul => match new_sty {
685 TyInt(I8) => "llvm.smul.with.overflow.i8",
686 TyInt(I16) => "llvm.smul.with.overflow.i16",
687 TyInt(I32) => "llvm.smul.with.overflow.i32",
688 TyInt(I64) => "llvm.smul.with.overflow.i64",
690 TyUint(U8) => "llvm.umul.with.overflow.i8",
691 TyUint(U16) => "llvm.umul.with.overflow.i16",
692 TyUint(U32) => "llvm.umul.with.overflow.i32",
693 TyUint(U64) => "llvm.umul.with.overflow.i64",
    // The compiled module caches intrinsic declarations; this retrieves
    // (or declares) the named one.
699 bcx.ccx().get_intrinsic(&name)