// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::ValueRef;
use rustc::middle::ty::{self, Ty};
use middle::ty::cast::{CastTy, IntTy};
use rustc::mir::repr as mir;

use trans::adt;
use trans::asm;
use trans::base;
use trans::build;
use trans::common::{self, Block, Result};
use trans::debuginfo::DebugLoc;
use trans::declare;
use trans::expr;
use trans::machine;
use trans::type_::Type;
use trans::type_of;
use trans::tvec;

use super::MirContext;
use super::operand::{OperandRef, OperandValue};
use super::lvalue::LvalueRef;
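
// MIR rvalue translation: `trans_rvalue` stores an rvalue into a destination
// lvalue, while `trans_rvalue_operand` yields it directly as an `OperandRef`
// (only for the rvalues accepted by `rvalue_creates_operand` at the bottom of
// this file).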

impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
    pub fn trans_rvalue(&mut self,
                        bcx: Block<'bcx, 'tcx>,
                        dest: LvalueRef<'tcx>,
                        rvalue: &mir::Rvalue<'tcx>)
                        -> Block<'bcx, 'tcx>
    {
        debug!("trans_rvalue(dest.llval={}, rvalue={:?})",
               bcx.val_to_string(dest.llval),
               rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                self.trans_operand_into(bcx, dest.llval, operand);
                bcx
            }

            mir::Rvalue::Cast(mir::CastKind::Unsize, ref operand, cast_ty) => {
                if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
                    // into-coerce of a thin pointer to a fat pointer - just
                    // use the operand path.
                    let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
                    self.store_operand(bcx, dest.llval, temp);
                    return bcx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR translation, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.trans_operand(bcx, operand);
                match operand.val {
                    OperandValue::FatPtr(..) => unreachable!(),
                    OperandValue::Immediate(llval) => {
                        // unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use extractvalue to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("trans_rvalue: creating ugly alloca");
                        let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp");
                        base::store_ty(bcx, llval, lltemp, operand.ty);
                        base::coerce_unsized_into(bcx,
                                                  lltemp, operand.ty,
                                                  dest.llval, cast_ty);
                    }
                    OperandValue::Ref(llref) => {
                        base::coerce_unsized_into(bcx,
                                                  llref, operand.ty,
                                                  dest.llval, cast_ty);
                    }
                }
                bcx
            }

            mir::Rvalue::Repeat(ref elem, ref count) => {
                let elem = self.trans_operand(bcx, elem);
                let size = self.trans_constant(bcx, count).immediate();
                let base = expr::get_dataptr(bcx, dest.llval);
                tvec::iter_vec_raw(bcx, base, elem.ty, size, |bcx, llslot, _| {
                    self.store_operand(bcx, llslot, elem);
                    bcx
                })
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                match *kind {
                    // Unit struct or variant; both are translated very differently compared to any
                    // other aggregate
                    mir::AggregateKind::Adt(adt_def, index, _)
                        if adt_def.variants[index].kind() == ty::VariantKind::Unit => {
                        let repr = adt::represent_type(bcx.ccx(), dest.ty.to_ty(bcx.tcx()));
                        let disr = adt_def.variants[index].disr_val;
                        adt::trans_set_discr(bcx, &*repr, dest.llval, disr);
                    },
                    _ => {
                        for (i, operand) in operands.iter().enumerate() {
                            // Note: perhaps this should be StructGep, but
                            // note that in some cases the values here will
                            // not be structs but arrays.
                            let lldest_i = build::GEPi(bcx, dest.llval, &[0, i]);
                            self.trans_operand_into(bcx, lldest_i, operand);
                        }
                    }
                }
                bcx
            }

            mir::Rvalue::Slice { ref input, from_start, from_end } => {
                let ccx = bcx.ccx();
                let input = self.trans_lvalue(bcx, input);
                let (llbase, lllen) = tvec::get_base_and_len(bcx,
                                                             input.llval,
                                                             input.ty.to_ty(bcx.tcx()));
                let llbase1 = build::GEPi(bcx, llbase, &[from_start]);
                let adj = common::C_uint(ccx, from_start + from_end);
                let lllen1 = build::Sub(bcx, lllen, adj, DebugLoc::None);
                let lladdrdest = expr::get_dataptr(bcx, dest.llval);
                build::Store(bcx, llbase1, lladdrdest);
                let llmetadest = expr::get_meta(bcx, dest.llval);
                build::Store(bcx, lllen1, llmetadest);
                bcx
            }

            mir::Rvalue::InlineAsm(ref inline_asm) => {
                asm::trans_inline_asm(bcx, inline_asm)
            }

            _ => {
                assert!(rvalue_creates_operand(rvalue));
                let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
                self.store_operand(bcx, dest.llval, temp);
                bcx
            }
        }
    }
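
    /// Translates an rvalue directly into an `OperandRef`, without storing it
    /// into a destination lvalue. Only valid for rvalues accepted by
    /// `rvalue_creates_operand`.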
    pub fn trans_rvalue_operand(&mut self,
                                bcx: Block<'bcx, 'tcx>,
                                rvalue: &mir::Rvalue<'tcx>)
                                -> (Block<'bcx, 'tcx>, OperandRef<'tcx>)
    {
        assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let operand = self.trans_operand(bcx, operand);
                (bcx, operand)
            }

            mir::Rvalue::Cast(ref kind, ref operand, cast_ty) => {
                let operand = self.trans_operand(bcx, operand);
                debug!("cast operand is {}", operand.repr(bcx));
                let cast_ty = bcx.monomorphize(&cast_ty);

                let val = match *kind {
                    mir::CastKind::ReifyFnPointer |
                    mir::CastKind::UnsafeFnPointer => {
                        // these are no-ops at the LLVM level
                        operand.val
                    }
                    mir::CastKind::Unsize => {
                        // unsize targets other than to a fat pointer currently
                        // can't be operands.
                        assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty));

                        match operand.val {
                            OperandValue::FatPtr(..) => {
                                // unsize from a fat pointer - this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example,
                                //   &'a fmt::Debug+Send => &'a fmt::Debug,
                                // and is a no-op at the LLVM level
                                operand.val
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                let (lldata, llextra) =
                                    base::unsize_thin_ptr(bcx, lldata,
                                                          operand.ty, cast_ty);
                                OperandValue::FatPtr(lldata, llextra)
                            }
                            OperandValue::Ref(_) => {
                                bcx.sess().bug(
                                    &format!("by-ref operand {} in trans_rvalue_operand",
                                             operand.repr(bcx)));
                            }
                        }
                    }
                    mir::CastKind::Misc if common::type_is_immediate(bcx.ccx(), operand.ty) => {
                        debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty));
                        let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
                        let ll_t_in = type_of::arg_type_of(bcx.ccx(), operand.ty);
                        let ll_t_out = type_of::arg_type_of(bcx.ccx(), cast_ty);
                        let (llval, ll_t_in, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in {
                            let repr = adt::represent_type(bcx.ccx(), operand.ty);
                            let llval = operand.immediate();
                            let discr = adt::trans_get_discr(bcx, &*repr, llval, None);
                            (discr, common::val_ty(discr), adt::is_discr_signed(&*repr))
                        } else {
                            (operand.immediate(), ll_t_in, operand.ty.is_signed())
                        };

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => {
                                let srcsz = ll_t_in.int_width();
                                let dstsz = ll_t_out.int_width();
                                if srcsz == dstsz {
                                    build::BitCast(bcx, llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    build::Trunc(bcx, llval, ll_t_out)
                                } else if signed {
                                    build::SExt(bcx, llval, ll_t_out)
                                } else {
                                    build::ZExt(bcx, llval, ll_t_out)
                                }
                            }
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = ll_t_in.float_width();
                                let dstsz = ll_t_out.float_width();
                                if dstsz > srcsz {
                                    build::FPExt(bcx, llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    build::FPTrunc(bcx, llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Ptr(_), CastTy::Ptr(_)) |
                            (CastTy::FnPtr, CastTy::Ptr(_)) |
                            (CastTy::RPtr(_), CastTy::Ptr(_)) =>
                                build::PointerCast(bcx, llval, ll_t_out),
                            (CastTy::Ptr(_), CastTy::Int(_)) |
                            (CastTy::FnPtr, CastTy::Int(_)) =>
                                build::PtrToInt(bcx, llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Ptr(_)) =>
                                build::IntToPtr(bcx, llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) if signed =>
                                build::SIToFP(bcx, llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) =>
                                build::UIToFP(bcx, llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(IntTy::I)) =>
                                build::FPToSI(bcx, llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(_)) =>
                                build::FPToUI(bcx, llval, ll_t_out),
                            _ => bcx.ccx().sess().bug(
                                &format!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
                            )
                        };
                        OperandValue::Immediate(newval)
                    }
                    mir::CastKind::Misc => { // Casts from a fat-ptr.
                        let ll_cast_ty = type_of::arg_type_of(bcx.ccx(), cast_ty);
                        let ll_from_ty = type_of::arg_type_of(bcx.ccx(), operand.ty);
                        if let OperandValue::FatPtr(data_ptr, meta_ptr) = operand.val {
                            if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
                                let ll_cft = ll_cast_ty.field_types();
                                let ll_fft = ll_from_ty.field_types();
                                let data_cast = build::PointerCast(bcx, data_ptr, ll_cft[0]);
                                assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
                                OperandValue::FatPtr(data_cast, meta_ptr)
                            } else { // cast to thin-ptr
                                // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                // pointer-cast of that pointer to desired pointer type.
                                let llval = build::PointerCast(bcx, data_ptr, ll_cast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            panic!("Unexpected non-FatPtr operand")
                        }
                    }
                };
                (bcx, OperandRef {
                    val: val,
                    ty: cast_ty
                })
            }

            mir::Rvalue::Ref(_, bk, ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(bcx, lvalue);

                let ty = tr_lvalue.ty.to_ty(bcx.tcx());
                let ref_ty = bcx.tcx().mk_ref(
                    bcx.tcx().mk_region(ty::ReStatic),
                    ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
                );

                // Note: lvalues are indirect, so storing the `llval` into the
                // destination effectively creates a reference.
                if common::type_is_sized(bcx.tcx(), ty) {
                    (bcx, OperandRef {
                        val: OperandValue::Immediate(tr_lvalue.llval),
                        ty: ref_ty,
                    })
                } else {
                    (bcx, OperandRef {
                        val: OperandValue::FatPtr(tr_lvalue.llval,
                                                  tr_lvalue.llextra),
                        ty: ref_ty,
                    })
                }
            }

            mir::Rvalue::Len(ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(bcx, lvalue);
                (bcx, OperandRef {
                    val: OperandValue::Immediate(self.lvalue_len(bcx, tr_lvalue)),
                    ty: bcx.tcx().types.usize,
                })
            }

            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(bcx, lhs);
                let rhs = self.trans_operand(bcx, rhs);
                let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) {
                    match (lhs.val, rhs.val) {
                        (OperandValue::FatPtr(lhs_addr, lhs_extra),
                         OperandValue::FatPtr(rhs_addr, rhs_extra)) => {
                            base::compare_fat_ptrs(bcx,
                                                   lhs_addr, lhs_extra,
                                                   rhs_addr, rhs_extra,
                                                   lhs.ty, op.to_hir_binop(),
                                                   DebugLoc::None)
                        }
                        _ => unreachable!()
                    }
                } else {
                    self.trans_scalar_binop(bcx, op,
                                            lhs.immediate(), rhs.immediate(),
                                            lhs.ty, DebugLoc::None)
                };
                (bcx, OperandRef {
                    val: OperandValue::Immediate(llresult),
                    ty: self.mir.binop_ty(bcx.tcx(), op, lhs.ty, rhs.ty),
                })
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.trans_operand(bcx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.ty.is_fp();
                let debug_loc = DebugLoc::None;
                let llval = match op {
                    mir::UnOp::Not => build::Not(bcx, lloperand, debug_loc),
                    mir::UnOp::Neg => if is_float {
                        build::FNeg(bcx, lloperand, debug_loc)
                    } else {
                        build::Neg(bcx, lloperand, debug_loc)
                    }
                };
                (bcx, OperandRef {
                    val: OperandValue::Immediate(llval),
                    ty: operand.ty,
                })
            }

            mir::Rvalue::Box(content_ty) => {
                let content_ty: Ty<'tcx> = bcx.monomorphize(&content_ty);
                let llty = type_of::type_of(bcx.ccx(), content_ty);
                let llsize = machine::llsize_of(bcx.ccx(), llty);
                let align = type_of::align_of(bcx.ccx(), content_ty);
                let llalign = common::C_uint(bcx.ccx(), align);
                let llty_ptr = llty.ptr_to();
                let box_ty = bcx.tcx().mk_box(content_ty);
                let Result { bcx, val: llval } = base::malloc_raw_dyn(bcx,
                                                                      llty_ptr,
                                                                      box_ty,
                                                                      llsize,
                                                                      llalign,
                                                                      DebugLoc::None);
                (bcx, OperandRef {
                    val: OperandValue::Immediate(llval),
                    ty: box_ty,
                })
            }

            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) |
            mir::Rvalue::Slice { .. } |
            mir::Rvalue::InlineAsm(..) => {
                bcx.tcx().sess.bug(&format!("cannot generate operand from rvalue {:?}", rvalue));
            }
        }
    }

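    /// Translates a binary operation on scalar (immediate) operands, picking
    /// the float, signed, or unsigned LLVM instruction based on `input_ty`.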
    pub fn trans_scalar_binop(&mut self,
                              bcx: Block<'bcx, 'tcx>,
                              op: mir::BinOp,
                              lhs: ValueRef,
                              rhs: ValueRef,
                              input_ty: Ty<'tcx>,
                              debug_loc: DebugLoc) -> ValueRef {
        let is_float = input_ty.is_fp();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => if is_float {
                build::FAdd(bcx, lhs, rhs, debug_loc)
            } else {
                build::Add(bcx, lhs, rhs, debug_loc)
            },
            mir::BinOp::Sub => if is_float {
                build::FSub(bcx, lhs, rhs, debug_loc)
            } else {
                build::Sub(bcx, lhs, rhs, debug_loc)
            },
            mir::BinOp::Mul => if is_float {
                build::FMul(bcx, lhs, rhs, debug_loc)
            } else {
                build::Mul(bcx, lhs, rhs, debug_loc)
            },
            mir::BinOp::Div => if is_float {
                build::FDiv(bcx, lhs, rhs, debug_loc)
            } else if is_signed {
                build::SDiv(bcx, lhs, rhs, debug_loc)
            } else {
                build::UDiv(bcx, lhs, rhs, debug_loc)
            },
            mir::BinOp::Rem => if is_float {
                // LLVM currently always lowers `frem` instructions to the
                // appropriate library calls typically found in libm. Notably f64 gets wired up
                // to `fmod` and f32 gets wired up to `fmodf`. Inconveniently for
                // us, 32-bit MSVC does not actually have a `fmodf` symbol, it's
                // instead just an inline function in a header that goes up to a
                // f64, uses `fmod`, and then comes back down to a f32.
                //
                // Although LLVM knows that `fmodf` doesn't exist on MSVC, it will
                // still unconditionally lower frem instructions over 32-bit floats
                // to a call to `fmodf`. To work around this we special case MSVC
                // 32-bit float rem instructions and instead do the call out to
                // `fmod` ourselves.
                //
                // Note that this is currently duplicated with src/libcore/ops.rs
                // which does the same thing, and it would be nice to perhaps unify
                // these two implementations one day! Also note that we call `fmod`
                // for both 32 and 64-bit floats because if we emit any FRem
                // instruction at all then LLVM is capable of optimizing it into a
                // 32-bit FRem (which we're trying to avoid).
                let tcx = bcx.tcx();
                let use_fmod = tcx.sess.target.target.options.is_like_msvc &&
                               tcx.sess.target.target.arch == "x86";
                if use_fmod {
                    let f64t = Type::f64(bcx.ccx());
                    let fty = Type::func(&[f64t, f64t], &f64t);
                    let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty,
                                                    tcx.types.f64);
                    if input_ty == tcx.types.f32 {
                        let lllhs = build::FPExt(bcx, lhs, f64t);
                        let llrhs = build::FPExt(bcx, rhs, f64t);
                        let llres = build::Call(bcx, llfn, &[lllhs, llrhs],
                                                None, debug_loc);
                        build::FPTrunc(bcx, llres, Type::f32(bcx.ccx()))
                    } else {
                        build::Call(bcx, llfn, &[lhs, rhs],
                                    None, debug_loc)
                    }
                } else {
                    build::FRem(bcx, lhs, rhs, debug_loc)
                }
            } else if is_signed {
                build::SRem(bcx, lhs, rhs, debug_loc)
            } else {
                build::URem(bcx, lhs, rhs, debug_loc)
            },
            mir::BinOp::BitOr => build::Or(bcx, lhs, rhs, debug_loc),
            mir::BinOp::BitAnd => build::And(bcx, lhs, rhs, debug_loc),
            mir::BinOp::BitXor => build::Xor(bcx, lhs, rhs, debug_loc),
            mir::BinOp::Shl => common::build_unchecked_lshift(bcx,
                                                              lhs,
                                                              rhs,
                                                              debug_loc),
            mir::BinOp::Shr => common::build_unchecked_rshift(bcx,
                                                              input_ty,
                                                              lhs,
                                                              rhs,
                                                              debug_loc),
            mir::BinOp::Eq | mir::BinOp::Lt | mir::BinOp::Gt |
            mir::BinOp::Ne | mir::BinOp::Le | mir::BinOp::Ge => {
                base::compare_scalar_types(bcx, lhs, rhs, input_ty,
                                           op.to_hir_binop(), debug_loc)
            }
        }
    }
}

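/// Returns true if the rvalue can be translated to an operand via
/// `trans_rvalue_operand`; rvalues that must be written into a destination
/// lvalue (see the catch-all arm of `trans_rvalue`) return false.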
pub fn rvalue_creates_operand<'tcx>(rvalue: &mir::Rvalue<'tcx>) -> bool {
    match *rvalue {
        mir::Rvalue::Use(..) | // (*)
        mir::Rvalue::Ref(..) |
        mir::Rvalue::Len(..) |
        mir::Rvalue::Cast(..) | // (*)
        mir::Rvalue::BinaryOp(..) |
        mir::Rvalue::UnaryOp(..) |
        mir::Rvalue::Box(..) =>
            true,
        mir::Rvalue::Repeat(..) |
        mir::Rvalue::Aggregate(..) |
        mir::Rvalue::Slice { .. } |
        mir::Rvalue::InlineAsm(..) =>
            false,
    }

    // (*) this is only true if the type is suitable
}